code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    # Brython shim: stands in for the collection ABC ``Set`` that the broken
    # _abcoll import (see fixme above) cannot provide.
    # NOTE(review): this subclasses the *concrete* builtin ``set``, not the
    # abstract base class — confirm callers only rely on isinstance checks.
    pass
class Sequence(list):
    # Brython shim: stands in for the collection ABC ``Sequence``.
    # NOTE(review): subclasses the concrete builtin ``list`` rather than the
    # real ABC — confirm no caller registers virtual subclasses on it.
    pass
def _proxy(obj):
    '''Identity stand-in for weakref.proxy (unavailable here, see the fixme
    near the imports): returns *obj* unchanged, so links that would be weak
    references in CPython are actually strong references in this port.'''
    return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Node of OrderedDict's circular doubly linked list.  __slots__ keeps the
    # per-node footprint small; '__weakref__' is listed so nodes can be the
    # target of weak references (a no-op here, since _proxy is the identity).
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    # NOTE(review): in this Brython port _proxy is the identity function, so
    # self.__root and the prev links are really hard references and the link
    # cycles are never broken by weakrefs — confirm the GC copes with that.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialising an existing instance must not rebuild the list.
            self.__root
        except AttributeError:
            # First initialisation: create the sentinel and the key->link map.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        # __update is bound to MutableMapping.update further down the class.
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            # proxy() is the identity here, so this is a hard reference.
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        # Point the sentinel at itself, drop all links, then empty the dict.
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        # Raises KeyError here (via __map lookup) if key is absent.
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        # Rough estimate: every link is assumed to have the same size as the
        # sentinel; per-link proxies are counted even though _proxy is identity.
        sizeof = _sys.getsizeof
        n = len(self) + 1 # number of links including root
        size = sizeof(self.__dict__) # instance dictionary
        size += sizeof(self.__map) * 2 # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n # link objects
        size += sizeof(self.__root) * n # proxy objects
        return size
    #fixme brython.. Issue with _abcoll, which contains MutableMapping
    # Borrow the generic mixin implementations; __update is the private alias
    # used by __init__ so subclasses overriding update() don't break __init__.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Sentinel distinguishing "no default supplied" from default=None in pop().
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value. If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        # NOTE(review): without the recursive_repr guard, a self-referential
        # OrderedDict would recurse here — acceptable per the brython fixme.
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the bookkeeping attributes a fresh OrderedDict always has.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
    """Add one to the stored count of every element drawn from *iterable*.

    Elements absent from *mapping* start at zero; the mapping is mutated in
    place and nothing is returned.
    """
    # Bind the lookup method once so the loop avoids a repeated attribute fetch.
    fetch = mapping.get
    for item in iterable:
        mapping[item] = fetch(item, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba') # count elements from a string
    >>> c.most_common(3) # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c) # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements())) # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values()) # total of all counts
    15
    >>> c['a'] # count of letter 'a'
    5
    >>> for elem in 'shazam': # update counts from an iterable
    ... c[elem] += 1 # by adding 1 to each element's count
    >>> c['a'] # now there are seven 'a'
    7
    >>> del c['b'] # remove all 'b'
    >>> c['b'] # now there are zero 'b'
    0
    >>> d = Counter('simsalabim') # make another counter
    >>> c.update(d) # add in the second counter
    >>> c['a'] # now there are nine 'a'
    9
    >>> c.clear() # empty the counter
    >>> c
    Counter()
    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2 # reduce the count of 'b' by two
    >>> c.most_common() # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    # http://en.wikipedia.org/wiki/Multiset
    # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    # http://code.activestate.com/recipes/259174/
    # Knuth, TAOCP Vol. II section 4.6.3
    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        #super().__init__() #BE modified since super not supported
        dict.__init__(self)
        self.update(iterable, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        # Bounded heap selection avoids a full sort when only n items are needed.
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements(): # loop over factors
        ... product *= factor # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.items()))
    # Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch') # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d) # add elements from another counter
        >>> c['h'] # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts. Instead, we implement straight-addition. Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            # ``Mapping`` comes from the ``from collections.abc import *``
            # at the top of this file.
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    # NOTE(review): super() is used here although __init__
                    # says super is unsupported in this port — confirm.
                    super().update(iterable) # fast path when counter is empty
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)
    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero. Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch') # subtract elements from another iterable
        >>> c.subtract(Counter('watch')) # subtract elements from another counter
        >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)
    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)
    def __reduce__(self):
        # Pickle via a plain dict of the counts.
        return self.__class__, (dict(self),)
    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super().__delitem__(elem)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
    # Multiset-style mathematical operations discussed in:
    # Knuth TAOCP Volume II section 4.6.3 exercise 19
    # and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    # c += Counter()
    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Keys present only in *other* still contribute positive counts.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # A negative count in *other* becomes a positive count in the result.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result
    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # Only keys of self need checking: min with a missing key is <= 0.
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.
    '''
    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}]          # always at least one map
    def __missing__(self, key):
        # Hook for subclasses; __getitem__ calls this when no mapping has *key*.
        raise KeyError(key)
    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__
    def get(self, key, default=None):
        return self[key] if key in self else default
    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible
    def __iter__(self):
        return iter(set().union(*self.maps))
    def __contains__(self, key):
        return any(key in m for m in self.maps)
    def __bool__(self):
        # True when any underlying mapping is non-empty.
        return any(self.maps)
    def __repr__(self):
        # Bug fix: this class previously defined __repr__ twice; the second,
        # non-standard definition (comma-joined str() of each map) silently
        # shadowed this one.  Keep the single CPython-style repr.
        # NOTE: not wrapped in reprlib.recursive_repr (brython limitation), so
        # a self-referential ChainMap will recurse here.
        return '{0.__class__.__name__}({1})'.format(
            self, ', '.join(map(repr, self.maps)))
    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))
    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])
    __copy__ = copy
    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)
    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])
    def __setitem__(self, key, value):
        self.maps[0][key] = value
    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')
    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            # %s instead of {!r} kept from the brython workaround.
            raise KeyError('Key not found in the first mapping: %s' % key)
    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """Wrapper around a plain dict (kept in ``self.data``) built to subclass."""
    def __init__(self, dict=None, **kwargs):
        # Everything is funnelled through update() so that a subclass's
        # __setitem__ override also applies to constructor arguments.
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, key):
        try:
            return self.data[key]
        except KeyError:
            # Give a subclass-defined __missing__ hook a chance, dict-style.
            hook = getattr(self.__class__, "__missing__", None)
            if hook is None:
                raise
            return hook(self, key)
    def __setitem__(self, key, item):
        self.data[key] = item
    def __delitem__(self, key):
        del self.data[key]
    def __iter__(self):
        return iter(self.data)
    def __contains__(self, key):
        # Consults only the stored dict so that __missing__ does not make
        # absent keys appear present.
        return key in self.data
    def __repr__(self):
        return repr(self.data)
    def copy(self):
        """Return a shallow copy; subclasses are duplicated via copy.copy."""
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            # Detach the data so copy.copy duplicates only the instance
            # shell, then refill the clone through update().
            self.data = {}
            clone = copy.copy(self)
        finally:
            self.data = saved
        clone.update(self)
        return clone
    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Build a new instance mapping every key of *iterable* to *value*."""
        fresh = cls()
        for key in iterable:
            fresh[key] = value
        return fresh
################################################################################
### UserList
################################################################################
################################################################################
### UserString
################################################################################ | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_dsrole
version_added: '2.8'
short_description: Configure FlashBlade Management Directory Service Roles
description:
- Set or erase directory services role configurations.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description:
- Create or delete directory service role
default: present
type: str
choices: [ absent, present ]
role:
description:
- The directory service role to work on
choices: [ array_admin, ops_admin, readonly, storage_admin ]
type: str
group_base:
description:
- Specifies where the configured group is located in the directory
tree. This field consists of Organizational Units (OUs) that combine
with the base DN attribute and the configured group CNs to complete
the full Distinguished Name of the groups. The group base should
specify OU= for each OU and multiple OUs should be separated by commas.
The order of OUs is important and should get larger in scope from left
to right.
- Each OU should not exceed 64 characters in length.
type: str
group:
description:
- Sets the common Name (CN) of the configured directory service group
containing users for the FlashBlade. This name should be just the
Common Name of the group without the CN= specifier.
- Common Names should not exceed 64 characters in length.
type: str
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: Delete existing array_admin directory service role
purefb_dsrole:
role: array_admin
state: absent
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Create array_admin directory service role
purefb_dsrole:
role: array_admin
group_base: "OU=PureGroups,OU=SANManagers"
group: pureadmins
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Update ops_admin directory service role
purefb_dsrole:
role: ops_admin
group_base: "OU=PureGroups"
group: opsgroup
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
# Defer a missing purity_fb SDK: main() reports a clean error message via
# fail_json instead of this module crashing with ImportError at load time.
HAS_PURITY_FB = True
try:
    from purity_fb import DirectoryServiceRole
except ImportError:
    HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
def update_role(module, blade):
    """Update Directory Service Role"""
    # Compare the role's current group configuration with the requested one
    # and only call the API when something actually differs (idempotence).
    changed = False
    role_name = module.params['role']
    wanted_base = module.params['group_base']
    wanted_group = module.params['group']
    current = blade.directory_services.list_directory_services_roles(names=[role_name]).items[0]
    if (current.group_base, current.group) != (wanted_base, wanted_group):
        try:
            new_role = DirectoryServiceRole(group_base=wanted_base,
                                            group=wanted_group)
            blade.directory_services.update_directory_services_roles(names=[role_name],
                                                                     directory_service_role=new_role)
            changed = True
        except Exception:
            # Broad catch is deliberate: any SDK failure becomes a module failure.
            module.fail_json(msg='Update Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=changed)
def delete_role(module, blade):
    """Delete Directory Service Role"""
    # "Deleting" a role means blanking out its group configuration.
    changed = False
    role_name = module.params['role']
    try:
        cleared = DirectoryServiceRole(group_base='',
                                       group='')
        blade.directory_services.update_directory_services_roles(names=[role_name],
                                                                 directory_service_role=cleared)
        changed = True
    except Exception:
        # Broad catch is deliberate: any SDK failure becomes a module failure.
        module.fail_json(msg='Delete Directory Service Role {0} failed'.format(role_name))
    module.exit_json(changed=changed)
def create_role(module, blade):
    """Create Directory Service Role"""
    # A create is the same API call as an update: the role names are fixed on
    # the FlashBlade, only their group configuration is set here.
    changed = False
    role_name = module.params['role']
    try:
        new_role = DirectoryServiceRole(group_base=module.params['group_base'],
                                        group=module.params['group'])
        blade.directory_services.update_directory_services_roles(names=[role_name],
                                                                 directory_service_role=new_role)
        changed = True
    except Exception:
        # Broad catch is deliberate: any SDK failure becomes a module failure.
        module.fail_json(msg='Create Directory Service Role {0} failed: Check configuration'.format(role_name))
    module.exit_json(changed=changed)
def main():
    """Parse module arguments and dispatch to create/update/delete a role."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        role=dict(required=True, type='str',
                  choices=['array_admin', 'ops_admin', 'readonly', 'storage_admin']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        group_base=dict(type='str'),
        group=dict(type='str'),
    ))
    module = AnsibleModule(argument_spec,
                           required_together=[['group', 'group_base']],
                           supports_check_mode=False)
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')
    blade = get_blade(module)
    # A role counts as "configured" once it has a group assigned.
    role = blade.directory_services.list_directory_services_roles(names=[module.params['role']])
    role_configured = role.items[0].group is not None
    state = module.params['state']
    if state == 'absent' and role_configured:
        delete_role(module, blade)
    elif state == 'present' and role_configured:
        update_role(module, blade)
    elif state == 'present' and not role_configured:
        create_role(module, blade)
    else:
        # absent + not configured: nothing to do.
        module.exit_json(changed=False)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Singapore - Accounting',
'version': '1.0',
'author': 'Tech Receptives',
'website': 'http://www.techreceptives.com',
'category': 'Localization/Account Charts',
'description': """
Singapore accounting chart and localization.
=======================================================
After installing this module, the Configuration wizard for accounting is launched.
* The Chart of Accounts consists of the list of all the general ledger accounts
required to maintain the transactions of Singapore.
* On that particular wizard, you will be asked to pass the name of the company,
the chart template to follow, the no. of digits to generate, the code for your
account and bank account, currency to create journals.
* The Chart of Taxes would display the different types/groups of taxes such as
Standard Rates, Zeroed, Exempted, MES and Out of Scope.
* The tax codes are specified considering the Tax Group and for easy accessibility of
submission of GST Tax Report.
""",
'depends': ['base', 'account', 'account_chart'],
'demo': [ ],
'data': [
'l10n_sg_chart_tax_code.xml',
'l10n_sg_chart.xml',
'l10n_sg_chart_tax.xml',
'l10n_sg_wizard.xml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int8
from geometry_msgs.msg import Twist
from sensor_msgs.msg import JointState
from brics_actuator.msg import JointPositions, JointValue, JointVelocities
import sys
import copy
import numpy
import time
class switch(object):
    # Emulates a C-style switch statement (ActiveState-recipe style):
    # ``switch(x)`` records x and returns True, ``case(...)`` then tests the
    # recorded value against one or more candidates.
    # NOTE(review): state lives on the class itself, so nested or concurrent
    # use would clash; this helper appears unused in this file — confirm.
    value = None
    def __new__(class_, value):
        # Unconventional: stores *value* on the class and returns True instead
        # of an instance, so ``if switch(x):`` always enters the block.
        class_.value = value
        return True
    def case(*args):
        # Intentionally no self/cls parameter: reads the shared class attribute.
        return any((arg == switch.value for arg in args))
class arm_listener:
    # ROS node: receives arm pose commands as Twist messages on 'arm_command',
    # forwards them to the arm/gripper position controllers, and publishes on
    # '/done_arm' once the measured joint states match the commanded values.
    def __init__(self):
        # Counts how many of the 6 joints have reached their target (reset in
        # processJoints after each evaluation).
        self.done = 0
        rospy.init_node('arm_listener', anonymous = True)
        rospy.Subscriber('arm_command', Twist, self.processCmd)
        rospy.Subscriber('joint_states', JointState, self.processJoints)
        self.armPub = rospy.Publisher('/arm_controller/position_command', JointPositions, queue_size=10)
        self.gripPub = rospy.Publisher('/gripper_controller/position_command', JointPositions, queue_size=10)
        self.doPub = rospy.Publisher('/done_arm', Int8, queue_size=10)
        self.pos = JointValue()
        self.uri = 0
        # Last commanded targets: joints 1-5 plus gripper opening (index 5).
        self.value = [0 for i in range(6)]
        self.joint_states = JointState()
        self.desiredposition = JointPositions()
        # Ensures '/done_arm' fires only once per reached target.
        self.once = 1
        # Per-joint software limits; indices 0-4 are radians, 5 is the gripper
        # finger travel in metres.
        self.jointBoundsUp = [5.8, 2.6, -0.015708, 3.40, 5.60, 0.0115]
        self.jointBoundsLow = [0.011, 0.020, -5.02654, 0.03, 0.12, 0.0]
        #time.sleep(2)
    def processCmd(self, cmd_vel):
        # Twist is repurposed as a 6-value container: linear.xyz -> joints 1-3,
        # angular.xy -> joints 4-5, angular.z -> gripper opening.
        self.cmd = cmd_vel
        value = [0 for i in range(6)]
        unit = [0 for i in range(6)]
        value[0] = self.cmd.linear.x # joint_uri_1
        value[1] = self.cmd.linear.y # joint_uri_2
        value[2] = self.cmd.linear.z # joint_uri_3
        value[3] = self.cmd.angular.x # joint_uri_4
        value[4] = self.cmd.angular.y # joint_uri_5
        value[5] = self.cmd.angular.z # joint_uri_6
        self.value = value
        print(self.value)
        for index in range(5):
            unit[index] = "rad"
        unit[5]="m"
        # Clamp every target into its software limit range.
        for index in range(0, 6):
            if value[index] > self.jointBoundsUp[index]:
                value[index] = self.jointBoundsUp[index]
            elif value[index] < self.jointBoundsLow[index]:
                value[index] = self.jointBoundsLow[index]
        for index in range(0, 5):
            pos = JointValue()
            pos.joint_uri = "arm_joint_" + str(index + 1)
            pos.unit = unit[index]
            pos.value = value[index]
            self.desiredposition.positions.append(pos)
        # print("------------------------------------------------")
        # print(self.desiredposition.positions)
        self.armPub.publish(self.desiredposition)
        # NOTE(review): sleeping 10 s inside a subscriber callback blocks this
        # callback queue — confirm that is acceptable for this robot.
        time.sleep(10)
        # NOTE(review): 'pos' still refers to the arm_joint_5 JointValue that
        # was already appended and published above; mutating and re-appending
        # it aliases the same object (arm_joint_5 entry gets overwritten) and
        # the gripper messages also carry the arm entries — looks like a bug,
        # confirm intended behaviour before changing.
        pos.joint_uri = "gripper_finger_joint_l"
        pos.unit = unit[5]
        pos.value = value[5]
        self.desiredposition.positions.append(pos)
        self.gripPub.publish(self.desiredposition)
        time.sleep(0.2)
        pos.joint_uri = "gripper_finger_joint_r"
        pos.unit = unit[5]
        pos.value = value[5]
        self.desiredposition.positions.append(pos)
        self.gripPub.publish(self.desiredposition)
        self.desiredposition = JointPositions()
    def processJoints(self, joint_states):
        # Compares measured positions against the last commanded targets and
        # publishes 1 on '/done_arm' exactly once when all six are within
        # tolerance (0.015 rad for arm joints, 0.001 m for the gripper).
        # NOTE(review): assumes the arm joints occupy indices 10-14 and the
        # gripper fingers indices 15-16 of joint_states.position — this
        # depends on the robot's URDF/driver ordering, confirm.
        self.joint_states = copy.deepcopy(joint_states)
        print("sono qui")
        #for index in range(0, 5):
        #time.sleep(0.06)
        if (abs(self.value[0] - self.joint_states.position[10])) <= 0.015:
            self.done = self.done + 1
        else:
            self.done = self.done
        if (abs(self.value[1] - self.joint_states.position[11])) <= 0.015:
            self.done = self.done + 1
        else:
            self.done = self.done
        if (abs(self.value[2] - self.joint_states.position[12])) <= 0.015:
            self.done = self.done + 1
        else:
            self.done = self.done
        if (abs(self.value[3] - self.joint_states.position[13])) <= 0.015:
            self.done = self.done + 1
        else:
            self.done = self.done
        if (abs(self.value[4] - self.joint_states.position[14])) <= 0.015:
            self.done = self.done + 1
        else:
            self.done = self.done
        if (abs(self.value[5] - self.joint_states.position[15]) <= 0.001) and (abs(self.value[5] - self.joint_states.position[16]) <= 0.001):
            self.done = self.done + 1
        else:
            self.done = self.done
        #time.sleep(10)
        #print(self.done)
        if self.done == 6:
            if self.once:
                self.doPub.publish(1)
                self.once = 0
        else:
            self.doPub.publish(0)
            self.once = 1
        #time.sleep(2)
        #print("================================AZZERAMENTO==========================================")
        self.done = 0
def main(args):
    """Start the arm listener node and block until ROS shuts down."""
    arm_listener()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
# Script entry point: forward the command-line arguments to main().
if __name__ == '__main__':
    main(sys.argv)
# Scheduled CodeQL static-analysis workflow.
name: Run CodeQL

on:
  schedule:
    # Run once a day at midnight UTC.
    - cron: 0 0 * * *

# Default token permissions for the workflow; the job below widens them.
permissions:
  contents: read

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-slim
    permissions:
      actions: read
      contents: read
      # Required to upload SARIF results to code scanning.
      security-events: write
    strategy:
      fail-fast: false
      matrix:
        # One analysis job per language.
        language: [cpp, javascript, python]
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
        with:
          languages: ${{ matrix.language }}
          config-file: ./.github/codeql-config.yml
      - name: Autobuild
        uses: github/codeql-action/autobuild@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@b20883b0cd1f46c72ae0ba6d1090936928f9fa30 # v4.32.0
        with:
          category: /language:${{matrix.language}}
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

# Odoo addon manifest for the Invoicing (account) module.
{
    'name' : 'Invoicing',
    'version' : '1.1',
    'summary': 'Invoices & Payments',
    'sequence': 10,
    'description': """
Invoicing & Payments
====================
The specific and easy-to-use Invoicing system in Odoo allows you to keep track of your accounting, even when you are not an accountant. It provides an easy way to follow up on your vendors and customers.
You could use this simplified accounting in case you work with an (external) account to keep your books, and you still want to keep track of payments. This module also offers you an easy method of registering payments, without having to encode complete abstracts of account.
    """,
    'category': 'Accounting/Accounting',
    'website': 'https://www.odoo.com/page/billing',
    'images' : ['images/accounts.jpeg','images/bank_statement.jpeg','images/cash_register.jpeg','images/chart_of_accounts.jpeg','images/customer_invoice.jpeg','images/journal_entries.jpeg'],
    'depends' : ['base_setup', 'product', 'analytic', 'portal', 'digest'],
    # XML/CSV files loaded at install/update time (security, data, views, wizards).
    'data': [
        'security/account_security.xml',
        'security/ir.model.access.csv',
        'data/data_account_type.xml',
        'data/account_data.xml',
        'data/digest_data.xml',
        'views/account_payment_view.xml',
        'wizard/account_automatic_entry_wizard_views.xml',
        'wizard/account_unreconcile_view.xml',
        'wizard/account_move_reversal_view.xml',
        'wizard/account_resequence_views.xml',
        'wizard/account_payment_register_views.xml',
        'views/account_move_views.xml',
        'wizard/setup_wizards_view.xml',
        'wizard/pos_box.xml',
        'views/partner_view.xml',
        'views/account_account_type_views.xml',
        'views/account_account_views.xml',
        'views/account_group_views.xml',
        'views/account_journal_views.xml',
        'views/account_account_tag_views.xml',
        'views/account_bank_statement_views.xml',
        'views/account_reconcile_model_views.xml',
        'views/account_tax_views.xml',
        'views/account_full_reconcile_views.xml',
        'views/account_payment_term_views.xml',
        'views/account_chart_template_views.xml',
        'views/res_partner_bank_views.xml',
        'views/report_statement.xml',
        'views/account_report.xml',
        'data/mail_template_data.xml',
        'wizard/account_validate_move_view.xml',
        'views/res_company_views.xml',
        'views/product_view.xml',
        'views/account_analytic_view.xml',
        'views/account.xml',
        'views/report_invoice.xml',
        'report/account_invoice_report_view.xml',
        'views/account_cash_rounding_view.xml',
        'wizard/account_report_common_view.xml',
        'views/report_journal.xml',
        'views/tax_adjustments.xml',
        'wizard/wizard_tax_adjustments_view.xml',
        'views/res_config_settings_views.xml',
        'views/account_journal_dashboard_view.xml',
        'views/account_portal_templates.xml',
        'views/report_payment_receipt_templates.xml',
        'data/payment_receipt_data.xml',
        'views/account_onboarding_templates.xml',
        'data/service_cron.xml',
        'views/account_incoterms_view.xml',
        'data/account_incoterms_data.xml',
        'views/digest_views.xml',
        'wizard/account_invoice_send_views.xml',
        'views/account_tax_report_views.xml',
        'report/account_hash_integrity_templates.xml',
        'views/res_currency.xml',
        'views/account_menuitem.xml',
        'views/account_analytic_default_view.xml',
        'wizard/account_tour_upload_bill.xml',
    ],
    # Loaded only when demo data is enabled.
    'demo': [
        'demo/account_demo.xml',
    ],
    # Client-side QWeb templates bundled into the web assets.
    'qweb': [
        "static/src/xml/account_payment.xml",
        'static/src/xml/account_resequence.xml',
        "static/src/xml/account_report_backend.xml",
        "static/src/xml/bills_tree_upload_views.xml",
        'static/src/xml/account_journal_activity.xml',
        'static/src/xml/grouped_view_widget.xml',
        'static/src/xml/tax_group.xml',
    ],
    'installable': True,
    'application': True,
    'auto_install': False,
    # Python hook run after module installation.
    'post_init_hook': '_account_post_init',
}
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import variable_scope
def accuracy(predictions, labels, weights=None, name=None):
  """Computes the percentage of times that predictions matches labels.
  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
                 matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
            bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.
    name: A name for the operation (optional).
  Returns:
    Accuracy `Tensor`.
  Raises:
    ValueError: if dtypes don't match or
                if dtype is not bool, integer, or string.
  """
  # Equality comparison is only well-defined for exact dtypes, so float
  # labels are rejected up front.
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
    # 1.0 where prediction == label, 0.0 elsewhere.
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      # Weighted accuracy: sum(w * correct) / sum(w); the ones_like
      # multiplication broadcasts scalar/vector weights to full shape.
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    # Unweighted case: plain mean of the 0/1 correctness tensor.
    return math_ops.reduce_mean(is_correct)
def f1_score(labels, predictions, weights=None, num_thresholds=200,
             metrics_collections=None, updates_collections=None, name=None):
  """Computes the approximately best F1-score across different thresholds.
  The f1_score function applies a range of thresholds to the predictions to
  convert them from [0, 1] to bool. Precision and recall are computed by
  comparing them to the labels. The F1-Score is then defined as
  2 * precision * recall / (precision + recall). The best one across the
  thresholds is returned.
  Disclaimer: In practice it may be desirable to choose the best threshold on
  the validation set and evaluate the F1 score with this threshold on a
  separate test set. Or it may be desirable to use a fixed threshold (e.g. 0.5).
  This function internally creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the pairs of recall and precision values for a linearly spaced set of
  thresholds from which the best f1-score is derived.
  This value is ultimately returned as `f1-score`, an idempotent operation that
  computes the F1-score (computed using the aforementioned variables). The
  `num_thresholds` variable controls the degree of discretization with larger
  numbers of thresholds more closely approximating the true best F1-score.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the F1-score.
  Example usage with a custom estimator:
  def model_fn(features, labels, mode):
    predictions = make_predictions(features)
    loss = make_loss(predictions, labels)
    train_op = tf.contrib.training.create_train_op(
        total_loss=loss,
        optimizer='Adam')
    eval_metric_ops = {'f1': f1_score(labels, predictions)}
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs)
  estimator = tf.estimator.Estimator(model_fn=model_fn)
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `f1_score` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    f1_score: A scalar `Tensor` representing the current best f1-score across
      different thresholds.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches the `f1_score`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'f1', (labels, predictions, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=predictions, labels=labels, weights=weights)
    # To account for floating point imprecisions / avoid division by zero.
    epsilon = 1e-7
    # Interior thresholds are linearly spaced on (0, 1); the epsilon-padded
    # endpoints guarantee every prediction falls strictly inside the range.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds - 2)]
    thresholds = [0.0 - epsilon] + thresholds + [1.0 + epsilon]
    # Confusion matrix.
    values, update_ops = metrics_impl._confusion_matrix_at_thresholds(  # pylint: disable=protected-access
        labels, predictions, thresholds, weights, includes=('tp', 'fp', 'fn'))
    # Compute precision and recall at various thresholds.
    def compute_best_f1_score(tp, fp, fn, name):
      precision_at_t = math_ops.div(tp, epsilon + tp + fp,
                                    name='precision_' + name)
      recall_at_t = math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
      # Compute F1 score.
      f1_at_thresholds = (
          2.0 * precision_at_t * recall_at_t /
          (precision_at_t + recall_at_t + epsilon))
      return math_ops.reduce_max(f1_at_thresholds)
    def f1_across_replicas(_, values):
      best_f1 = compute_best_f1_score(tp=values['tp'], fp=values['fp'],
                                      fn=values['fn'], name='value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, best_f1)
      return best_f1
    # Aggregate the per-replica confusion-matrix counts before deriving the
    # best F1 value, so the metric is correct under tf.distribute.
    best_f1 = tf.distribute.get_replica_context().merge_call(
        f1_across_replicas, args=(values,))
    update_op = compute_best_f1_score(tp=update_ops['tp'], fp=update_ops['fp'],
                                      fn=update_ops['fn'], name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return best_f1, update_op
#! /usr/bin/env python
__author__ = 'Brett Bowman'
__email__ = 'bbowman@pacificbiosciences.com'
import logging
import logging.config
import os
import shlex
import subprocess
import tempfile

from pbhla import __LOG__
from pbhla.utils import which
logging.config.fileConfig( __LOG__ )
log = logging.getLogger(__name__)
class ExternalTool( object ):
    """Abstract base class for running external command-line tools.

    Subclasses supply the tool's ``name``, ``exe`` and ``commandline``
    properties plus the ``set_*``/``check_*`` hooks, and invoke the tool by
    calling the instance.  The tool runs either directly via subprocess
    (local environment) or indirectly through a generated shell script that
    first sources a virtualenv setup script.
    """

    counter = 0  # number of times this tool has been invoked

    def __init__(self, setup=None):
        self._setup = setup
        self._use_setup = self.test_setup()

    def test_setup(self):
        """Determine whether we need a setup script, and which environment to use"""
        # Bug fix: the log messages of the first two branches were swapped.
        if self.setup and which(self.exe):
            log.info('"%s" detected, but using supplied environment instead. ' % self.name +
                     'Do not pass a "setup" argument to use local environment')
            return True
        elif self.setup and which(self.exe) is None:
            # TODO: Add validation that the setup script works
            log.info('"%s" not detected, using supplied environment' % self.name)
            return True
        elif self.setup is None and which(self.exe):
            log.info('"%s" detected, using local environment' % self.name)
            return False
        else:
            msg = '"%s" requires EITHER a valid executable in the local ' % self.name + \
                  'path OR a virtualenv setup script'
            log.error( msg )
            raise Exception( msg )

    @property
    def name(self):
        """Human-readable tool name; must be supplied by subclasses."""
        raise NotImplementedError("Subclasses should implement this!")

    @property
    def exe(self):
        """Executable name looked up on PATH; must be supplied by subclasses."""
        raise NotImplementedError("Subclasses should implement this!")

    @property
    def setup(self):
        """Path of the virtualenv setup script, or None for local env."""
        return self._setup

    @property
    def use_setup(self):
        """True when the tool runs through the setup-script environment."""
        return self._use_setup

    @property
    def commandline(self):
        """Full command line to run; must be supplied by subclasses."""
        raise NotImplementedError("Subclasses should implement this!")

    @property
    def commandline_args(self):
        # Tokenize the command line shell-style for subprocess.
        return shlex.split( self.commandline )

    def set_arguments(self, **kwargs):
        raise NotImplementedError("Subclasses should implement this!")

    def set_defaults(self):
        raise NotImplementedError("Subclasses should implement this!")

    def check_arguments(self):
        raise NotImplementedError("Subclasses should implement this!")

    def check_output(self):
        raise NotImplementedError("Subclasses should implement this!")

    def run_process(self, process_args, name):
        """Run a child process, redirecting its output to a per-run log file."""
        log.info("Executing child '%s' process" % name)
        if self._use_setup:
            log.info('Executing subprocess indirectly via Shell Script')
            script = self.write_script( process_args, name )
            log_path = self.get_log_path( name )
            with open( log_path, 'w' ) as log_handle:
                p = subprocess.Popen( ['source', script],
                                      executable='/bin/bash',
                                      stderr=subprocess.STDOUT,
                                      stdout=log_handle)
                p.wait()
        else:
            log.info('Executing subprocess directly via Subprocess')
            p = subprocess.Popen( process_args )
            p.wait()
        log.info('Child process finished successfully')

    def write_script(self, process_args, name):
        """Write a shell script that sources the setup env and runs the given
        argument list; returns the script's path.  (Bug fix: this method was
        called by run_process but was never defined.)"""
        handle = tempfile.NamedTemporaryFile(suffix='.sh', prefix=name + '_',
                                             mode='w', delete=False)
        handle.write('source %s\n' % self.setup)
        handle.write('%s\n' % ' '.join(process_args))
        handle.close()
        return handle.name

    def write_shell_script( self ):
        """Write a shell script that sources the setup env and runs this
        tool's command line; returns the script's path."""
        # Bug fix: previously the NamedTemporaryFile OBJECT was passed to
        # open(), which fails; write through the handle and return its name.
        handle = tempfile.NamedTemporaryFile(suffix='.sh', mode='w', delete=False)
        handle.write('source %s\n' % self.setup)
        handle.write('%s\n' % self.commandline)
        handle.close()
        return handle.name

    def get_log_path( self, name ):
        # Bug fix: self._counter was never defined; use the invocation counter.
        # NOTE(review): self._logs is never initialized in this class — it
        # appears subclasses must set it; confirm against subclass code.
        log_name = '%s_%s.log' % (self.counter, name)
        return os.path.join( self._logs, log_name )

    def run( self ):
        """Execute the tool, via the setup-script path when one was supplied."""
        log.info('Running %s' % self.name)
        print(self.commandline)
        if self.use_setup:
            log.info('Executing %s indirectly via Shell Script' % self.name)
            # Bug fix: the original always ran run_subprocess(), which fails
            # when the executable only exists inside the setup environment.
            output = self.run_subprocess_script()
        else:
            log.info('Executing %s directly via Subprocess' % self.name)
            output = self.run_subprocess()
        print(output)
        log.info('Finished Executing %s' % self.name)

    def run_subprocess_script(self):
        """Run via a generated shell script and return its captured output."""
        shell_script = self.write_shell_script()
        return subprocess.check_output(['bash', shell_script])

    def run_subprocess(self):
        """Run a process directly via Subprocess and validate the output"""
        output = subprocess.check_output( self.commandline_args )
        return output

    def __call__(self, output_dir=None, **kwargs):
        self.counter += 1               # per-instance invocation count
        self.set_arguments( **kwargs )  # 1. Initialize
        self.set_defaults()             # 2. Set unspecified values to default
        self.check_arguments()          # 3. Sanity Check
        self.run()                      # 4. Run
        return self.check_output()      # 5. Validate
import { CMS_NAME, CMS_URL } from "@/lib/constants";
/**
 * Hero section of the blog index page: a large "Blog." heading alongside a
 * subtitle linking to Next.js and to the CMS (name/URL come from
 * lib/constants).
 */
export default function Intro() {
  return (
    <section className="flex-col md:flex-row flex items-center md:justify-between mt-16 mb-16 md:mb-12">
      <h1 className="text-6xl md:text-8xl font-bold tracking-tighter leading-tight md:pr-8">
        Blog.
      </h1>
      <h4 className="text-center md:text-left text-lg mt-5 md:pl-8">
        A statically generated blog example using{" "}
        <a
          href="https://nextjs.org/"
          className="underline hover:text-success duration-200 transition-colors"
        >
          Next.js
        </a>{" "}
        and{" "}
        <a
          href={CMS_URL}
          className="underline hover:text-success duration-200 transition-colors"
        >
          {CMS_NAME}
        </a>
        .
      </h4>
    </section>
  );
}
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const makeSerializable = require("../util/makeSerializable");
const ContextDependency = require("./ContextDependency");
const ModuleDependencyTemplateAsRequireId = require("./ModuleDependencyTemplateAsRequireId");
/** @typedef {import("../javascript/JavascriptParser").Range} Range */
/** @typedef {import("./ContextDependency").ContextDependencyOptions} ContextDependencyOptions */
class RequireContextDependency extends ContextDependency {
/**
* @param {ContextDependencyOptions} options options
* @param {Range} range range
*/
constructor(options, range) {
super(options);
this.range = range;
}
get type() {
return "require.context";
}
}
// Register (de)serialization support under a stable identifier so the
// dependency survives persistent caching.
makeSerializable(
	RequireContextDependency,
	"webpack/lib/dependencies/RequireContextDependency"
);

// Rendered with the shared "as require id" module dependency template.
RequireContextDependency.Template = ModuleDependencyTemplateAsRequireId;

module.exports = RequireContextDependency;
import discord
import json
from discord.ext import commands
from sys import argv
class Helper_list:
    """
    Management of active helpers.
    """

    # JSON registry mapping member ids to their helper position.
    HELPERS_FILE = "data/helpers.json"

    def __init__(self, bot):
        self.bot = bot
        print('Addon "{}" loaded'.format(self.__class__.__name__))

    def _load_helpers(self):
        """Return the helper registry (member id -> position) from disk."""
        with open(self.HELPERS_FILE, "r") as f:
            return json.load(f)

    def _save_helpers(self, helpers):
        """Persist the helper registry to disk."""
        with open(self.HELPERS_FILE, "w") as f:
            json.dump(helpers, f)

    @commands.has_permissions(administrator=True)
    @commands.command(pass_context=True)
    async def addhelper(self, ctx, user, position):
        """Add user as a helper. Owners only."""
        if position not in self.bot.helper_roles:
            await self.bot.say("💢 That's not a valid position. You can use __{}__".format("__, __".join(self.bot.helper_roles.keys())))
            return
        # The first mention in the message is what is actually used; `user`
        # only forces the command to take an argument.
        member = ctx.message.mentions[0]
        helpers = self._load_helpers()
        helpers[member.id] = position
        self._save_helpers(helpers)
        await self.bot.add_roles(member, self.bot.helpers_role)
        await self.bot.say("{} is now a helper. Welcome to the party room!".format(member.mention))

    @commands.has_permissions(administrator=True)
    @commands.command(pass_context=True)
    async def delhelper(self, ctx, user):
        """Remove user from helpers. Owners only."""
        member = ctx.message.mentions[0]
        await self.bot.say(member.name)
        helpers = self._load_helpers()
        helpers.pop(member.id, None)
        self._save_helpers(helpers)
        # Strip the umbrella helpers role plus every position-specific role.
        await self.bot.remove_roles(member, self.bot.helpers_role, *self.bot.helper_roles.values())
        await self.bot.say("{} is no longer a helper. Stop by some time!".format(member.mention))

    @commands.command(pass_context=True)
    async def helpon(self, ctx):
        """Gain highlighted helping role. Only needed by Helpers."""
        author = ctx.message.author
        helpers = self._load_helpers()
        if author.id not in helpers:
            await self.bot.say("You are not listed as a helper, and can't use this.")
            return
        await self.bot.add_roles(author, self.bot.helper_roles[helpers[author.id]])
        await self.bot.say("{} is now actively helping.".format(author.mention))
        msg = "🚑 **Elevated: +Help**: {} | {}#{}".format(author.mention, author.name, author.discriminator)
        await self.bot.send_message(self.bot.modlogs_channel, msg)

    @commands.command(pass_context=True)
    async def helpoff(self, ctx):
        """Remove highlighted helping role. Only needed by Helpers."""
        author = ctx.message.author
        helpers = self._load_helpers()
        if author.id not in helpers:
            await self.bot.say("You are not listed as a helper, and can't use this.")
            return
        await self.bot.remove_roles(author, self.bot.helper_roles[helpers[author.id]])
        await self.bot.say("{} is no longer actively helping!".format(author.mention))
        msg = "👎🏻 **De-Elevated: -Help**: {} | {}#{}".format(author.mention, author.name, author.discriminator)
        await self.bot.send_message(self.bot.modlogs_channel, msg)
def setup(bot):
    # Standard discord.py extension hook: register this cog with the bot.
    bot.add_cog(Helper_list(bot))
#!/usr/bin/env python3
import csv
import itertools
import sys
import time
import warnings
from contextlib import nullcontext
import click
import numpy as np
from operator_inp_utils import OperatorInputsLoader
from tqdm import tqdm
import torch
from torch._dynamo.backends.cudagraphs import cudagraphs_inner
from torch._dynamo.testing import same
from torch._inductor.compile_fx import compile_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.lowering import lowerings
from torch._inductor.runtime.benchmarking import benchmarker
from torch._inductor.utils import gen_gm_and_inputs
from torch.utils._pytree import tree_map_only
# Shorthand for the ATen operator namespace.
aten = torch.ops.aten

# Set from the --profile CLI flag; gates torch.profiler record_function scopes.
profile_enabled = False

# Named inductor config patch sets, selectable via --inductor-config.
inductor_config_options = {
    "halide": {"cpu_backend": "halide", "cuda_backend": "halide"},
    "autotune": {
        "max_autotune_pointwise": True,
        "max_autotune": True,
        "max_autotune_gemm": True,
        "coordinate_descent_tuning": True,
    },
}
def maybe_record_function(name):
    """Return a profiler record_function scope named *name* when profiling
    is enabled, otherwise a no-op context manager."""
    if profile_enabled:
        return torch.profiler.record_function(name)
    return nullcontext()
def compute_speedups(
    operator, models, example_inputs, repeats, accuracy_checking=False, device="cuda"
):
    """Time each model in *models* on *example_inputs* and return the median
    per-model wall time over *repeats* runs.  models[0] is treated as the
    eager reference for the optional accuracy check."""
    expected = models[0](*example_inputs)
    if accuracy_checking:
        for model in models[1:]:
            actual = model(*example_inputs)
            # change to assert later
            try:
                same(actual, expected, cos_similarity=True, equal_nan=True)
            except AssertionError as e:
                print(e)
                print(f"Accuracy check failed: {operator}")
                print((expected[0] - actual[0]).abs().max())
    timings = np.zeros((repeats, len(models)), np.float64)
    for rep in range(repeats):
        with maybe_record_function(f"rep_{rep}"):
            # interleave the runs to handle frequency scaling and load changes
            for m, model in enumerate(models):
                with maybe_record_function(f"model_{m}"):
                    if device == "cuda":
                        # Warm-up call before the measured run.
                        model(*example_inputs)
                        # benchmarker.benchmark_gpu() clears L2 cache to hide the latency of CPU launch time
                        # along with cuda synchronization
                        timings[rep, m] = benchmarker.benchmark_gpu(
                            lambda: model(*example_inputs)
                        )
                    else:
                        from torch._inductor.utils import timed

                        timings[rep, m] = timed(model, example_inputs)
    # Median across repeats is robust to occasional slow outliers.
    return np.median(timings, axis=0)
def strip_overloads(gm):
    """
    Modifies the target of graph nodes in :attr:`gm` to strip overloads.

    Args:
        gm(fx.GraphModule): The input Fx graph module to be modified
    """
    overload_nodes = (
        node
        for node in gm.graph.nodes
        if isinstance(node.target, torch._ops.OpOverload)
    )
    for node in overload_nodes:
        # Replace e.g. aten.relu.default with its packet aten.relu.
        node.target = node.target.overloadpacket
    gm.recompile()
def convert_to_jit(gm, gm_args):
    """TorchScript *gm*, preferring scripting and falling back to tracing
    with *gm_args* when scripting fails for any reason."""
    strip_overloads(gm)
    try:
        return torch.jit.script(gm)
    except Exception:
        return torch.jit.trace(gm, gm_args)
def to_channels_last(ten):
    """Convert a 4-D tensor to channels_last layout; other ranks pass through."""
    if ten.ndim == 4:
        return ten.to(memory_format=torch.channels_last)
    return ten
def microbenchmark(
    operator,
    args,
    kwargs,
    accuracy_checking,
    repeats,
    inductor_configs,
    measure_nvfuser,
    device,
):
    """Build an fx graph for one (operator, inputs) sample, compile it with
    each inductor config (and optionally nvfuser via cudagraphs+jit), and
    return the median timing per backend (eager first)."""
    gm, gm_args = gen_gm_and_inputs(operator, args, kwargs)
    # Needed so torch.jit can script graphs containing convolution_backward.
    torch.jit._builtins._register_builtin(
        torch.ops.aten.convolution_backward.default, "aten::convolution_backward"
    )
    compiled = [gm]
    for config in inductor_configs:
        t = -time.perf_counter()
        compiled.append(compile_fx(gm, gm_args, config_patches=config))
        t += time.perf_counter()
        if t > 10:
            print(f"slow compile inductor {t:.1f}s {config}")
    if measure_nvfuser:
        g = convert_to_jit(gm, gm_args)
        cudagraphs_jit = cudagraphs_inner(
            g, gm_args, copy_outputs=False, copy_inputs=False
        )
        compiled += [cudagraphs_jit]
    if accuracy_checking:
        # A single repeat is enough when we only care about correctness.
        repeats = 1
    medians = compute_speedups(
        operator, compiled, gm_args, repeats, accuracy_checking, device
    )
    return medians
quantiles_thresholds = (0.2, 0.5, 0.8)


def quantiles(timings):
    """Return the 20th/50th/80th percentiles of *timings* as Python floats."""
    return [float(q) for q in np.quantile(timings, quantiles_thresholds)]
def skip_operator(operator):
    """Return True when *operator* should not be benchmarked (input
    generation not implemented, non-compute op, or no inductor impl)."""
    op_str = str(operator)

    # Operators whose inputs the generator cannot build yet.
    not_yet_implemented = (
        "aten.gather.default",
        "nll_loss",
        "aten.index",
        "aten.scatter_",
        "masked_fill_.Scalar",
    )
    if any(marker in op_str for marker in not_yet_implemented):
        # maybe disable aten.native_layer_norm.default
        # TODO - inputs cannot be randomly initialized, causes cyda failures
        print(f"Skipping {operator}, input generator nyi")
        return True

    # not covered by other non-compute operator heuristics
    if operator == torch.ops.aten._unsafe_view.default:
        print(f"Skipping {operator}, non compute operator")
        return True

    # some of inductor registered to the OpOverload, some registered to OpOverloadPacket
    candidates = [operator]
    if isinstance(operator, torch._ops.OpOverload):
        candidates.append(operator.overloadpacket)
    # TODO - skip benchmarking fallbacks. for some ops we have both lowerings and fallbacks
    # so its not clear just from operator what will be lowered.
    if not any(op in decompositions or op in lowerings for op in candidates):
        print(f"Skipping {operator}, no inductor impl")
        return True

    if "convolution" in op_str:
        return True

    return False
@click.command()
@click.option(
    "--suite",
    help="suite to load inps from: options: timm, huggingface, torchbench",
    default="torchbench",
)
@click.option("--op", help="operator overload to benchmark", default="all")
@click.option("--dtype", help="dtype to benchmark", default="float32")
@click.option("--max-samples", help="max samples per op", default=15)
@click.option("--accuracy-checking", help="check accuracy", default=False)
@click.option(
    "--repeats", help="how many times to repeat for perf measurement", default=3
)
@click.option(
    "--inductor-config",
    multiple=True,
    help="Custom inductor config, options: " + ", ".join(inductor_config_options),
)
@click.option(
    "--measure-nvfuser/--no-measure-nvfuser",
    help="default we only measure inductor",
    default=False,
)
@click.option("--device", help="cpu or cuda", default="cuda")
@click.option("--inp-file", help="use custom input file instead of suite", default=None)
@click.option("--start-idx", help="specify start index of samples", default=0)
@click.option(
    "--channels-last", help="force inputs to channels last", is_flag=True, default=False
)
@click.option("--profile", help="profile the benchmark", is_flag=True, default=False)
def benchmark(
    suite,
    op,
    dtype,
    max_samples,
    accuracy_checking,
    repeats,
    inductor_config,
    measure_nvfuser,
    device,
    inp_file,
    start_idx,
    channels_last,
    profile,
):
    """Benchmark eager vs. compiled backends over recorded operator inputs,
    printing per-operator speedup quantiles and (for --op all) writing a
    CSV summary."""
    warnings.filterwarnings("ignore", module="torch.jit._check")
    torch.set_float32_matmul_precision("high")
    global profile_enabled
    if inp_file is not None:
        loader = OperatorInputsLoader(inp_file)
    else:
        if suite not in ("timm", "huggingface", "torchbench"):
            raise AssertionError(
                f"suite must be one of 'timm', 'huggingface', 'torchbench', but got '{suite}'"
            )
        if suite == "timm":
            loader = OperatorInputsLoader.get_timm_loader()
        elif suite == "huggingface":
            loader = OperatorInputsLoader.get_huggingface_loader()
        else:
            loader = OperatorInputsLoader.get_torchbench_loader()
    if dtype not in ("float16", "float32"):
        raise AssertionError(f"dtype must be 'float16' or 'float32', but got '{dtype}'")
    # Backend 0 is always default inductor; extra configs add more backends.
    inductor_configs = [{}]
    backend_names = ["inductor"]
    for name in inductor_config or ():
        backend_names.append(name)
        inductor_configs.append(inductor_config_options[name])
    if measure_nvfuser:
        backend_names.append("nvfuser")
    # With exactly two backends, also report their head-to-head ratio.
    compare2 = len(backend_names) == 2
    if compare2:
        a, b = backend_names
        backend_names.append(f"{a}/{b}")
    output_fd = None
    output_csv = None
    if op == "all":
        filename = f"operatorbench_{suite}_{dtype}.csv"
        # Bug fix: the handle was previously opened in a `with` block that
        # closed it immediately, so the per-operator writerow/flush calls
        # below raised on a closed file.  Keep it open until the end.
        output_fd = open(filename, "w")
        output_csv = csv.writer(output_fd)
        output_csv.writerow(
            [
                "operator",
                *[
                    f"{a} {b}"
                    for a, b in itertools.product(
                        backend_names,
                        [f"{x * 100:.0f}th" for x in quantiles_thresholds],
                    )
                ],
                "elapsed",
                *map("{} abs".format, ["eager", *backend_names]),
            ]
        )
    dtype = torch.float16 if dtype == "float16" else torch.float32
    if op == "all":
        ops = loader.get_all_ops()
    else:
        # NOTE: eval() of a CLI-supplied operator path — acceptable for a
        # developer tool, but do not expose this to untrusted input.
        ops = [eval(op)]
    max_samples = max_samples + start_idx
    profile_enabled = profile
    for operator in ops:
        if skip_operator(operator):
            continue
        start = time.perf_counter()
        inp_gen = loader.get_inputs_for_operator(operator, dtype=dtype, device=device)
        timings = []
        inputs_list = []
        # Materialize up to max_samples recorded inputs for this operator.
        for _ in range(min(max_samples, 1000000)):
            try:
                inps = next(inp_gen)
                inputs_list.append(inps)
            except StopIteration:
                break
        profiler_context = (
            torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                    torch.profiler.ProfilerActivity.CUDA,
                ],
                record_shapes=False,
                profile_memory=False,
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    f"./log/operator_{operator}", use_gzip=True
                ),
            )
            if profile_enabled
            else nullcontext()
        )
        with profiler_context:
            for i, inps in enumerate(tqdm(inputs_list[start_idx:], desc=str(operator))):
                if inps is None:
                    break
                args, kwargs = inps
                if channels_last:
                    args, kwargs = tree_map_only(
                        torch.Tensor, to_channels_last, (args, kwargs)
                    )
                try:
                    with maybe_record_function(f"iter_{i}"):
                        # aten, nvfuser, inductor
                        timings.append(
                            microbenchmark(
                                operator,
                                args,
                                kwargs,
                                accuracy_checking,
                                repeats,
                                inductor_configs,
                                measure_nvfuser,
                                device,
                            )
                        )
                except Exception as e:
                    print(f"error {operator} input {i}: {type(e).__name__}: {e}")
                    # comment out this line to avoid blocking other tests
                    # raise e
        if not timings:
            continue
        timings = np.stack(timings)
        # Column 0 is eager; speedups are eager time / backend time.
        speedups = [
            quantiles(timings[:, 0] / timings[:, x]) for x in range(1, timings.shape[1])
        ]
        if compare2:
            speedups.append(quantiles(timings[:, 1] / timings[:, 2]))
        if len(backend_names) != len(speedups):
            raise AssertionError(
                f"Expected {len(backend_names)} speedups for {len(backend_names)} backends, but got {len(speedups)}"
            )
        row = [f"{operator}"]
        sys.stdout.write(f"{operator}: ")
        for backend, (low, mid, high) in zip(backend_names, speedups):
            sys.stdout.write(f"{backend}={mid:.4f}x ({low:.4f}-{high:.4f}) ")
            row.extend(map("{:.6f}".format, [low, mid, high]))
        elapsed = time.perf_counter() - start
        row.append(f"{elapsed:1f}")
        row.extend(map("{:.8f}".format, np.mean(timings, axis=0).tolist()))
        sys.stdout.write(f"took {elapsed:.0f}s\n")
        sys.stdout.flush()
        if output_csv:
            output_csv.writerow(row)
            output_fd.flush()
    if output_fd:
        # Bug fix: the message previously printed "(unknown)" instead of the
        # CSV path.  `filename` is always bound when output_fd is set.
        print(f"Wrote {filename}")
        output_fd.close()
# Script entry point: click parses the CLI flags into benchmark()'s options.
if __name__ == "__main__":
    benchmark()
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.util;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.mockito.mock.MockType;
import org.mockitoutil.TestBase;
/**
 * Tests for {@link MockNameImpl}: when no explicit name is supplied, the mock
 * name is derived from the mocked type (decapitalized simple name for
 * instance mocks, "Type.class" for static mocks); an explicit name is used
 * verbatim for either mock type.
 */
public class MockNameImplTest extends TestBase {

    @Test
    public void shouldProvideTheNameForClass() throws Exception {
        // when
        String name = new MockNameImpl(null, SomeClass.class, MockType.INSTANCE).toString();
        // then
        assertEquals("someClass", name);
    }

    @Test
    public void shouldProvideTheNameForClassOnStaticMock() throws Exception {
        // when
        String name = new MockNameImpl(null, SomeClass.class, MockType.STATIC).toString();
        // then
        assertEquals("SomeClass.class", name);
    }

    @Test
    public void shouldProvideTheNameForAnonymousClass() throws Exception {
        // given: anonymous classes have no simple name of their own
        SomeInterface anonymousInstance = new SomeInterface() {};
        // when
        String name =
                new MockNameImpl(null, anonymousInstance.getClass(), MockType.INSTANCE).toString();
        // then
        assertEquals("someInterface", name);
    }

    @Test
    public void shouldProvideTheNameForAnonymousClassOnStatic() throws Exception {
        // given
        SomeInterface anonymousInstance = new SomeInterface() {};
        // when
        String name =
                new MockNameImpl(null, anonymousInstance.getClass(), MockType.STATIC).toString();
        // then
        assertEquals("SomeInterface$.class", name);
    }

    @Test
    public void shouldProvideTheGivenName() throws Exception {
        // when: an explicit name overrides the derived one
        String name = new MockNameImpl("The Hulk", SomeClass.class, MockType.INSTANCE).toString();
        // then
        assertEquals("The Hulk", name);
    }

    @Test
    public void shouldProvideTheGivenNameOnStatic() throws Exception {
        // when
        String name = new MockNameImpl("The Hulk", SomeClass.class, MockType.STATIC).toString();
        // then
        assertEquals("The Hulk", name);
    }

    private class SomeClass {}

    private class SomeInterface {}
}
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file provides utilities for designated initializers.
///
//===----------------------------------------------------------------------===//
#include "DesignatedInitializers.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/ScopeExit.h"
namespace clang::tidy::utils {
/// Returns true if Name is reserved, like _Foo or __Vector_base.
static inline bool isReservedName(llvm::StringRef Name) {
  // This doesn't catch all cases, but the most common.
  if (Name.size() < 2 || Name[0] != '_')
    return false;
  return Name[1] == '_' || isUppercase(Name[1]);
}
namespace {

// Helper class to iterate over the designator names of an aggregate type.
//
// For an array type, yields [0], [1], [2]...
// For aggregate classes, yields null for each base, then .field1, .field2,
// ...
class AggregateDesignatorNames {
public:
  AggregateDesignatorNames(QualType T) {
    if (!T.isNull()) {
      T = T.getCanonicalType();
      if (T->isArrayType()) {
        IsArray = true;
        Valid = true;
        return;
      }
      if (const RecordDecl *RD = T->getAsRecordDecl()) {
        Valid = true;
        FieldsIt = RD->field_begin();
        FieldsEnd = RD->field_end();
        if (const auto *CRD = llvm::dyn_cast<CXXRecordDecl>(RD)) {
          BasesIt = CRD->bases_begin();
          BasesEnd = CRD->bases_end();
          // Only aggregates can be initialized by a bare init list, so
          // designators only make sense for them.
          Valid = CRD->isAggregate();
        }
        // A record with no bases and exactly one field, e.g. the single
        // member array wrapped by std::array.
        OneField = Valid && BasesIt == BasesEnd && FieldsIt != FieldsEnd &&
                   std::next(FieldsIt) == FieldsEnd;
      }
    }
  }
  // Returns false if the type was not an aggregate.
  operator bool() const { return Valid; }
  // Advance to the next element in the aggregate.
  void next() {
    if (IsArray)
      ++Index;
    else if (BasesIt != BasesEnd)
      ++BasesIt;
    else if (FieldsIt != FieldsEnd)
      ++FieldsIt;
  }
  // Print the designator to Out.
  // Returns false if we could not produce a designator for this element.
  // ForSubobject is true when the caller intends to descend into this
  // element's own initializers rather than designate it directly.
  bool append(std::string &Out, bool ForSubobject) {
    if (IsArray) {
      Out.push_back('[');
      Out.append(std::to_string(Index));
      Out.push_back(']');
      return true;
    }
    if (BasesIt != BasesEnd)
      return false; // Bases can't be designated. Should we make one up?
    if (FieldsIt != FieldsEnd) {
      llvm::StringRef FieldName;
      if (const IdentifierInfo *II = FieldsIt->getIdentifier())
        FieldName = II->getName();

      // For certain objects, their subobjects may be named directly.
      // (Returning true without appending yields an empty designator for
      // this level, so the subobject's own designators attach directly.)
      if (ForSubobject &&
          (FieldsIt->isAnonymousStructOrUnion() ||
           // std::array<int,3> x = {1,2,3}. Designators not strictly valid!
           (OneField && isReservedName(FieldName))))
        return true;

      if (!FieldName.empty() && !isReservedName(FieldName)) {
        Out.push_back('.');
        Out.append(FieldName.begin(), FieldName.end());
        return true;
      }
      return false;
    }
    return false;
  }

private:
  bool Valid = false;
  bool IsArray = false;
  bool OneField = false; // e.g. std::array { T __elements[N]; }
  unsigned Index = 0;
  CXXRecordDecl::base_class_const_iterator BasesIt;
  CXXRecordDecl::base_class_const_iterator BasesEnd;
  RecordDecl::field_iterator FieldsIt;
  RecordDecl::field_iterator FieldsEnd;
};
} // namespace
// Collect designator labels describing the elements of an init list.
//
// This function contributes the designators of some (sub)object, which is
// represented by the semantic InitListExpr Sem.
// This includes any nested subobjects, but *only* if they are part of the
// same original syntactic init list (due to brace elision). In other words,
// it may descend into subobjects but not written init-lists.
//
// For example: struct Outer { Inner a,b; }; struct Inner { int x, y; }
// Outer o{{1, 2}, 3};
// This function will be called with Sem = { {1, 2}, {3, ImplicitValue} }
// It should generate designators '.a:' and '.b.x:'.
// '.a:' is produced directly without recursing into the written sublist.
// (The written sublist will have a separate collectDesignators() call later).
// Recursion with Prefix='.b' and Sem = {3, ImplicitValue} produces '.b.x:'.
//
// Prefix accumulates the designator path of the object Sem describes; Out
// maps each initializer's begin location to its full designator string.
static void collectDesignators(
    const InitListExpr *Sem, llvm::DenseMap<SourceLocation, std::string> &Out,
    const llvm::DenseSet<SourceLocation> &NestedBraces, std::string &Prefix) {
  if (!Sem || Sem->isTransparent())
    return;
  assert(Sem->isSemanticForm());

  // The elements of the semantic form all correspond to direct subobjects of
  // the aggregate type. `Fields` iterates over these subobject names.
  AggregateDesignatorNames Fields(Sem->getType());
  if (!Fields)
    return;
  for (const Expr *Init : Sem->inits()) {
    // Runs at the end of each iteration, including on `continue`.
    const llvm::scope_exit Next([&, Size(Prefix.size())] {
      Fields.next();       // Always advance to the next subobject name.
      Prefix.resize(Size); // Erase any designator we appended.
    });
    // Skip for a broken initializer or if it is a "hole" in a subobject that
    // was not explicitly initialized.
    if (!Init || llvm::isa<ImplicitValueInitExpr>(Init))
      continue;

    const auto *BraceElidedSubobject = llvm::dyn_cast<InitListExpr>(Init);
    if (BraceElidedSubobject &&
        NestedBraces.contains(BraceElidedSubobject->getLBraceLoc()))
      BraceElidedSubobject = nullptr; // there were braces!

    if (!Fields.append(Prefix, BraceElidedSubobject != nullptr))
      continue; // no designator available for this subobject
    if (BraceElidedSubobject) {
      // If the braces were elided, this aggregate subobject is initialized
      // inline in the same syntactic list.
      // Descend into the semantic list describing the subobject.
      // (NestedBraces are still correct, they're from the same syntactic
      // list).
      collectDesignators(BraceElidedSubobject, Out, NestedBraces, Prefix);
      continue;
    }
    Out.try_emplace(Init->getBeginLoc(), Prefix);
  }
}
// Computes the designator string (e.g. "[0]", ".field.sub") for each
// initializer of the syntactic init list Syn, keyed by the initializer's
// begin location so callers can correlate it back to the syntactic form.
llvm::DenseMap<SourceLocation, std::string>
getUnwrittenDesignators(const InitListExpr *Syn) {
  assert(Syn->isSyntacticForm());

  // collectDesignators needs to know which InitListExprs in the semantic tree
  // were actually written, but InitListExpr::isExplicit() lies.
  // Instead, record where braces of sub-init-lists occur in the syntactic form.
  llvm::DenseSet<SourceLocation> NestedBraces;
  for (const Expr *Init : Syn->inits())
    if (auto *Nested = llvm::dyn_cast<InitListExpr>(Init))
      NestedBraces.insert(Nested->getLBraceLoc());

  // Traverse the semantic form to find the designators.
  // We use their SourceLocation to correlate with the syntactic form later.
  llvm::DenseMap<SourceLocation, std::string> Designators;
  std::string EmptyPrefix;
  collectDesignators(Syn->isSemanticForm() ? Syn : Syn->getSemanticForm(),
                     Designators, NestedBraces, EmptyPrefix);
  return Designators;
}
} // namespace clang::tidy::utils | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/utils/DesignatedInitializers.cpp |
#!/usr/bin/python
import numpy
# based on the vicar2png module by Jessica McKellar (jesstess at mit.edu)
# substantial modifications have been made to the code. However for
# thoroughness, I am including her Copyright under the MIT License below:
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Jessica McKellar
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
'''
class VICARMetadata(object):
    """
    Contains VICAR metadata accessible as uppercase class attributes,
    e.g.:

    vicar.RECSIZE
    vicar.FORMAT

    Values consisting only of digits are converted to ``int``; everything
    else is kept as a string.
    """

    def __init__(self, metadata):
        """
        metadata: A dictionary of VICAR label/value pairs.
        """
        # BUG FIX: use .items() instead of the Python-2-only .iteritems()
        # so the class works under both Python 2 and Python 3.
        for key, value in metadata.items():
            if value.isdigit():
                value = int(value)
            setattr(self, key.upper(), value)
def addMetadataToDict(metadata, metadata_dict):
    """Parse VICAR ``LABEL=value`` pairs from *metadata* into *metadata_dict*.

    Single-quoted values have their quotes stripped; parenthesized values
    (lists) are kept verbatim, including any quotes inside the parentheses.
    A pair is only stored once a terminating space is seen, so the label
    text is expected to be space-padded, per the VICAR format.
    Returns the (mutated) metadata_dict.
    """
    reading_label = True
    in_paren = False
    in_quote = False
    buf = ''
    label = ''
    for ch in metadata:
        if reading_label:
            if ch == '=':
                # Label complete; switch to collecting its value.
                label, buf = buf, ''
                reading_label = False
                in_paren = False
                in_quote = False
            elif ch != ' ':
                buf += ch
        else:  # collecting the value
            if ch == "'":
                in_quote = not in_quote
                # Quote characters are only preserved inside parentheses.
                if in_paren:
                    buf += ch
            elif ch == "(" and not in_quote:
                in_paren = True
                buf += ch
            elif ch == ")" and not in_quote:
                in_paren = False
                buf += ch
            elif ch == " " and buf and not (in_quote or in_paren):
                # Unquoted space ends the value: store the pair.
                metadata_dict[label] = buf
                reading_label = True
                in_paren = False
                in_quote = False
                buf = ''
            elif ch == " " and not in_quote:
                continue
            else:
                buf += ch
    return metadata_dict
def process_metadata(metadata_fd):
    """Parse the VICAR label (and EOL label, if present) from an open file.

    metadata_fd: a file object opened for reading; it is closed before
    returning.

    Returns a VICARMetadata instance built from all label/value pairs found.
    Raises ValueError if the file does not start with a valid LBLSIZE field.
    """
    # A VICAR file must start with 'LBLSIZE=<integer label size>'.
    lblsize_field = metadata_fd.read(len("LBLSIZE="))
    if lblsize_field.upper() != "LBLSIZE=":
        raise ValueError("Malformed VICAR file: doesn't start with LBLSIZE.")
    # Accumulate characters until the first space terminates the value.
    lblsize = ""
    while True:
        char = metadata_fd.read(1)
        if char == " ":
            break
        else:
            lblsize += char
    try:
        lblsize = int(lblsize)
    except ValueError:
        raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
    # Read in the rest of the VICAR metadata.
    metadata_fd.seek(0)
    metadata = metadata_fd.read(lblsize)
    metadata_dict = {}
    metadata_dict = addMetadataToDict(metadata, metadata_dict)
    # Provisional metadata object; only used to locate the optional EOL label.
    vicar = VICARMetadata(metadata_dict)
    if(hasattr(vicar, 'EOL')):
        if vicar.EOL == 1:
            # Per-pixel size in bytes for each format, needed to compute
            # where the image data ends and the EOL label begins.
            if vicar.FORMAT == 'BYTE':
                byteCount = 1
            elif vicar.FORMAT == 'HALF':
                byteCount = 2
            elif vicar.FORMAT == 'FULL':
                byteCount = 4
            elif vicar.FORMAT == 'REAL':
                byteCount = 4
            elif vicar.FORMAT == 'DOUB':
                byteCount = 8
            else:
                raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,metadata_fd.name))
            # Read in the VICAR metadata from the end of the file
            metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
                             + byteCount*vicar.N1*vicar.N2*vicar.N3)
            # A VICAR file must start with 'LBLSIZE=<integer label size>'.
            lblsize_field = metadata_fd.read(len("LBLSIZE="))
            if lblsize_field.upper() != "LBLSIZE=":
                raise ValueError("Malformed VICAR file: EOL doesn't start with LBLSIZE.")
            lblsize = ""
            while True:
                char = metadata_fd.read(1)
                if char == " ":
                    break
                else:
                    lblsize += char
            try:
                lblsize = int(lblsize)
            except ValueError:
                raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
            # Seek back to the start of the EOL label so the read below picks
            # up the whole label, including the LBLSIZE field just scanned.
            metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
                             + byteCount*vicar.N1*vicar.N2*vicar.N3)
            metadata = metadata_fd.read(lblsize)
            # EOL pairs are merged into (and may override) the main label's.
            metadata_dict = addMetadataToDict(metadata, metadata_dict)
    metadata_fd.close()
    return VICARMetadata(metadata_dict)
def extract_image(vicar, image_fd):
    """Read the first image band from an open VICAR file.

    vicar: a VICARMetadata instance describing the file.
    image_fd: file object opened in binary mode.

    Returns a 2-D numpy array of shape (vicar.NL, vicar.NS).
    Raises ValueError for unsupported FORMAT or ORG values.
    Only ORG == 'BSQ' (band-sequential) layout is supported, and only the
    first band is read when the file holds more than one.
    """
    # Skip past the label and any binary header records to the pixel data.
    image_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE)
    if vicar.FORMAT == 'BYTE':
        outType = numpy.int8
    elif vicar.FORMAT == 'HALF':
        outType = numpy.int16
    elif vicar.FORMAT == 'FULL':
        outType = numpy.int32
    elif vicar.FORMAT == 'REAL':
        outType = numpy.float32
    elif vicar.FORMAT == 'DOUB':
        outType = numpy.float64
    else:
        raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,image_fd.name))
    if vicar.ORG != 'BSQ':
        # BUG FIX: ORG is a string label; the original '%i' format raised a
        # TypeError here instead of the intended ValueError message.
        raise ValueError('Vicar ORG: %s is not supported.'%vicar.ORG)
    if vicar.NB > 1:
        # print() call form works under both Python 2 and Python 3 (the
        # original used a Python-2-only print statement).
        print('Reading only the first image of %i images in the file'%vicar.NB)
    nx = vicar.NS
    ny = vicar.NL
    image = numpy.fromfile(image_fd,dtype=outType,count=nx*ny).reshape(ny,nx)
    return image
def readVicar(infile):
    """Read a VICAR file and return a ``(image, metadata)`` tuple.

    infile: path to the VICAR file.
    Returns (2-D numpy array, VICARMetadata instance).
    """
    # process_metadata() takes ownership of this handle and closes it.
    metadata_fd = open(infile, "r")
    vicar_metadata = process_metadata(metadata_fd)
    # BUG FIX: the image handle was previously never closed (leaked on every
    # call); ensure it is closed even if extract_image() raises.
    image_fd = open(infile, "rb")
    try:
        image = extract_image(vicar_metadata, image_fd)
    finally:
        image_fd.close()
    return (image, vicar_metadata)
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Examples:
# python manage.py rebuild_timeline --settings=settings.local_timeline --initial_date 2014-10-02 --final_date 2014-10-03
# python manage.py rebuild_timeline --settings=settings.local_timeline --purge
# python manage.py rebuild_timeline --settings=settings.local_timeline --initial_date 2014-10-02
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.db.models import Model
from django.db import reset_queries
from django.test.utils import override_settings
from taiga.projects.models import Project
from taiga.projects.history import services as history_services
from taiga.projects.history.choices import HistoryType
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.models import Timeline
from taiga.timeline.service import (_add_to_object_timeline, _get_impl_key_from_model,
_timeline_impl_map, extract_user_info)
from taiga.timeline.signals import on_new_history_entry, _push_to_timelines
from taiga.users.models import User
from unittest.mock import patch
from optparse import make_option
import gc
class BulkCreator(object):
    """Queues Timeline instances and inserts them in batches via bulk_create."""

    def __init__(self):
        self.timeline_objects = []
        self.created = None

    def create_element(self, element):
        # Queue the element; write out a batch once more than 1000 are held.
        queue = self.timeline_objects
        queue.append(element)
        if len(queue) > 1000:
            self.flush()

    def flush(self):
        # Insert everything queued so far, then rebind to a fresh list and
        # run a GC pass to keep memory bounded across very large rebuilds.
        Timeline.objects.bulk_create(self.timeline_objects, batch_size=1000)
        del self.timeline_objects
        self.timeline_objects = []
        gc.collect()


bulk_creator = BulkCreator()
def custom_add_to_object_timeline(obj:object, instance:object, event_type:str, created_datetime:object, namespace:str="default", extra_data:dict=None):
    """Queue a Timeline entry for `obj` instead of inserting it immediately.

    Drop-in replacement for taiga.timeline.service._add_to_object_timeline
    (patched in by generate_timeline) that routes creation through the
    module-level `bulk_creator` so entries can be bulk-inserted.
    """
    # BUG FIX: the original used a mutable default argument (extra_data={});
    # use a None sentinel and build a fresh dict per call instead.
    if extra_data is None:
        extra_data = {}
    assert isinstance(obj, Model), "obj must be a instance of Model"
    assert isinstance(instance, Model), "instance must be a instance of Model"
    event_type_key = _get_impl_key_from_model(instance.__class__, event_type)
    impl = _timeline_impl_map.get(event_type_key, None)
    bulk_creator.create_element(Timeline(
        content_object=obj,
        namespace=namespace,
        event_type=event_type_key,
        project=instance.project,
        data=impl(instance, extra_data=extra_data),
        data_content_type=ContentType.objects.get_for_model(instance.__class__),
        created=created_datetime,
    ))
def generate_timeline(initial_date, final_date, project_id):
    """Rebuild Timeline rows, optionally limited by date range and/or project.

    Existing Timeline rows matching the given filters are deleted first,
    then recreated from Project creation dates and HistoryEntry rows.
    All inserts go through the module-level `bulk_creator`.
    """
    # Delete only the affected slice of existing timeline entries. With no
    # filters at all nothing is deleted here (the --purge option handles
    # full wipes separately).
    if initial_date or final_date or project_id:
        timelines = Timeline.objects.all()
        if initial_date:
            timelines = timelines.filter(created__gte=initial_date)
        if final_date:
            timelines = timelines.filter(created__lt=final_date)
        if project_id:
            timelines = timelines.filter(project__id=project_id)
        timelines.delete()
    # Route every timeline insert through the bulk creator while rebuilding.
    with patch('taiga.timeline.service._add_to_object_timeline', new=custom_add_to_object_timeline):
        # Projects api wasn't a HistoryResourceMixin so we can't interate on the HistoryEntries in this case
        projects = Project.objects.order_by("created_date")
        history_entries = HistoryEntry.objects.order_by("created_at")
        if initial_date:
            projects = projects.filter(created_date__gte=initial_date)
            history_entries = history_entries.filter(created_at__gte=initial_date)
        if final_date:
            projects = projects.filter(created_date__lt=final_date)
            history_entries = history_entries.filter(created_at__lt=final_date)
        if project_id:
            # Restrict history entries to the project's own objects by
            # rebuilding the set of history keys for its items.
            project = Project.objects.get(id=project_id)
            us_keys = ['userstories.userstory:%s'%(id) for id in project.user_stories.values_list("id", flat=True)]
            tasks_keys = ['tasks.task:%s'%(id) for id in project.tasks.values_list("id", flat=True)]
            issue_keys = ['issues.issue:%s'%(id) for id in project.issues.values_list("id", flat=True)]
            wiki_keys = ['wiki.wikipage:%s'%(id) for id in project.wiki_pages.values_list("id", flat=True)]
            keys = us_keys + tasks_keys + issue_keys + wiki_keys
            projects = projects.filter(id=project_id)
            history_entries = history_entries.filter(key__in=keys)
            # Memberships: membership creation has no history entry, so push
            # those events directly.
            for membership in project.memberships.exclude(user=None).exclude(user=project.owner):
                _push_to_timelines(project, membership.user, membership, "create", membership.created_at)
        for project in projects.iterator():
            # NOTE(review): bulk_creator.created is never assigned anywhere in
            # this module, so this always prints None — possibly meant to be
            # project.created_date; confirm before changing.
            print("Project:", bulk_creator.created)
            extra_data = {
                "values_diff": {},
                "user": extract_user_info(project.owner),
            }
            _push_to_timelines(project, project.owner, project, "create", project.created_date, extra_data=extra_data)
            del extra_data
        for historyEntry in history_entries.iterator():
            print("History entry:", historyEntry.created_at)
            try:
                on_new_history_entry(None, historyEntry, None)
            except ObjectDoesNotExist as e:
                # Best-effort: entries referring to deleted objects are skipped.
                print("Ignoring")
    # Write out any queued entries that didn't reach a full batch.
    bulk_creator.flush()
class Command(BaseCommand):
    """Management command that rebuilds project timelines.

    Supports purging all existing entries and restricting the rebuild to a
    date range and/or a single project (see module docstring for examples).
    """

    help = 'Regenerate project timeline'

    # Single tuple of options instead of chained tuple concatenations.
    option_list = BaseCommand.option_list + (
        make_option('--purge',
                    action='store_true',
                    dest='purge',
                    default=False,
                    help='Purge existing timelines'),
        make_option('--initial_date',
                    action='store',
                    dest='initial_date',
                    default=None,
                    help='Initial date for timeline generation'),
        make_option('--final_date',
                    action='store',
                    dest='final_date',
                    default=None,
                    help='Final date for timeline generation'),
        make_option('--project',
                    action='store',
                    dest='project',
                    default=None,
                    help='Selected project id for timeline generation'),
    )

    @override_settings(DEBUG=False)
    def handle(self, *args, **options):
        # Wipe every timeline entry first when --purge was requested.
        if options["purge"] == True:
            Timeline.objects.all().delete()
        generate_timeline(options["initial_date"], options["final_date"], options["project"])
# frozen_string_literal: true
# Bare Active Record model with no custom behavior; the backing table name
# is derived by Active Record's usual naming convention.
class Binary < ActiveRecord::Base
end
"""All Error Types pertaining to Enrollment."""
class CourseEnrollmentError(Exception):
    """Generic Course Enrollment Error.

    Describes any error that may occur when reading or updating enrollment
    information for a user or a course.
    """
    def __init__(self, msg, data=None):
        super(CourseEnrollmentError, self).__init__(msg)
        # Corresponding information to help resolve the error.
        self.data = data


class CourseNotFoundError(CourseEnrollmentError):
    """The requested course could not be found."""
    pass


class UserNotFoundError(CourseEnrollmentError):
    """The requested user could not be found."""
    pass


class CourseEnrollmentClosedError(CourseEnrollmentError):
    """Enrollment for the requested course is closed."""
    pass


class CourseEnrollmentFullError(CourseEnrollmentError):
    """The requested course's enrollment is full."""
    pass


class CourseEnrollmentExistsError(CourseEnrollmentError):
    """An enrollment already exists for this user and course.

    The pre-existing enrollment is carried on the ``enrollment`` attribute.
    """
    enrollment = None

    def __init__(self, message, enrollment):
        super(CourseEnrollmentExistsError, self).__init__(message)
        self.enrollment = enrollment


class CourseModeNotFoundError(CourseEnrollmentError):
    """The requested course mode could not be found."""
    pass


class EnrollmentNotFoundError(CourseEnrollmentError):
    """The requested enrollment could not be found."""
    pass


class EnrollmentApiLoadError(CourseEnrollmentError):
    """The data API could not be loaded."""
    pass
#!/bin/bash
# Copyright 2021 The Prometheus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Regenerates dist/index.d.ts: a license header, the LRParser declaration,
# and term constants derived from src/parser.terms.js.
set -ex

mkdir -p ./dist

indexFile='./dist/index.d.ts'
# FIX: quote every ${indexFile} expansion so an unexpected value can never
# word-split or glob (notably in the `rm` and the output redirection).
if [[ -f "${indexFile}" ]]; then
  rm "${indexFile}"
fi

cat <<EOF >> "${indexFile}"
// Copyright 2021 The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file was generated by lezer-promql. You probably should not edit it.

import { LRParser } from '@lezer/lr'

export const parser: LRParser

$(sed -E 's/ = [0-9]+/: number/' src/parser.terms.js)
EOF
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package vault_test
import (
"encoding/json"
"testing"
"time"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers/minimal"
)
func TestExpiration_RenewToken_TestCluster(t *testing.T) {
t.Parallel()
cluster := minimal.NewTestSoloCluster(t, nil)
client := cluster.Cores[0].Client
// Mount the auth backend
err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
Type: "approle",
})
if err != nil {
t.Fatal(err)
}
// Tune the mount
err = client.Sys().TuneMount("auth/approle", api.MountConfigInput{
DefaultLeaseTTL: "5s",
MaxLeaseTTL: "5s",
})
if err != nil {
t.Fatal(err)
}
// Create role
resp, err := client.Logical().Write("auth/approle/role/role-period", map[string]interface{}{
"period": "5s",
})
if err != nil {
t.Fatal(err)
}
// Get role_id
resp, err = client.Logical().Read("auth/approle/role/role-period/role-id")
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for fetching the role-id")
}
roleID := resp.Data["role_id"]
// Get secret_id
resp, err = client.Logical().Write("auth/approle/role/role-period/secret-id", map[string]interface{}{})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for fetching the secret-id")
}
secretID := resp.Data["secret_id"]
// Login
resp, err = client.Logical().Write("auth/approle/login", map[string]interface{}{
"role_id": roleID,
"secret_id": secretID,
})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for login")
}
if resp.Auth == nil {
t.Fatal("expected auth object from response")
}
if resp.Auth.ClientToken == "" {
t.Fatal("expected a client token")
}
roleToken := resp.Auth.ClientToken
// Wait 3 seconds
time.Sleep(3 * time.Second)
// Renew
resp, err = client.Logical().Write("auth/token/renew", map[string]interface{}{
"token": roleToken,
})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for renew")
}
// Perform token lookup and verify TTL
resp, err = client.Auth().Token().Lookup(roleToken)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for token lookup")
}
ttlRaw, ok := resp.Data["ttl"].(json.Number)
if !ok {
t.Fatal("no ttl value found in data object")
}
ttlInt, err := ttlRaw.Int64()
if err != nil {
t.Fatalf("unable to convert ttl to int: %s", err)
}
ttl := time.Duration(ttlInt) * time.Second
if ttl < 4*time.Second {
t.Fatal("expected ttl value to be around 5s")
}
// Wait 3 seconds
time.Sleep(3 * time.Second)
// Do a second renewal to ensure that period can be renewed past sys/mount max_ttl
resp, err = client.Logical().Write("auth/token/renew", map[string]interface{}{
"token": roleToken,
})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for renew")
}
// Perform token lookup and verify TTL
resp, err = client.Auth().Token().Lookup(roleToken)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected a response for token lookup")
}
ttlRaw, ok = resp.Data["ttl"].(json.Number)
if !ok {
t.Fatal("no ttl value found in data object")
}
ttlInt, err = ttlRaw.Int64()
if err != nil {
t.Fatalf("unable to convert ttl to int: %s", err)
}
ttl = time.Duration(ttlInt) * time.Second
if ttl < 4*time.Second {
t.Fatal("expected ttl value to be around 5s")
}
} | go | github | https://github.com/hashicorp/vault | vault/expiration_integ_test.go |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package refactoring
import (
"fmt"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// MoveStatement describes a single resolved move: the object at From is to
// be treated as having moved to To.
type MoveStatement struct {
	// From and To are the two endpoints of the move. They are produced by
	// addrs.UnifyMoveEndpoints, which guarantees both have the same
	// endpoint kind (see ObjectKind).
	From, To *addrs.MoveEndpointInModule

	// DeclRange is the source location this statement was derived from.
	DeclRange tfdiags.SourceRange

	// Provider is the provider configuration that applies to the "to" address
	// of this move. As in, the provider that will manage the resource after
	// it has been moved.
	//
	// This may be null if the "to" address points to a module instead of a
	// resource.
	Provider *addrs.AbsProviderConfig

	// Implied is true for statements produced by ImpliedMoveStatements, and
	// false for statements produced by FindMoveStatements.
	//
	// An "implied" statement is one that has no explicit "moved" block in
	// the configuration and was instead generated automatically based on a
	// comparison between current configuration and previous run state.
	// For implied statements, the DeclRange field contains the source location
	// of something in the source code that implied the statement, in which
	// case it would probably be confusing to show that source range to the
	// user, e.g. in an error message, without clearly mentioning that it's
	// related to an implied move statement.
	Implied bool
}
// FindMoveStatements recurses through the modules of the given configuration
// and returns a flat set of all "moved" blocks defined within, in a
// deterministic but undefined order.
//
// It is a convenience wrapper around findMoveStatements starting with an
// empty accumulator.
func FindMoveStatements(rootCfg *configs.Config) []MoveStatement {
	return findMoveStatements(rootCfg, nil)
}
// findMoveStatements appends the statements for the "moved" blocks declared
// directly in cfg's module, then recurses into its child modules,
// accumulating everything into "into".
func findMoveStatements(cfg *configs.Config, into []MoveStatement) []MoveStatement {
	modAddr := cfg.Path
	for _, mc := range cfg.Module.Moved {
		fromAddr, toAddr := addrs.UnifyMoveEndpoints(modAddr, mc.From, mc.To)
		if fromAddr == nil || toAddr == nil {
			// Invalid combination should've been caught during original
			// configuration decoding, in the configs package.
			panic(fmt.Sprintf("incompatible move endpoints in %s", mc.DeclRange))
		}

		stmt := MoveStatement{
			From:      fromAddr,
			To:        toAddr,
			DeclRange: tfdiags.SourceRangeFromHCL(mc.DeclRange),
			Implied:   false,
		}

		// We have the statement, let's see if we should attach a provider to
		// it.
		if toResource, ok := mc.To.ConfigMoveable(addrs.RootModule).(addrs.ConfigResource); ok {
			// Only attach providers if we are moving resources, and we attach
			// the to resource provider from the config. We can retrieve the
			// from resource provider from the state later.
			modCfg := cfg.Descendant(toResource.Module)

			// It's possible that multiple refactorings have left a moved block
			// that points to a module which no longer exists. This may also be
			// a mistake, but the user will see the unexpected deletion in the
			// plan if it is.
			if modCfg != nil {
				resourceConfig := modCfg.Module.ResourceByAddr(toResource.Resource)
				if resourceConfig != nil {
					// Check the target resource config actually exists before we
					// try and extract the provider from them.
					stmt.Provider = &addrs.AbsProviderConfig{
						Module:   modAddr,
						Provider: resourceConfig.Provider,
					}
					if resourceConfig.ProviderConfigRef != nil {
						stmt.Provider.Alias = resourceConfig.ProviderConfigRef.Alias
					}
				}
			}
		}

		into = append(into, stmt)
	}

	for _, childCfg := range cfg.Children {
		into = findMoveStatements(childCfg, into)
	}
	return into
}
// ImpliedMoveStatements compares addresses in the given state with addresses
// in the given configuration and potentially returns additional MoveStatement
// objects representing moves we infer automatically, even though they aren't
// explicitly recorded in the configuration.
//
// We do this primarily for backward compatibility with behaviors of Terraform
// versions prior to introducing explicit "moved" blocks. Specifically, this
// function aims to achieve the same result as the "NodeCountBoundary"
// heuristic from Terraform v1.0 and earlier, where adding or removing the
// "count" meta-argument from an already-created resource can automatically
// preserve the zeroth or the NoKey instance, depending on the direction of
// the change. We do this only for resources that aren't mentioned already
// in at least one explicit move statement.
//
// As with the previous-version heuristics it replaces, this is a best effort
// and doesn't handle all situations. An explicit move statement is always
// preferred, but our goal here is to match exactly the same cases that the
// old heuristic would've matched, to retain compatibility for existing modules.
//
// We should think very hard before adding any _new_ implication rules for
// moved statements.
//
// explicitStmts should contain the statements returned by FindMoveStatements,
// so that implied moves are suppressed for resources already covered by an
// explicit statement.
func ImpliedMoveStatements(rootCfg *configs.Config, prevRunState *states.State, explicitStmts []MoveStatement) []MoveStatement {
	return impliedMoveStatements(rootCfg, prevRunState, explicitStmts, nil)
}
// impliedMoveStatements appends implied statements for cfg's module (across
// all of its instances in prevRunState), then recurses into child modules,
// accumulating into "into".
func impliedMoveStatements(cfg *configs.Config, prevRunState *states.State, explicitStmts []MoveStatement, into []MoveStatement) []MoveStatement {
	modAddr := cfg.Path

	// There can be potentially many instances of the module, so we need
	// to consider each of them separately.
	for _, modState := range prevRunState.ModuleInstances(modAddr) {
		// What we're looking for here is either a no-key resource instance
		// where the configuration has count set or a zero-key resource
		// instance where the configuration _doesn't_ have count set.
		// If so, we'll generate a statement replacing no-key with zero-key or
		// vice-versa.
		for _, rState := range modState.Resources {
			rAddr := rState.Addr
			rCfg := cfg.Module.ResourceByAddr(rAddr.Resource)
			if rCfg == nil {
				// If there's no configuration at all then there can't be any
				// automatic move fixup to do.
				continue
			}
			approxSrcRange := tfdiags.SourceRangeFromHCL(rCfg.DeclRange)

			// NOTE: We're intentionally not checking to see whether the
			// "to" addresses in our implied statements already have
			// instances recorded in state, because ApplyMoves should
			// deal with such conflicts in a deterministic way for both
			// explicit and implicit moves, and we'd rather have that
			// handled all in one place.

			var fromKey, toKey addrs.InstanceKey

			switch {
			case rCfg.Count != nil:
				// If we have a count expression then we'll use _that_ as
				// a slightly-more-precise approximate source range.
				approxSrcRange = tfdiags.SourceRangeFromHCL(rCfg.Count.Range())

				if riState := rState.Instances[addrs.NoKey]; riState != nil {
					fromKey = addrs.NoKey
					toKey = addrs.IntKey(0)
				}
			case rCfg.Count == nil && rCfg.ForEach == nil: // no repetition at all
				if riState := rState.Instances[addrs.IntKey(0)]; riState != nil {
					fromKey = addrs.IntKey(0)
					toKey = addrs.NoKey
				}
			}

			// fromKey != toKey only when one of the branches above matched;
			// both remain nil otherwise.
			if fromKey != toKey {
				// We mustn't generate an implied statement if the user already
				// wrote an explicit statement referring to this resource,
				// because they may wish to select an instance key other than
				// zero as the one to retain.
				if !haveMoveStatementForResource(rAddr, explicitStmts) {
					resource := cfg.Descendant(addrs.RootModule).Module.ResourceByAddr(rAddr.Resource)
					provider := &addrs.AbsProviderConfig{
						Module:   rAddr.Module.Module(),
						Provider: resource.Provider,
					}
					if resource.ProviderConfigRef != nil {
						provider.Alias = resource.ProviderConfigRef.Alias
					}

					into = append(into, MoveStatement{
						From:      addrs.ImpliedMoveStatementEndpoint(rAddr.Instance(fromKey), approxSrcRange),
						To:        addrs.ImpliedMoveStatementEndpoint(rAddr.Instance(toKey), approxSrcRange),
						Provider:  provider,
						DeclRange: approxSrcRange,
						Implied:   true,
					})
				}
			}
		}
	}

	for _, childCfg := range cfg.Children {
		into = impliedMoveStatements(childCfg, prevRunState, explicitStmts, into)
	}

	return into
}
// ObjectKind returns the kind of object (resource instance or module
// instance) that both endpoints of this statement refer to.
func (s *MoveStatement) ObjectKind() addrs.MoveEndpointKind {
	// addrs.UnifyMoveEndpoints guarantees that both of our addresses have
	// the same kind, so we can just arbitrarily use From and assume To will
	// match it.
	return s.From.ObjectKind()
}
// Name returns a short "from->to" label for this statement. It is used
// internally for displaying the statement graph.
func (s *MoveStatement) Name() string {
	return fmt.Sprintf("%s->%s", s.From, s.To)
}
func haveMoveStatementForResource(addr addrs.AbsResource, stmts []MoveStatement) bool {
// This is not a particularly optimal way to answer this question,
// particularly since our caller calls this function in a loop already,
// but we expect the total number of explicit statements to be small
// in any reasonable Terraform configuration and so a more complicated
// approach wouldn't be justified here.
for _, stmt := range stmts {
if stmt.From.SelectsResource(addr) {
return true
}
if stmt.To.SelectsResource(addr) {
return true
}
}
return false
} | go | github | https://github.com/hashicorp/terraform | internal/refactoring/move_statement.go |
import collections
from math import ceil
from django.utils import six
class InvalidPage(Exception):
    """Base exception for any problem with a requested page number."""
    pass
class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be converted to int."""
    pass
class EmptyPage(InvalidPage):
    """Raised when the requested page contains no results."""
    pass
class Paginator(object):
    """
    Splits ``object_list`` into pages of ``per_page`` items each.

    ``orphans`` allows a trailing page smaller than that threshold to be
    merged into the previous page.  ``allow_empty_first_page`` controls
    whether page 1 is valid when there are no objects at all.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Lazily computed caches for count / num_pages.
        self._num_pages = self._count = None

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # Page 1 is tolerated even when empty, if so configured.
            if not (number == 1 and self.allow_empty_first_page):
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        """
        Returns a Page object for the given 1-based page number.
        """
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        end = start + self.per_page
        # Absorb a short trailing page into this one.
        if end + self.orphans >= self.count:
            end = self.count
        return self._get_page(self.object_list[start:end], number, self)

    def _get_page(self, *args, **kwargs):
        """
        Returns an instance of a single page.
        This hook can be used by subclasses to use an alternative to the
        standard :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    def _get_count(self):
        """
        Returns the total number of objects, across all pages.
        """
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # Fall back to len() for plain sequences: either there is no
                # count() method, or it requires arguments (e.g. list.count).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        """
        Returns the total number of pages.
        """
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Orphans shrink the effective item count; at least one
                # page always exists when an empty first page is allowed.
                effective = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(effective / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return list(range(1, self.num_pages + 1))
    page_range = property(_get_page_range)
QuerySetPaginator = Paginator  # Legacy alias, kept for backwards-compatibility.
try:
    # collections.Sequence was a deprecated alias removed in Python 3.10;
    # the canonical location is collections.abc.  Fall back for very old
    # interpreters that lack the abc submodule.
    from collections.abc import Sequence as _Sequence
except ImportError:  # pragma: no cover - Python 2 fallback
    _Sequence = collections.Sequence


class Page(_Sequence):
    """
    One page of a :class:`Paginator`'s results, behaving like a read-only
    sequence of the objects on that page.
    """

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        # 1-based page number within the paginator.
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        if not isinstance(index, (slice,) + six.integer_types):
            raise TypeError
        # The object_list is converted to a list so that if it was a QuerySet
        # it won't be a database hit per __getitem__.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        """Return True if there is a page after this one."""
        return self.number < self.paginator.num_pages

    def has_previous(self):
        """Return True if there is a page before this one."""
        return self.number > 1

    def has_other_pages(self):
        """Return True if this is not the only page."""
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        """Return the validated number of the following page."""
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        """Return the validated number of the preceding page."""
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return (self.paginator.per_page * (self.number - 1)) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # Special case for the last page because there can be orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
# -*- test-case-name: twisted.conch.test.test_insults -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
VT102 and VT220 terminal manipulation.
@author: Jp Calderone
"""
from zope.interface import implementer, Interface
from twisted.internet import protocol, defer, interfaces as iinternet
from twisted.python.compat import intToBytes, iterbytes, networkString
class ITerminalProtocol(Interface):
    """
    An application-level handler for terminal events: keystrokes, size
    changes, unhandled control sequences and connection lifecycle.
    """
    def makeConnection(transport):
        """
        Called with an L{ITerminalTransport} when a connection is established.
        """
    def keystrokeReceived(keyID, modifier):
        """
        A keystroke was received.
        Each keystroke corresponds to one invocation of this method.
        keyID is a string identifier for that key. Printable characters
        are represented by themselves. Control keys, such as arrows and
        function keys, are represented with symbolic constants on
        L{ServerProtocol}.
        """
    def terminalSize(width, height):
        """
        Called to indicate the size of the terminal.
        A terminal of 80x24 should be assumed if this method is not
        called. This method might not be called for real terminals.
        """
    def unhandledControlSequence(seq):
        """
        Called when an unsupported control sequence is received.
        @type seq: L{str}
        @param seq: The whole control sequence which could not be interpreted.
        """
    def connectionLost(reason):
        """
        Called when the connection has been lost.
        reason is a Failure describing why.
        """
@implementer(ITerminalProtocol)
class TerminalProtocol(object):
    """
    A convenient base class providing no-op implementations of every
    L{ITerminalProtocol} event handler; subclasses override what they need.
    """
    def makeConnection(self, terminal):
        # assert ITerminalTransport.providedBy(transport), "TerminalProtocol.makeConnection must be passed an ITerminalTransport implementor"
        self.terminal = terminal
        self.connectionMade()
    def connectionMade(self):
        """
        Called after a connection has been established.
        """
    def keystrokeReceived(self, keyID, modifier):
        """No-op implementation; subclasses override to handle keys."""
        pass
    def terminalSize(self, width, height):
        """No-op implementation; subclasses override to handle resizes."""
        pass
    def unhandledControlSequence(self, seq):
        """No-op implementation; unknown sequences are silently dropped."""
        pass
    def connectionLost(self, reason):
        """No-op implementation; subclasses override for cleanup."""
        pass
class ITerminalTransport(iinternet.ITransport):
    """
    A transport which can drive a VT102-style terminal: cursor movement,
    erasing, scrolling regions, character sets and graphic renditions.
    """
    def cursorUp(n=1):
        """
        Move the cursor up n lines.
        """
    def cursorDown(n=1):
        """
        Move the cursor down n lines.
        """
    def cursorForward(n=1):
        """
        Move the cursor right n columns.
        """
    def cursorBackward(n=1):
        """
        Move the cursor left n columns.
        """
    def cursorPosition(column, line):
        """
        Move the cursor to the given line and column.
        """
    def cursorHome():
        """
        Move the cursor home.
        """
    def index():
        """
        Move the cursor down one line, performing scrolling if necessary.
        """
    def reverseIndex():
        """
        Move the cursor up one line, performing scrolling if necessary.
        """
    def nextLine():
        """
        Move the cursor to the first position on the next line, performing scrolling if necessary.
        """
    def saveCursor():
        """
        Save the cursor position, character attribute, character set, and origin mode selection.
        """
    def restoreCursor():
        """
        Restore the previously saved cursor position, character attribute, character set, and origin mode selection.
        If no cursor state was previously saved, move the cursor to the home position.
        """
    def setModes(modes):
        """
        Set the given modes on the terminal.
        """
    def resetModes(mode):
        """
        Reset the given modes on the terminal.
        """
    def setPrivateModes(modes):
        """
        Set the given DEC private modes on the terminal.
        """
    def resetPrivateModes(modes):
        """
        Reset the given DEC private modes on the terminal.
        """
    def applicationKeypadMode():
        """
        Cause keypad to generate control functions.
        Cursor key mode selects the type of characters generated by cursor keys.
        """
    def numericKeypadMode():
        """
        Cause keypad to generate normal characters.
        """
    def selectCharacterSet(charSet, which):
        """
        Select a character set.
        charSet should be one of CS_US, CS_UK, CS_DRAWING, CS_ALTERNATE, or
        CS_ALTERNATE_SPECIAL.
        which should be one of G0 or G1.
        """
    def shiftIn():
        """
        Activate the G0 character set.
        """
    def shiftOut():
        """
        Activate the G1 character set.
        """
    def singleShift2():
        """
        Shift to the G2 character set for a single character.
        """
    def singleShift3():
        """
        Shift to the G3 character set for a single character.
        """
    def selectGraphicRendition(*attributes):
        """
        Enabled one or more character attributes.
        Arguments should be one or more of UNDERLINE, REVERSE_VIDEO, BLINK, or BOLD.
        NORMAL may also be specified to disable all character attributes.
        """
    def horizontalTabulationSet():
        """
        Set a tab stop at the current cursor position.
        """
    def tabulationClear():
        """
        Clear the tab stop at the current cursor position.
        """
    def tabulationClearAll():
        """
        Clear all tab stops.
        """
    def doubleHeightLine(top=True):
        """
        Make the current line the top or bottom half of a double-height, double-width line.
        If top is True, the current line is the top half. Otherwise, it is the bottom half.
        """
    def singleWidthLine():
        """
        Make the current line a single-width, single-height line.
        """
    def doubleWidthLine():
        """
        Make the current line a double-width line.
        """
    def eraseToLineEnd():
        """
        Erase from the cursor to the end of line, including cursor position.
        """
    def eraseToLineBeginning():
        """
        Erase from the cursor to the beginning of the line, including the cursor position.
        """
    def eraseLine():
        """
        Erase the entire cursor line.
        """
    def eraseToDisplayEnd():
        """
        Erase from the cursor to the end of the display, including the cursor position.
        """
    def eraseToDisplayBeginning():
        """
        Erase from the cursor to the beginning of the display, including the cursor position.
        """
    def eraseDisplay():
        """
        Erase the entire display.
        """
    def deleteCharacter(n=1):
        """
        Delete n characters starting at the cursor position.
        Characters to the right of deleted characters are shifted to the left.
        """
    def insertLine(n=1):
        """
        Insert n lines at the cursor position.
        Lines below the cursor are shifted down. Lines moved past the bottom margin are lost.
        This command is ignored when the cursor is outside the scroll region.
        """
    def deleteLine(n=1):
        """
        Delete n lines starting at the cursor position.
        Lines below the cursor are shifted up. This command is ignored when the cursor is outside
        the scroll region.
        """
    def reportCursorPosition():
        """
        Return a Deferred that fires with a two-tuple of (x, y) indicating the cursor position.
        """
    def reset():
        """
        Reset the terminal to its initial state.
        """
    def unhandledControlSequence(seq):
        """
        Called when an unsupported control sequence is received.
        @type seq: L{str}
        @param seq: The whole control sequence which could not be interpreted.
        """
# NOTE(review): despite the name, this holds only ESC (0x1b), not the full
# two-byte CSI introducer ESC '[' — the '[' is appended where needed.
CSI = b'\x1b'
# Maps control-sequence terminator bytes to parser method names for
# terminators that are not valid Python identifiers.
CST = {b'~': b'tilde'}
class modes:
    """
    ECMA 48 standardized modes
    """
    # Keyboard Action Mode: while set, the terminal's keyboard is locked
    # and sends nothing to the host.
    KEYBOARD_ACTION = KAM = 2
    # When set, enables character insertion. New display characters
    # move old display characters to the right. Characters moved past
    # the right margin are lost.
    # When reset, enables replacement mode (disables character
    # insertion). New display characters replace old display
    # characters at cursor position. The old character is erased.
    INSERTION_REPLACEMENT = IRM = 4
    # Set causes a received linefeed, form feed, or vertical tab to
    # move cursor to first column of next line. RETURN transmits both
    # a carriage return and linefeed. This selection is also called
    # new line option.
    # Reset causes a received linefeed, form feed, or vertical tab to
    # move cursor to next line in current column. RETURN transmits a
    # carriage return.
    LINEFEED_NEWLINE = LNM = 20
class privateModes:
    """
    ANSI-Compatible Private Modes
    """
    # These numbers are emitted inside ESC[?...h / ESC[?...l sequences by
    # setPrivateModes / resetPrivateModes below.
    ERROR = 0
    CURSOR_KEY = 1
    ANSI_VT52 = 2
    COLUMN = 3
    SCROLL = 4
    SCREEN = 5
    ORIGIN = 6
    AUTO_WRAP = 7
    AUTO_REPEAT = 8
    PRINTER_FORM_FEED = 18
    PRINTER_EXTENT = 19
    # Toggle cursor visibility (reset hides it)
    CURSOR_MODE = 25
# Character sets
CS_US = b'CS_US'
CS_UK = b'CS_UK'
CS_DRAWING = b'CS_DRAWING'
CS_ALTERNATE = b'CS_ALTERNATE'
CS_ALTERNATE_SPECIAL = b'CS_ALTERNATE_SPECIAL'
# Character-set designators: G0 and G1 may be re-mapped to one of the CS_*
# sets via selectCharacterSet.
G0 = b'G0'
G1 = b'G1'
# G2 and G3 cannot be changed, but they can be shifted to.
G2 = b'G2'
G3 = b'G3'
# Character attributes (SGR parameter numbers)
NORMAL = 0
BOLD = 1
UNDERLINE = 4
BLINK = 5
REVERSE_VIDEO = 7
class Vector:
    """
    A mutable 2-D point, used to track terminal size and cursor position.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Useful in debugging output; nothing relied on the default repr.
        return '%s(x=%r, y=%r)' % (self.__class__.__name__, self.x, self.y)
def log(s):
    """
    Append *s* (coerced with str()) as a single line to a file named
    ``log`` in the current working directory.  Crude debugging aid only.
    """
    with open('log', 'a') as debug_file:
        debug_file.write('%s\n' % (s,))
# XXX TODO - These attributes are really part of the
# ITerminalTransport interface, I think.
# Symbolic key identifiers; each becomes a byte-string constant (via
# FUNCTION_KEYS below) attached to ServerProtocol as an attribute.
_KEY_NAMES = ('UP_ARROW', 'DOWN_ARROW', 'RIGHT_ARROW', 'LEFT_ARROW',
              'HOME', 'INSERT', 'DELETE', 'END', 'PGUP', 'PGDN', 'NUMPAD_MIDDLE',
              'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9',
              'F10', 'F11', 'F12',
              'ALT', 'SHIFT', 'CONTROL')
class _const(object):
"""
@ivar name: A string naming this constant
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '[' + self.name + ']'
def __bytes__(self):
return ('[' + self.name + ']').encode("ascii")
# Byte strings such as b'[UP_ARROW]', used as keystroke identifiers; they
# are attached to ServerProtocol as attributes further below.
FUNCTION_KEYS = [
    _const(_name).__bytes__() for _name in _KEY_NAMES]
@implementer(ITerminalTransport)
class ServerProtocol(protocol.Protocol):
    """
    The server side of a terminal session: parses keystrokes and control
    sequences arriving from the client and dispatches them to an
    L{ITerminalProtocol}, while exposing L{ITerminalTransport} methods that
    emit VT102-style escape sequences back to the client.
    """
    protocolFactory = None
    terminalProtocol = None
    TAB = b'\t'
    BACKSPACE = b'\x7f'
    ##
    lastWrite = b''
    # Parser state: one of b'data', b'escaped', b'bracket-escaped',
    # b'low-function-escaped' (see dataReceived).
    state = b'data'
    termSize = Vector(80, 24)
    cursorPos = Vector(0, 0)
    scrollRegion = None
    # Factory who instantiated me
    factory = None
    def __init__(self, protocolFactory=None, *a, **kw):
        """
        @param protocolFactory: A callable which will be invoked with
        *a, **kw and should return an ITerminalProtocol implementor.
        This will be invoked when a connection to this ServerProtocol
        is established.
        @param a: Any positional arguments to pass to protocolFactory.
        @param kw: Any keyword arguments to pass to protocolFactory.
        """
        # assert protocolFactory is None or ITerminalProtocol.implementedBy(protocolFactory), "ServerProtocol.__init__ must be passed an ITerminalProtocol implementor"
        if protocolFactory is not None:
            self.protocolFactory = protocolFactory
        self.protocolArgs = a
        self.protocolKwArgs = kw
        self._cursorReports = []
    def connectionMade(self):
        if self.protocolFactory is not None:
            self.terminalProtocol = self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs)
            try:
                factory = self.factory
            except AttributeError:
                pass
            else:
                self.terminalProtocol.factory = factory
            self.terminalProtocol.makeConnection(self)
    def dataReceived(self, data):
        """
        Feed received bytes through a small state machine: plain bytes are
        delivered as keystrokes; ESC starts an escape sequence which is
        buffered until its terminator arrives, then dispatched.
        """
        for ch in iterbytes(data):
            if self.state == b'data':
                if ch == b'\x1b':
                    self.state = b'escaped'
                else:
                    self.terminalProtocol.keystrokeReceived(ch, None)
            elif self.state == b'escaped':
                if ch == b'[':
                    self.state = b'bracket-escaped'
                    self.escBuf = []
                elif ch == b'O':
                    self.state = b'low-function-escaped'
                else:
                    self.state = b'data'
                    self._handleShortControlSequence(ch)
            elif self.state == b'bracket-escaped':
                if ch == b'O':
                    self.state = b'low-function-escaped'
                elif ch.isalpha() or ch == b'~':
                    self._handleControlSequence(b''.join(self.escBuf) + ch)
                    del self.escBuf
                    self.state = b'data'
                else:
                    self.escBuf.append(ch)
            elif self.state == b'low-function-escaped':
                self._handleLowFunctionControlSequence(ch)
                self.state = b'data'
            else:
                raise ValueError("Illegal state")
    def _handleShortControlSequence(self, ch):
        # ESC followed by a plain byte is treated as ALT+byte.  self.ALT is
        # attached to the class by the setattr loop after the class body.
        self.terminalProtocol.keystrokeReceived(ch, self.ALT)
    def _handleControlSequence(self, buf):
        """
        Dispatch a complete ESC [ ... sequence to the parser method named
        after its terminator byte (via CST for non-identifier terminators).
        """
        buf = b'\x1b[' + buf
        f = getattr(self.controlSequenceParser,
                    CST.get(buf[-1:], buf[-1:]).decode("ascii"),
                    None)
        if f is None:
            self.unhandledControlSequence(buf)
        else:
            f(self, self.terminalProtocol, buf[:-1])
    def unhandledControlSequence(self, buf):
        self.terminalProtocol.unhandledControlSequence(buf)
    def _handleLowFunctionControlSequence(self, ch):
        functionKeys = {b'P': self.F1, b'Q': self.F2,
                        b'R': self.F3, b'S': self.F4}
        keyID = functionKeys.get(ch)
        if keyID is not None:
            self.terminalProtocol.keystrokeReceived(keyID, None)
        else:
            self.terminalProtocol.unhandledControlSequence(b'\x1b[O' + ch)
    class ControlSequenceParser:
        """
        One method per recognized sequence terminator; each receives the
        ServerProtocol, the terminal protocol handler, and the sequence
        bytes up to (but excluding) the terminator.
        """
        def A(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.UP_ARROW, None)
            else:
                handler.unhandledControlSequence(buf + b'A')
        def B(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.DOWN_ARROW, None)
            else:
                handler.unhandledControlSequence(buf + b'B')
        def C(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.RIGHT_ARROW, None)
            else:
                handler.unhandledControlSequence(buf + b'C')
        def D(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.LEFT_ARROW, None)
            else:
                handler.unhandledControlSequence(buf + b'D')
        def E(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.NUMPAD_MIDDLE, None)
            else:
                handler.unhandledControlSequence(buf + b'E')
        def F(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.END, None)
            else:
                handler.unhandledControlSequence(buf + b'F')
        def H(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.HOME, None)
            else:
                handler.unhandledControlSequence(buf + b'H')
        def R(self, proto, handler, buf):
            # Cursor-position report in response to reportCursorPosition.
            if not proto._cursorReports:
                handler.unhandledControlSequence(buf + b'R')
            elif buf.startswith(b'\x1b['):
                report = buf[2:]
                parts = report.split(b';')
                if len(parts) != 2:
                    handler.unhandledControlSequence(buf + b'R')
                else:
                    Pl, Pc = parts
                    try:
                        Pl, Pc = int(Pl), int(Pc)
                    except ValueError:
                        handler.unhandledControlSequence(buf + b'R')
                    else:
                        d = proto._cursorReports.pop(0)
                        d.callback((Pc - 1, Pl - 1))
            else:
                handler.unhandledControlSequence(buf + b'R')
        def Z(self, proto, handler, buf):
            if buf == b'\x1b[':
                handler.keystrokeReceived(proto.TAB, proto.SHIFT)
            else:
                handler.unhandledControlSequence(buf + b'Z')
        def tilde(self, proto, handler, buf):
            map = {1: proto.HOME, 2: proto.INSERT, 3: proto.DELETE,
                   4: proto.END, 5: proto.PGUP, 6: proto.PGDN,
                   15: proto.F5, 17: proto.F6, 18: proto.F7,
                   19: proto.F8, 20: proto.F9, 21: proto.F10,
                   23: proto.F11, 24: proto.F12}
            if buf.startswith(b'\x1b['):
                ch = buf[2:]
                try:
                    v = int(ch)
                except ValueError:
                    handler.unhandledControlSequence(buf + b'~')
                else:
                    symbolic = map.get(v)
                    if symbolic is not None:
                        handler.keystrokeReceived(map[v], None)
                    else:
                        handler.unhandledControlSequence(buf + b'~')
            else:
                handler.unhandledControlSequence(buf + b'~')
    controlSequenceParser = ControlSequenceParser()
    # ITerminalTransport
    def cursorUp(self, n=1):
        assert n >= 1
        self.cursorPos.y = max(self.cursorPos.y - n, 0)
        self.write(b'\x1b[' + intToBytes(n) + b'A')
    def cursorDown(self, n=1):
        assert n >= 1
        self.cursorPos.y = min(self.cursorPos.y + n, self.termSize.y - 1)
        self.write(b'\x1b[' + intToBytes(n) + b'B')
    def cursorForward(self, n=1):
        assert n >= 1
        self.cursorPos.x = min(self.cursorPos.x + n, self.termSize.x - 1)
        self.write(b'\x1b[' + intToBytes(n) + b'C')
    def cursorBackward(self, n=1):
        assert n >= 1
        self.cursorPos.x = max(self.cursorPos.x - n, 0)
        self.write(b'\x1b[' + intToBytes(n) + b'D')
    def cursorPosition(self, column, line):
        self.write(b'\x1b[' +
                   intToBytes(line + 1) +
                   b';' +
                   intToBytes(column + 1) +
                   b'H')
    def cursorHome(self):
        self.cursorPos.x = self.cursorPos.y = 0
        self.write(b'\x1b[H')
    def index(self):
        # ECMA48 5th Edition removes this
        self.cursorPos.y = min(self.cursorPos.y + 1, self.termSize.y - 1)
        self.write(b'\x1bD')
    def reverseIndex(self):
        self.cursorPos.y = max(self.cursorPos.y - 1, 0)
        self.write(b'\x1bM')
    def nextLine(self):
        self.cursorPos.x = 0
        self.cursorPos.y = min(self.cursorPos.y + 1, self.termSize.y - 1)
        self.write(b'\n')
    def saveCursor(self):
        self._savedCursorPos = Vector(self.cursorPos.x, self.cursorPos.y)
        self.write(b'\x1b7')
    def restoreCursor(self):
        self.cursorPos = self._savedCursorPos
        del self._savedCursorPos
        self.write(b'\x1b8')
    def setModes(self, modes):
        # XXX Support ANSI-Compatible private modes
        modesBytes = b';'.join([intToBytes(mode) for mode in modes])
        self.write(b'\x1b[' + modesBytes + b'h')
    def setPrivateModes(self, modes):
        modesBytes = b';'.join([intToBytes(mode) for mode in modes])
        self.write(b'\x1b[?' + modesBytes + b'h')
    def resetModes(self, modes):
        # XXX Support ANSI-Compatible private modes
        modesBytes = b';'.join([intToBytes(mode) for mode in modes])
        self.write(b'\x1b[' + modesBytes + b'l')
    def resetPrivateModes(self, modes):
        modesBytes = b';'.join([intToBytes(mode) for mode in modes])
        self.write(b'\x1b[?' + modesBytes + b'l')
    def applicationKeypadMode(self):
        self.write(b'\x1b=')
    def numericKeypadMode(self):
        self.write(b'\x1b>')
    def selectCharacterSet(self, charSet, which):
        # XXX Rewrite these as dict lookups
        if which == G0:
            which = b'('
        elif which == G1:
            which = b')'
        else:
            raise ValueError("`which' argument to selectCharacterSet must be G0 or G1")
        if charSet == CS_UK:
            charSet = b'A'
        elif charSet == CS_US:
            charSet = b'B'
        elif charSet == CS_DRAWING:
            charSet = b'0'
        elif charSet == CS_ALTERNATE:
            charSet = b'1'
        elif charSet == CS_ALTERNATE_SPECIAL:
            charSet = b'2'
        else:
            raise ValueError("Invalid `charSet' argument to selectCharacterSet")
        self.write(b'\x1b' + which + charSet)
    def shiftIn(self):
        self.write(b'\x15')
    def shiftOut(self):
        self.write(b'\x14')
    def singleShift2(self):
        self.write(b'\x1bN')
    def singleShift3(self):
        self.write(b'\x1bO')
    def selectGraphicRendition(self, *attributes):
        # each member of attributes must be a native string
        attrs = []
        for a in attributes:
            attrs.append(networkString(a))
        self.write(b'\x1b[' +
                   b';'.join(attrs) +
                   b'm')
    def horizontalTabulationSet(self):
        self.write(b'\x1bH')
    def tabulationClear(self):
        self.write(b'\x1b[q')
    def tabulationClearAll(self):
        self.write(b'\x1b[3q')
    def doubleHeightLine(self, top=True):
        if top:
            self.write(b'\x1b#3')
        else:
            self.write(b'\x1b#4')
    def singleWidthLine(self):
        self.write(b'\x1b#5')
    def doubleWidthLine(self):
        self.write(b'\x1b#6')
    def eraseToLineEnd(self):
        self.write(b'\x1b[K')
    def eraseToLineBeginning(self):
        self.write(b'\x1b[1K')
    def eraseLine(self):
        self.write(b'\x1b[2K')
    def eraseToDisplayEnd(self):
        self.write(b'\x1b[J')
    def eraseToDisplayBeginning(self):
        self.write(b'\x1b[1J')
    def eraseDisplay(self):
        self.write(b'\x1b[2J')
    def deleteCharacter(self, n=1):
        self.write(b'\x1b[' + intToBytes(n) + b'P')
    def insertLine(self, n=1):
        self.write(b'\x1b[' + intToBytes(n) + b'L')
    def deleteLine(self, n=1):
        self.write(b'\x1b[' + intToBytes(n) + b'M')
    def setScrollRegion(self, first=None, last=None):
        if first is not None:
            first = intToBytes(first)
        else:
            first = b''
        if last is not None:
            last = intToBytes(last)
        else:
            last = b''
        self.write(b'\x1b[' + first + b';' + last + b'r')
    def resetScrollRegion(self):
        self.setScrollRegion()
    def reportCursorPosition(self):
        # The Deferred fires from ControlSequenceParser.R when the client's
        # position report arrives.
        d = defer.Deferred()
        self._cursorReports.append(d)
        self.write(b'\x1b[6n')
        return d
    def reset(self):
        self.cursorPos.x = self.cursorPos.y = 0
        try:
            del self._savedCursorPos
        except AttributeError:
            pass
        self.write(b'\x1bc')
    # ITransport
    def write(self, data):
        """
        Write bytes to the client, translating each \\n into \\r\\n and
        remembering the last payload written in C{lastWrite}.
        """
        if data:
            if not isinstance(data, bytes):
                data = data.encode("utf-8")
            self.lastWrite = data
            self.transport.write(b'\r\n'.join(data.split(b'\n')))
    def writeSequence(self, data):
        self.write(b''.join(data))
    def loseConnection(self):
        self.reset()
        self.transport.loseConnection()
    def connectionLost(self, reason):
        if self.terminalProtocol is not None:
            try:
                self.terminalProtocol.connectionLost(reason)
            finally:
                self.terminalProtocol = None
# Add symbolic names for function keys: attaches ServerProtocol.UP_ARROW,
# ServerProtocol.F1, etc. as the byte-string constants from FUNCTION_KEYS.
for name, const in zip(_KEY_NAMES, FUNCTION_KEYS):
    setattr(ServerProtocol, name, const)
class ClientProtocol(protocol.Protocol):
    """
    The client side of a terminal session: parses the byte stream emitted by
    a terminal server, decoding escape and control sequences and dispatching
    them as method calls on an L{ITerminalTransport} provider
    (C{self.terminal}).
    """
    terminalFactory = None
    terminal = None
    # Parser state: b'data', b'escaped', or one of the _longs values below.
    state = b'data'
    _escBuf = None
    # Single-byte escape codes and the terminal method each one invokes.
    _shorts = {
        b'D': b'index',
        b'M': b'reverseIndex',
        b'E': b'nextLine',
        b'7': b'saveCursor',
        b'8': b'restoreCursor',
        b'=': b'applicationKeypadMode',
        b'>': b'numericKeypadMode',
        b'N': b'singleShift2',
        b'O': b'singleShift3',
        b'H': b'horizontalTabulationSet',
        b'c': b'reset'}
    # Escape introducers that require further bytes, mapped to parser states.
    _longs = {
        b'[': b'bracket-escape',
        b'(': b'select-g0',
        b')': b'select-g1',
        b'#': b'select-height-width'}
    _charsets = {
        b'A': CS_UK,
        b'B': CS_US,
        b'0': CS_DRAWING,
        b'1': CS_ALTERNATE,
        b'2': CS_ALTERNATE_SPECIAL}
    # Factory who instantiated me
    factory = None
    def __init__(self, terminalFactory=None, *a, **kw):
        """
        @param terminalFactory: A callable which will be invoked with
        *a, **kw and should return an ITerminalTransport provider.
        This will be invoked when this ClientProtocol establishes a
        connection.
        @param a: Any positional arguments to pass to terminalFactory.
        @param kw: Any keyword arguments to pass to terminalFactory.
        """
        # assert terminalFactory is None or ITerminalTransport.implementedBy(terminalFactory), "ClientProtocol.__init__ must be passed an ITerminalTransport implementor"
        if terminalFactory is not None:
            self.terminalFactory = terminalFactory
        self.terminalArgs = a
        self.terminalKwArgs = kw
    def connectionMade(self):
        if self.terminalFactory is not None:
            self.terminal = self.terminalFactory(*self.terminalArgs, **self.terminalKwArgs)
            self.terminal.factory = self.factory
            self.terminal.makeConnection(self)
    def connectionLost(self, reason):
        if self.terminal is not None:
            try:
                self.terminal.connectionLost(reason)
            finally:
                del self.terminal
    def dataReceived(self, data):
        """
        Parse the given data from a terminal server, dispatching to event
        handlers defined by C{self.terminal}.
        """
        # Plain bytes are batched into toWrite and flushed as one write()
        # whenever a control byte interrupts the run.
        toWrite = []
        for b in iterbytes(data):
            if self.state == b'data':
                if b == b'\x1b':
                    if toWrite:
                        self.terminal.write(b''.join(toWrite))
                        del toWrite[:]
                    self.state = b'escaped'
                elif b == b'\x14':
                    if toWrite:
                        self.terminal.write(b''.join(toWrite))
                        del toWrite[:]
                    self.terminal.shiftOut()
                elif b == b'\x15':
                    if toWrite:
                        self.terminal.write(b''.join(toWrite))
                        del toWrite[:]
                    self.terminal.shiftIn()
                elif b == b'\x08':
                    if toWrite:
                        self.terminal.write(b''.join(toWrite))
                        del toWrite[:]
                    self.terminal.cursorBackward()
                else:
                    toWrite.append(b)
            elif self.state == b'escaped':
                fName = self._shorts.get(b)
                if fName is not None:
                    self.state = b'data'
                    getattr(self.terminal, fName.decode("ascii"))()
                else:
                    state = self._longs.get(b)
                    if state is not None:
                        self.state = state
                    else:
                        self.terminal.unhandledControlSequence(b'\x1b' + b)
                        self.state = b'data'
            elif self.state == b'bracket-escape':
                if self._escBuf is None:
                    self._escBuf = []
                if b.isalpha() or b == b'~':
                    self._handleControlSequence(b''.join(self._escBuf), b)
                    del self._escBuf
                    self.state = b'data'
                else:
                    self._escBuf.append(b)
            elif self.state == b'select-g0':
                self.terminal.selectCharacterSet(self._charsets.get(b, b), G0)
                self.state = b'data'
            elif self.state == b'select-g1':
                self.terminal.selectCharacterSet(self._charsets.get(b, b), G1)
                self.state = b'data'
            elif self.state == b'select-height-width':
                self._handleHeightWidth(b)
                self.state = b'data'
            else:
                raise ValueError("Illegal state")
        if toWrite:
            self.terminal.write(b''.join(toWrite))
    def _handleControlSequence(self, buf, terminal):
        """
        Dispatch a complete ESC [ ... sequence to the parser method named
        after its terminator byte (via CST for non-identifier terminators).
        """
        f = getattr(self.controlSequenceParser, CST.get(terminal, terminal).decode("ascii"), None)
        if f is None:
            self.terminal.unhandledControlSequence(b'\x1b[' + buf + terminal)
        else:
            f(self, self.terminal, buf)
    class ControlSequenceParser:
        """
        One method per recognized sequence terminator; each receives the
        ClientProtocol, the terminal handler, and the parameter bytes.
        """
        def _makeSimple(ch, fName):
            n = 'cursor' + fName
            # ch is a native string because it doubles as the attribute name
            # in the exec below; encode it once for use in byte sequences.
            # (Previously the native str was concatenated to bytes, raising
            # TypeError on Python 3 in the error path.)
            chBytes = ch.encode("ascii")
            def simple(self, proto, handler, buf):
                if not buf:
                    getattr(handler, n)(1)
                else:
                    try:
                        m = int(buf)
                    except ValueError:
                        handler.unhandledControlSequence(b'\x1b[' + buf + chBytes)
                    else:
                        getattr(handler, n)(m)
            return simple
        # Generate the A/B/C/D handlers (cursor Up/Down/Forward/Backward).
        for (ch, fName) in (('A', 'Up'),
                            ('B', 'Down'),
                            ('C', 'Forward'),
                            ('D', 'Backward')):
            exec(ch + " = _makeSimple(ch, fName)")
        del _makeSimple
        def h(self, proto, handler, buf):
            # XXX - Handle '?' to introduce ANSI-Compatible private modes.
            try:
                modes = [int(mode) for mode in buf.split(b';')]
            except ValueError:
                handler.unhandledControlSequence(b'\x1b[' + buf + b'h')
            else:
                handler.setModes(modes)
        def l(self, proto, handler, buf):
            # XXX - Handle '?' to introduce ANSI-Compatible private modes.
            try:
                modes = [int(mode) for mode in buf.split(b';')]
            except ValueError:
                # b'l' (bytes): a native 'l' here raised TypeError on Python 3.
                handler.unhandledControlSequence(b'\x1b[' + buf + b'l')
            else:
                handler.resetModes(modes)
        def r(self, proto, handler, buf):
            parts = buf.split(b';')
            if len(parts) == 1:
                handler.setScrollRegion(None, None)
            elif len(parts) == 2:
                try:
                    if parts[0]:
                        pt = int(parts[0])
                    else:
                        pt = None
                    if parts[1]:
                        pb = int(parts[1])
                    else:
                        pb = None
                except ValueError:
                    handler.unhandledControlSequence(b'\x1b[' + buf + b'r')
                else:
                    handler.setScrollRegion(pt, pb)
            else:
                handler.unhandledControlSequence(b'\x1b[' + buf + b'r')
        def K(self, proto, handler, buf):
            if not buf:
                handler.eraseToLineEnd()
            elif buf == b'1':
                handler.eraseToLineBeginning()
            elif buf == b'2':
                handler.eraseLine()
            else:
                handler.unhandledControlSequence(b'\x1b[' + buf + b'K')
        def H(self, proto, handler, buf):
            handler.cursorHome()
        def J(self, proto, handler, buf):
            if not buf:
                handler.eraseToDisplayEnd()
            elif buf == b'1':
                handler.eraseToDisplayBeginning()
            elif buf == b'2':
                handler.eraseDisplay()
            else:
                handler.unhandledControlSequence(b'\x1b[' + buf + b'J')
        def P(self, proto, handler, buf):
            if not buf:
                handler.deleteCharacter(1)
            else:
                try:
                    n = int(buf)
                except ValueError:
                    handler.unhandledControlSequence(b'\x1b[' + buf + b'P')
                else:
                    handler.deleteCharacter(n)
        def L(self, proto, handler, buf):
            if not buf:
                handler.insertLine(1)
            else:
                try:
                    n = int(buf)
                except ValueError:
                    handler.unhandledControlSequence(b'\x1b[' + buf + b'L')
                else:
                    handler.insertLine(n)
        def M(self, proto, handler, buf):
            if not buf:
                handler.deleteLine(1)
            else:
                try:
                    n = int(buf)
                except ValueError:
                    handler.unhandledControlSequence(b'\x1b[' + buf + b'M')
                else:
                    handler.deleteLine(n)
        def n(self, proto, handler, buf):
            if buf == b'6':
                # NOTE(review): ITerminalTransport.reportCursorPosition is
                # documented to return a Deferred; this unpacking assumes a
                # synchronous (x, y) result — confirm before relying on it.
                x, y = handler.reportCursorPosition()
                proto.transport.write(b'\x1b['
                                      + intToBytes(x+1)
                                      + b';'
                                      + intToBytes(y+1)
                                      + b'R')
            else:
                handler.unhandledControlSequence(b'\x1b[' + buf + b'n')
        def m(self, proto, handler, buf):
            if not buf:
                handler.selectGraphicRendition(NORMAL)
            else:
                attrs = []
                for a in buf.split(b';'):
                    try:
                        a = int(a)
                    except ValueError:
                        pass
                    attrs.append(a)
                handler.selectGraphicRendition(*attrs)
    controlSequenceParser = ControlSequenceParser()
    def _handleHeightWidth(self, b):
        if b == b'3':
            self.terminal.doubleHeightLine(True)
        elif b == b'4':
            self.terminal.doubleHeightLine(False)
        elif b == b'5':
            self.terminal.singleWidthLine()
        elif b == b'6':
            self.terminal.doubleWidthLine()
        else:
            self.terminal.unhandledControlSequence(b'\x1b#' + b)
# Public API of this module, grouped by kind; names not listed here are
# implementation details.
__all__ = [
    # Interfaces
    'ITerminalProtocol', 'ITerminalTransport',
    # Symbolic constants
    'modes', 'privateModes', 'FUNCTION_KEYS',
    'CS_US', 'CS_UK', 'CS_DRAWING', 'CS_ALTERNATE', 'CS_ALTERNATE_SPECIAL',
    'G0', 'G1', 'G2', 'G3',
    'UNDERLINE', 'REVERSE_VIDEO', 'BLINK', 'BOLD', 'NORMAL',
    # Protocol classes
    'ServerProtocol', 'ClientProtocol']
"""
:mod:`pandas.io.formats.xml` is a module for formatting data in XML.
"""
from __future__ import annotations
import codecs
import io
from typing import (
TYPE_CHECKING,
Any,
final,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.missing import isna
from pandas.io.common import get_handle
from pandas.io.xml import get_data_from_filepath
if TYPE_CHECKING:
from pandas._typing import (
CompressionOptions,
FilePath,
ReadBuffer,
StorageOptions,
WriteBuffer,
)
from pandas import DataFrame
class _BaseXMLFormatter:
    """
    Subclass for formatting data in XML.
    Parameters
    ----------
    frame : DataFrame
        The DataFrame to serialize to XML.
    path_or_buffer : str or file-like
        This can be either a string of raw XML, a valid URL,
        file or file-like object.
    index : bool
        Whether to include index in xml document.
    root_name : str
        Name for root of xml document. Default is 'data'.
    row_name : str
        Name for row elements of xml document. Default is 'row'.
    na_rep : str
        Missing data representation.
    attr_cols : list
        List of columns to write as attributes in row element.
    elem_cols : list
        List of columns to write as children in row element.
    namespaces : dict
        The namespaces to define in XML document as dicts with key
        being namespace and value the URI.
    prefix : str
        The prefix for each element in XML document including root.
    encoding : str
        Encoding of xml object or document.
    xml_declaration : bool
        Whether to include xml declaration at top line item in xml.
    pretty_print : bool
        Whether to write xml document with line breaks and indentation.
    stylesheet : str or file-like
        A URL, file, file-like object, or a raw string containing XSLT.
    compression : str or dict, default 'infer'
        For on-the-fly compression of the output data. If 'infer' and 'path_or_buffer'
        is path-like, then detect compression from the following extensions: '.gz',
        '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
        (otherwise no compression).
        Set to ``None`` for no compression.
        Can also be a dict with key ``'method'`` set
        to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``}
        and other key-value pairs are forwarded to
        ``zipfile.ZipFile``, ``gzip.GzipFile``,
        ``bz2.BZ2File``, ``zstandard.ZstdCompressor``, ``lzma.LZMAFile`` or
        ``tarfile.TarFile``, respectively.
        As an example, the following could be passed for faster compression and to
        create a reproducible gzip archive:
        ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.
    storage_options : dict, optional
        Extra options that make sense for a particular storage connection, e.g.
        host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
        are forwarded to ``urllib.request.Request`` as header options. For other
        URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
        forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
        details, and for more examples on storage options refer `here
        <https://pandas.pydata.org/docs/user_guide/io.html?
        highlight=storage_options#reading-writing-remote-files>`_.
    See also
    --------
    pandas.io.formats.xml.EtreeXMLFormatter
    pandas.io.formats.xml.LxmlXMLFormatter
    """
    def __init__(
        self,
        frame: DataFrame,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions | None = None,
    ) -> None:
        self.frame = frame
        self.path_or_buffer = path_or_buffer
        self.index = index
        self.root_name = root_name
        self.row_name = row_name
        self.na_rep = na_rep
        self.attr_cols = attr_cols
        self.elem_cols = elem_cols
        self.namespaces = namespaces
        self.prefix = prefix
        self.encoding = encoding
        self.xml_declaration = xml_declaration
        self.pretty_print = pretty_print
        self.stylesheet = stylesheet
        self.compression: CompressionOptions = compression
        self.storage_options = storage_options
        # Column names before any index columns are prepended; used by
        # _handle_indexes to tell index columns apart from data columns.
        self.orig_cols = self.frame.columns.tolist()
        self.frame_dicts = self._process_dataframe()
        # Validation happens eagerly so a bad argument fails at
        # construction time rather than midway through serialization.
        self._validate_columns()
        self._validate_encoding()
        self.prefix_uri = self._get_prefix_uri()
        self._handle_indexes()
    def _build_tree(self) -> bytes:
        """
        Build tree from data.
        This method initializes the root and builds attributes and elements
        with optional namespaces.
        """
        raise AbstractMethodError(self)
    @final
    def _validate_columns(self) -> None:
        """
        Validate elem_cols and attr_cols.
        This method will check if columns is list-like.
        Raises
        ------
        TypeError
            * If either attr_cols or elem_cols is not list-like.
        """
        if self.attr_cols and not is_list_like(self.attr_cols):
            raise TypeError(
                f"{type(self.attr_cols).__name__} is not a valid type for attr_cols"
            )
        if self.elem_cols and not is_list_like(self.elem_cols):
            raise TypeError(
                f"{type(self.elem_cols).__name__} is not a valid type for elem_cols"
            )
    @final
    def _validate_encoding(self) -> None:
        """
        Validate encoding.
        This method will check if encoding is among listed under codecs.
        Raises
        ------
        LookupError
            * If encoding is not available in codecs.
        """
        # codecs.lookup raises LookupError for unknown codecs; the result
        # itself is not needed.
        codecs.lookup(self.encoding)
    @final
    def _process_dataframe(self) -> dict[int | str, dict[str, Any]]:
        """
        Adjust Data Frame to fit xml output.
        This method will adjust underlying data frame for xml output,
        including optionally replacing missing values and including indexes.
        """
        df = self.frame
        if self.index:
            # reset_index returns a copy, so the caller's frame is not
            # mutated by the fillna below.
            df = df.reset_index()
        if self.na_rep is not None:
            df = df.fillna(self.na_rep)
        return df.to_dict(orient="index")
    @final
    def _handle_indexes(self) -> None:
        """
        Handle indexes.
        This method will add indexes into attr_cols or elem_cols.
        """
        if not self.index:
            return
        # Any key present in a row dict but absent from the original
        # columns must have come from reset_index, i.e. it is an index.
        first_key = next(iter(self.frame_dicts))
        indexes: list[str] = [
            x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols
        ]
        if self.attr_cols:
            self.attr_cols = indexes + self.attr_cols
        if self.elem_cols:
            self.elem_cols = indexes + self.elem_cols
    def _get_prefix_uri(self) -> str:
        """
        Get uri of namespace prefix.
        This method retrieves corresponding URI to prefix in namespaces.
        Raises
        ------
        KeyError
            *If prefix is not included in namespace dict.
        """
        raise AbstractMethodError(self)
    @final
    def _other_namespaces(self) -> dict:
        """
        Define other namespaces.
        This method will build dictionary of namespaces attributes
        for root element, conditionally with optional namespaces and
        prefix.
        """
        nmsp_dict: dict[str, str] = {}
        if self.namespaces:
            # prefix_uri is stored as "{uri}", so [1:-1] strips the braces
            # for comparison against the raw URIs in the namespaces dict.
            nmsp_dict = {
                f"xmlns{p if p == '' else f':{p}'}": n
                for p, n in self.namespaces.items()
                if n != self.prefix_uri[1:-1]
            }
        return nmsp_dict
    @final
    def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any:
        """
        Create attributes of row.
        This method adds attributes using attr_cols to row element and
        works with tuples for multindex or hierarchical columns.
        """
        if not self.attr_cols:
            return elem_row
        for col in self.attr_cols:
            attr_name = self._get_flat_col_name(col)
            try:
                # Missing values are simply omitted from the attributes.
                if not isna(d[col]):
                    elem_row.attrib[attr_name] = str(d[col])
            except KeyError as err:
                raise KeyError(f"no valid column, {col}") from err
        return elem_row
    @final
    def _get_flat_col_name(self, col: str | tuple) -> str:
        """Return the namespaced, flattened tag/attribute name for *col*.

        MultiIndex (tuple) columns are joined with '_' unless a level is
        empty, in which case the levels are concatenated directly.
        """
        flat_col = col
        if isinstance(col, tuple):
            flat_col = (
                "".join([str(c) for c in col]).strip()
                if "" in col
                else "_".join([str(c) for c in col]).strip()
            )
        return f"{self.prefix_uri}{flat_col}"
    @cache_readonly
    def _sub_element_cls(self):
        # Subclasses return the SubElement factory of their XML backend.
        raise AbstractMethodError(self)
    @final
    def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None:
        """
        Create child elements of row.
        This method adds child elements using elem_cols to row element and
        works with tuples for multindex or hierarchical columns.
        """
        sub_element_cls = self._sub_element_cls
        if not self.elem_cols:
            return
        for col in self.elem_cols:
            elem_name = self._get_flat_col_name(col)
            try:
                # NA and empty-string values produce an empty element.
                val = None if isna(d[col]) or d[col] == "" else str(d[col])
                sub_element_cls(elem_row, elem_name).text = val
            except KeyError as err:
                raise KeyError(f"no valid column, {col}") from err
    @final
    def write_output(self) -> str | None:
        """Serialize the tree and either write it to path_or_buffer
        (returning None) or return it as a decoded string.
        """
        xml_doc = self._build_tree()
        if self.path_or_buffer is not None:
            with get_handle(
                self.path_or_buffer,
                "wb",
                compression=self.compression,
                storage_options=self.storage_options,
                is_text=False,
            ) as handles:
                handles.handle.write(xml_doc)
            return None
        else:
            return xml_doc.decode(self.encoding).rstrip()
class EtreeXMLFormatter(_BaseXMLFormatter):
    """
    Class for formatting data in xml using Python standard library
    modules: `xml.etree.ElementTree` and `xml.dom.minidom`.
    """
    def _build_tree(self) -> bytes:
        # Imports are local so the module can be imported without pulling
        # in the XML backends until one is actually used.
        from xml.etree.ElementTree import (
            Element,
            SubElement,
            tostring,
        )
        self.root = Element(
            f"{self.prefix_uri}{self.root_name}", attrib=self._other_namespaces()
        )
        for d in self.frame_dicts.values():
            elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")
            if not self.attr_cols and not self.elem_cols:
                # With no explicit column selection, every column becomes
                # a child element.
                self.elem_cols = list(d.keys())
                self._build_elems(d, elem_row)
            else:
                elem_row = self._build_attribs(d, elem_row)
                self._build_elems(d, elem_row)
        self.out_xml = tostring(
            self.root,
            method="xml",
            encoding=self.encoding,
            xml_declaration=self.xml_declaration,
        )
        if self.pretty_print:
            self.out_xml = self._prettify_tree()
        if self.stylesheet is not None:
            # XSLT requires lxml; the stdlib backend cannot apply one.
            raise ValueError(
                "To use stylesheet, you need lxml installed and selected as parser."
            )
        return self.out_xml
    def _get_prefix_uri(self) -> str:
        from xml.etree.ElementTree import register_namespace
        uri = ""
        if self.namespaces:
            # Register every prefix so tostring serializes them properly.
            for p, n in self.namespaces.items():
                if isinstance(p, str) and isinstance(n, str):
                    register_namespace(p, n)
            if self.prefix:
                try:
                    # ElementTree's Clark notation: "{uri}tag".
                    uri = f"{{{self.namespaces[self.prefix]}}}"
                except KeyError as err:
                    raise KeyError(
                        f"{self.prefix} is not included in namespaces"
                    ) from err
            elif "" in self.namespaces:
                uri = f"{{{self.namespaces['']}}}"
            else:
                uri = ""
        return uri
    @cache_readonly
    def _sub_element_cls(self):
        from xml.etree.ElementTree import SubElement
        return SubElement
    def _prettify_tree(self) -> bytes:
        """
        Output tree for pretty print format.
        This method will pretty print xml with line breaks and indentation.
        """
        from xml.dom.minidom import parseString
        dom = parseString(self.out_xml)
        return dom.toprettyxml(indent="  ", encoding=self.encoding)
class LxmlXMLFormatter(_BaseXMLFormatter):
    """
    Class for formatting data in xml using the third-party ``lxml``
    library (``lxml.etree``), which additionally supports XSLT
    stylesheets and native pretty printing.
    """
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._convert_empty_str_key()
    def _build_tree(self) -> bytes:
        """
        Build tree from data.
        This method initializes the root and builds attributes and elements
        with optional namespaces.
        """
        from lxml.etree import (
            Element,
            SubElement,
            tostring,
        )
        self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces)
        for d in self.frame_dicts.values():
            elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")
            if not self.attr_cols and not self.elem_cols:
                # With no explicit column selection, every column becomes
                # a child element.
                self.elem_cols = list(d.keys())
                self._build_elems(d, elem_row)
            else:
                elem_row = self._build_attribs(d, elem_row)
                self._build_elems(d, elem_row)
        self.out_xml = tostring(
            self.root,
            pretty_print=self.pretty_print,
            method="xml",
            encoding=self.encoding,
            xml_declaration=self.xml_declaration,
        )
        if self.stylesheet is not None:
            self.out_xml = self._transform_doc()
        return self.out_xml
    def _convert_empty_str_key(self) -> None:
        """
        Replace zero-length string in `namespaces`.
        This method will replace '' with None to align to `lxml`
        requirement that empty string prefixes are not allowed.
        """
        if self.namespaces and "" in self.namespaces.keys():
            self.namespaces[None] = self.namespaces.pop("", "default")
    def _get_prefix_uri(self) -> str:
        uri = ""
        if self.namespaces:
            if self.prefix:
                try:
                    # Clark notation: "{uri}tag".
                    uri = f"{{{self.namespaces[self.prefix]}}}"
                except KeyError as err:
                    raise KeyError(
                        f"{self.prefix} is not included in namespaces"
                    ) from err
            elif "" in self.namespaces:
                uri = f"{{{self.namespaces['']}}}"
            else:
                uri = ""
        return uri
    @cache_readonly
    def _sub_element_cls(self):
        from lxml.etree import SubElement
        return SubElement
    def _transform_doc(self) -> bytes:
        """
        Parse stylesheet from file or buffer and run it.
        This method will parse stylesheet object into tree for parsing
        conditionally by its specific object type, then transforms
        original tree with XSLT script.
        """
        from lxml.etree import (
            XSLT,
            XMLParser,
            fromstring,
            parse,
        )
        style_doc = self.stylesheet
        assert style_doc is not None  # is ensured by caller
        handle_data = get_data_from_filepath(
            filepath_or_buffer=style_doc,
            encoding=self.encoding,
            compression=self.compression,
            storage_options=self.storage_options,
        )
        with handle_data as xml_data:
            curr_parser = XMLParser(encoding=self.encoding)
            if isinstance(xml_data, io.StringIO):
                # StringIO content must be re-encoded to bytes before
                # handing it to the byte-oriented lxml parser.
                xsl_doc = fromstring(
                    xml_data.getvalue().encode(self.encoding), parser=curr_parser
                )
            else:
                xsl_doc = parse(xml_data, parser=curr_parser)
        transformer = XSLT(xsl_doc)
        new_doc = transformer(self.root)
        return bytes(new_doc)
# This file is part of Lazylibrarian.
#
# Lazylibrarian is free software':'you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lazylibrarian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lazylibrarian. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import threading
import traceback
import lazylibrarian
from lazylibrarian import logger, database, providers, nzbget, sabnzbd, classes, synology
from lazylibrarian.cache import fetchURL
from lazylibrarian.common import scheduleJob, setperm
from lazylibrarian.formatter import plural, unaccented_str, replace_all, getList, now, check_int
from lazylibrarian.notifiers import notify_snatch
from lazylibrarian.searchtorrents import TORDownloadMethod
from lib.fuzzywuzzy import fuzz
def cron_search_nzb_book():
    """Scheduler entry point: run a backlog NZB search on this thread."""
    # Rename the worker thread so log lines from the scheduled run are
    # attributable to the cron job rather than a generic thread name.
    threading.currentThread().name = "CRON-SEARCHNZB"
    search_nzb_book()
def search_nzb_book(books=None, reset=False):
    """Search NEWZNAB/TORZNAB providers for wanted books.

    books: optional list of dicts with a 'bookid' key; when None a full
        backlog search over every book with Status="Wanted" is run.
    reset: when True, reschedule the recurring search job after the run.

    Any exception is caught and logged rather than propagated, so the
    scheduler thread never dies on a search failure.
    """
    try:
        # When invoked directly on a generic worker thread, rename it so
        # log output is attributable to the NZB search.
        threadname = threading.currentThread().name
        if "Thread-" in threadname:
            threading.currentThread().name = "SEARCHNZB"
        if not lazylibrarian.USE_NZB():
            logger.warn('No NEWZNAB/TORZNAB providers set, check config')
            return
        myDB = database.DBConnection()
        searchlist = []
        if books is None:
            # We are performing a backlog search
            searchbooks = myDB.select(
                'SELECT BookID, AuthorName, Bookname, BookSub, BookAdded from books WHERE Status="Wanted" \
                    order by BookAdded desc')
        else:
            # The user has added a new book
            searchbooks = []
            for book in books:
                # NOTE(review): SQL is built by string interpolation; the
                # bookid comes from the local DB but a parameterized query
                # would be safer -- confirm before touching.
                searchbook = myDB.select('SELECT BookID, AuthorName, BookName, BookSub from books WHERE BookID="%s" \
                    AND Status="Wanted"' % book['bookid'])
                for terms in searchbook:
                    searchbooks.append(terms)
        if len(searchbooks) == 0:
            return
        logger.info('NZB Searching for %i book%s' % (len(searchbooks), plural(len(searchbooks))))
        for searchbook in searchbooks:
            # searchterm is only used for display purposes
            searchterm = searchbook['AuthorName'] + ' ' + searchbook['BookName']
            if searchbook['BookSub']:
                searchterm = searchterm + ': ' + searchbook['BookSub']
            searchlist.append(
                {"bookid": searchbook['BookID'],
                 "bookName": searchbook['BookName'],
                 "bookSub": searchbook['BookSub'],
                 "authorName": searchbook['AuthorName'],
                 "searchterm": searchterm})
        nzb_count = 0
        for book in searchlist:
            # first attempt, try author/title in category "book"
            resultlist, nproviders = providers.IterateOverNewzNabSites(book, 'book')
            if not nproviders:
                logger.warn('No NewzNab or TorzNab providers are set, check config')
                return  # no point in continuing
            found = processResultList(resultlist, book, "book")
            # if you can't find the book, try author/title without any "(extended details, series etc)"
            if not found and '(' in book['bookName']:
                resultlist, nproviders = providers.IterateOverNewzNabSites(book, 'shortbook')
                found = processResultList(resultlist, book, "shortbook")
            # if you can't find the book under "books", you might find under general search
            if not found:
                resultlist, nproviders = providers.IterateOverNewzNabSites(book, 'general')
                found = processResultList(resultlist, book, "general")
            if not found:
                logger.info("NZB Searches for %s returned no results." % book['searchterm'])
            # processResultList returns 2 ("True + True") when WE snatched
            # the book, True when someone else already had, False when not
            # found -- only our own snatches are counted here.
            if found > True:
                nzb_count += 1  # we found it
        logger.info("NZBSearch for Wanted items complete, found %s book%s" % (nzb_count, plural(nzb_count)))
        if reset:
            scheduleJob(action='Restart', target='search_nzb_book')
    except Exception:
        logger.error('Unhandled exception in search_nzb_book: %s' % traceback.format_exc())
def processResultList(resultlist, book, searchtype):
    """Score provider results against *book* and snatch the best match.

    Returns:
        2 (``True + True``) -- best match was snatched by this call;
        True -- book was already marked Snatched by someone else;
        False -- no acceptable match found.
    Callers rely on ``found > True`` to count only their own snatches.
    """
    myDB = database.DBConnection()
    # Character-replacement maps used to normalize titles before fuzzy
    # matching.  Note dictrepl also strips all digits from result titles.
    dictrepl = {'...': '', '.': ' ', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '',
                ',': ' ', '*': '', '(': '', ')': '', '[': '', ']': '', '#': '', '0': '', '1': '',
                '2': '', '3': '', '4': '', '5': '', '6': '', '7': '', '8': '', '9': '', '\'': '',
                ':': '', '!': '', '-': ' ', '\s\s': ' '}
    dic = {'...': '', '.': ' ', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ', '"': '',
           ',': '', '*': '', ':': '', ';': '', '\'': ''}
    match_ratio = int(lazylibrarian.MATCH_RATIO)
    reject_list = getList(lazylibrarian.REJECT_WORDS)
    author = unaccented_str(replace_all(book['authorName'], dic))
    title = unaccented_str(replace_all(book['bookName'], dic))
    matches = []
    for nzb in resultlist:
        nzb_Title = unaccented_str(replace_all(nzb['nzbtitle'], dictrepl)).strip()
        nzb_Title = re.sub(r"\s\s+", " ", nzb_Title)  # remove extra whitespace
        # Fuzzy match author and title separately against the result title.
        nzbAuthor_match = fuzz.token_set_ratio(author, nzb_Title)
        nzbBook_match = fuzz.token_set_ratio(title, nzb_Title)
        logger.debug(u"NZB author/book Match: %s/%s for %s" % (nzbAuthor_match, nzbBook_match, nzb_Title))
        nzburl = nzb['nzburl']
        rejected = False
        # Skip anything previously blacklisted as a failed download.
        # NOTE(review): SQL built via string interpolation from provider
        # data; parameterized queries would be safer -- confirm.
        already_failed = myDB.match('SELECT * from wanted WHERE NZBurl="%s" and Status="Failed"' % nzburl)
        if already_failed:
            logger.debug("Rejecting %s, blacklisted at %s" % (nzb_Title, already_failed['NZBprov']))
            rejected = True
        if not rejected:
            # Reject on configured words unless they appear in the wanted
            # author/title themselves.
            for word in reject_list:
                if word in nzb_Title.lower() and word not in author.lower() and word not in title.lower():
                    rejected = True
                    logger.debug("Rejecting %s, contains %s" % (nzb_Title, word))
                    break
        nzbsize_temp = nzb['nzbsize']  # Need to cater for when this is NONE (Issue 35)
        nzbsize_temp = check_int(nzbsize_temp, 1000)
        nzbsize = round(float(nzbsize_temp) / 1048576, 2)  # bytes -> MB
        maxsize = check_int(lazylibrarian.REJECT_MAXSIZE, 0)
        if not rejected:
            if maxsize and nzbsize > maxsize:
                rejected = True
                logger.debug("Rejecting %s, too large" % nzb_Title)
        if not rejected:
            # if nzbAuthor_match >= match_ratio and nzbBook_match >= match_ratio:
            bookid = book['bookid']
            # Embed the book id in the title so post-processing can link
            # the download back to the library entry.
            nzbTitle = (author + ' - ' + title + ' LL.(' + book['bookid'] + ')').strip()
            nzbprov = nzb['nzbprov']
            nzbmode = nzb['nzbmode']
            controlValueDict = {"NZBurl": nzburl}
            newValueDict = {
                "NZBprov": nzbprov,
                "BookID": bookid,
                "NZBdate": now(),  # when we asked for it
                "NZBsize": nzbsize,
                "NZBtitle": nzbTitle,
                "NZBmode": nzbmode,
                "Status": "Skipped"
            }
            score = (nzbBook_match + nzbAuthor_match) / 2  # as a percentage
            # lose a point for each extra word in the title so we get the closest match
            words = len(getList(nzb_Title))
            words -= len(getList(author))
            words -= len(getList(title))
            score -= abs(words)
            matches.append([score, nzb_Title, newValueDict, controlValueDict])
    if matches:
        highest = max(matches, key=lambda x: x[0])
        score = highest[0]
        nzb_Title = highest[1]
        newValueDict = highest[2]
        controlValueDict = highest[3]
        if score < match_ratio:
            logger.info(u'Nearest NZB match (%s%%): %s using %s search for %s %s' %
                        (score, nzb_Title, searchtype, author, title))
            return False
        logger.info(u'Best NZB match (%s%%): %s using %s search' %
                    (score, nzb_Title, searchtype))
        snatchedbooks = myDB.match('SELECT * from books WHERE BookID="%s" and Status="Snatched"' %
                                   newValueDict["BookID"])
        if snatchedbooks:
            logger.debug('%s already marked snatched' % nzb_Title)
            return True  # someone else found it
        else:
            logger.debug('%s adding to wanted' % nzb_Title)
            myDB.upsert("wanted", newValueDict, controlValueDict)
            # torznab results go through the torrent downloader; everything
            # else through the NZB downloader.
            if newValueDict['NZBmode'] == "torznab":
                snatch = TORDownloadMethod(newValueDict["BookID"], newValueDict["NZBtitle"], controlValueDict["NZBurl"])
            else:
                snatch = NZBDownloadMethod(newValueDict["BookID"], newValueDict["NZBtitle"], controlValueDict["NZBurl"])
            if snatch:
                logger.info('Downloading %s from %s' % (newValueDict["NZBtitle"], newValueDict["NZBprov"]))
                notify_snatch("%s from %s at %s" %
                              (newValueDict["NZBtitle"], newValueDict["NZBprov"], now()))
                scheduleJob(action='Start', target='processDir')
                # Returns 2 so the caller can distinguish "we snatched it"
                # from "someone else already had it" (plain True above).
                return True + True  # we found it
    else:
        logger.debug("No nzb's found for [%s] using searchtype %s" % (book["searchterm"], searchtype))
    return False
def NZBDownloadMethod(bookid=None, nzbtitle=None, nzburl=None):
    """Send an NZB to whichever download client is enabled in config.

    Tries SABnzbd, NZBGet, Synology and blackhole in that order; if more
    than one is enabled, the last one attempted determines the recorded
    Source/DownloadID.  Updates the books/wanted tables on success or
    failure and returns True on a successful hand-off, False otherwise.
    """
    myDB = database.DBConnection()
    Source = ''
    downloadID = ''
    if lazylibrarian.NZB_DOWNLOADER_SABNZBD and lazylibrarian.SAB_HOST:
        Source = "SABNZBD"
        downloadID = sabnzbd.SABnzbd(nzbtitle, nzburl, False)  # returns nzb_ids or False
    if lazylibrarian.NZB_DOWNLOADER_NZBGET and lazylibrarian.NZBGET_HOST:
        Source = "NZBGET"
        # headers = {'User-Agent': USER_AGENT}
        # data = request.request_content(url=nzburl, headers=headers)
        # NZBGet needs the nzb content itself, not just the URL.
        data, success = fetchURL(nzburl)
        if not success:
            logger.debug('Failed to read nzb data for nzbget: %s' % data)
            downloadID = ''
        else:
            nzb = classes.NZBDataSearchResult()
            nzb.extraInfo.append(data)
            nzb.name = nzbtitle
            nzb.url = nzburl
            downloadID = nzbget.sendNZB(nzb)
    if lazylibrarian.NZB_DOWNLOADER_SYNOLOGY and lazylibrarian.USE_SYNOLOGY and lazylibrarian.SYNOLOGY_HOST:
        Source = "SYNOLOGY_NZB"
        downloadID = synology.addTorrent(nzburl)  # returns nzb_ids or False
    if lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        Source = "BLACKHOLE"
        nzbfile, success = fetchURL(nzburl)
        if not success:
            logger.warn('Error fetching nzb from url [%s]: %s' % (nzburl, nzbfile))
            nzbfile = ''
        if nzbfile:
            # Drop the fetched nzb into the watched blackhole directory;
            # the download client is expected to pick it up from there.
            nzbname = str(nzbtitle) + '.nzb'
            nzbpath = os.path.join(lazylibrarian.NZB_BLACKHOLEDIR, nzbname)
            try:
                with open(nzbpath, 'w') as f:
                    f.write(nzbfile)
                logger.debug('NZB file saved to: ' + nzbpath)
                setperm(nzbpath)
                downloadID = nzbname
            except Exception as e:
                logger.error('%s not writable, NZB not saved. Error: %s' % (nzbpath, str(e)))
                downloadID = ''
    if not Source:
        logger.warn('No NZB download method is enabled, check config.')
        return False
    if downloadID:
        logger.debug('Nzbfile has been downloaded from ' + str(nzburl))
        # NOTE(review): SQL built via string interpolation -- values come
        # from our own DB/providers, but parameterized queries would be safer.
        myDB.action('UPDATE books SET status = "Snatched" WHERE BookID="%s"' % bookid)
        myDB.action('UPDATE wanted SET status = "Snatched", Source = "%s", DownloadID = "%s" WHERE NZBurl="%s"' %
                    (Source, downloadID, nzburl))
        return True
    else:
        logger.error(u'Failed to download nzb @ <a href="%s">%s</a>' % (nzburl, Source))
        myDB.action('UPDATE wanted SET status = "Failed" WHERE NZBurl="%s"' % nzburl)
        return False
import numpy as np
import pytest
from pandas import Categorical
import pandas._testing as tm
# Parametrized fixture: every test taking `allow_fill` runs twice,
# once per fill mode of Categorical.take.
@pytest.fixture(params=[True, False])
def allow_fill(request):
    """Boolean 'allow_fill' parameter for Categorical.take"""
    return request.param
class TestTake:
    """Tests for Categorical.take (positional indexing with optional fill).

    The `ordered` fixture used by some tests is presumably provided by a
    conftest.py -- it is not defined in this module.
    """
    # https://github.com/pandas-dev/pandas/issues/20664
    def test_take_default_allow_fill(self):
        # Default take with -1 must treat -1 positionally (no warning).
        cat = Categorical(["a", "b"])
        with tm.assert_produces_warning(None):
            result = cat.take([0, -1])
        assert result.equals(cat)
    def test_take_positive_no_warning(self):
        cat = Categorical(["a", "b"])
        with tm.assert_produces_warning(None):
            cat.take([0, 0])
    def test_take_bounds(self, allow_fill):
        # https://github.com/pandas-dev/pandas/issues/20664
        # Out-of-bounds indices raise IndexError in both fill modes,
        # but with different messages.
        cat = Categorical(["a", "b", "a"])
        if allow_fill:
            msg = "indices are out-of-bounds"
        else:
            msg = "index 4 is out of bounds for( axis 0 with)? size 3"
        with pytest.raises(IndexError, match=msg):
            cat.take([4, 5], allow_fill=allow_fill)
    def test_take_empty(self, allow_fill):
        # https://github.com/pandas-dev/pandas/issues/20664
        cat = Categorical([], categories=["a", "b"])
        if allow_fill:
            msg = "indices are out-of-bounds"
        else:
            msg = "cannot do a non-empty take from an empty axes"
        with pytest.raises(IndexError, match=msg):
            cat.take([0], allow_fill=allow_fill)
    def test_positional_take(self, ordered):
        cat = Categorical(["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered)
        result = cat.take([0, 1, 2], allow_fill=False)
        expected = Categorical(
            ["a", "a", "b"], categories=cat.categories, ordered=ordered
        )
        tm.assert_categorical_equal(result, expected)
    def test_positional_take_unobserved(self, ordered):
        # Unobserved category "c" must survive the take.
        cat = Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered)
        result = cat.take([1, 0], allow_fill=False)
        expected = Categorical(["b", "a"], categories=cat.categories, ordered=ordered)
        tm.assert_categorical_equal(result, expected)
    def test_take_allow_fill(self):
        # https://github.com/pandas-dev/pandas/issues/23296
        # With allow_fill=True, -1 means "fill with NaN", not "last element".
        cat = Categorical(["a", "a", "b"])
        result = cat.take([0, -1, -1], allow_fill=True)
        expected = Categorical(["a", np.nan, np.nan], categories=["a", "b"])
        tm.assert_categorical_equal(result, expected)
    def test_take_fill_with_negative_one(self):
        # -1 was a category
        cat = Categorical([-1, 0, 1])
        result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
        expected = Categorical([-1, -1, 0], categories=[-1, 0, 1])
        tm.assert_categorical_equal(result, expected)
    def test_take_fill_value(self):
        # https://github.com/pandas-dev/pandas/issues/23296
        cat = Categorical(["a", "b", "c"])
        result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
        expected = Categorical(["a", "b", "a"], categories=["a", "b", "c"])
        tm.assert_categorical_equal(result, expected)
    def test_take_fill_value_new_raises(self):
        # https://github.com/pandas-dev/pandas/issues/23296
        # Filling with a value outside the categories must raise.
        cat = Categorical(["a", "b", "c"])
        xpr = r"Cannot setitem on a Categorical with a new category \(d\)"
        with pytest.raises(TypeError, match=xpr):
            cat.take([0, 1, -1], fill_value="d", allow_fill=True)
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
# Template used when a FlatPage has no template_name of its own.
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
    """
    Public interface to the flat page view.
    Models: `flatpages.flatpages`
    Templates: Uses the template defined by the ``template_name`` field,
    or :template:`flatpages/default.html` if template_name is not defined.
    Context:
        flatpage
            `flatpages.flatpages` object
    Raises Http404 when no matching FlatPage exists for the current site.
    """
    if not url.startswith('/'):
        url = '/' + url
    site_id = get_current_site(request).id
    try:
        f = get_object_or_404(FlatPage,
            url=url, sites=site_id)
    except Http404:
        # With APPEND_SLASH, retry with a trailing slash; the lookup is
        # repeated first so a still-missing page re-raises Http404 instead
        # of redirecting to another 404.
        if not url.endswith('/') and settings.APPEND_SLASH:
            url += '/'
            f = get_object_or_404(FlatPage,
                url=url, sites=site_id)
            return HttpResponsePermanentRedirect('%s/' % request.path)
        else:
            raise
    return render_flatpage(request, f)
@csrf_protect
def render_flatpage(request, f):
    """
    Internal interface to the flat page view.

    Renders FlatPage *f*, redirecting anonymous users to the login page
    first when the page requires registration.
    """
    # If registration is required for accessing this page, and the user isn't
    # logged in, redirect to the login page.
    if f.registration_required and not request.user.is_authenticated():
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(request.path)
    if f.template_name:
        # Fall back to the default template if the named one is missing.
        t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
    else:
        t = loader.get_template(DEFAULT_TEMPLATE)
    # To avoid having to always use the "|safe" filter in flatpage templates,
    # mark the title and content as already safe (since they are raw HTML
    # content in the first place).
    f.title = mark_safe(f.title)
    f.content = mark_safe(f.content)
    c = RequestContext(request, {
        'flatpage': f,
    })
    response = HttpResponse(t.render(c))
    return response
---
# DaemonSet running the konnectivity network-proxy agent on every Linux
# node.  Tokens starting with __...__ (e.g. __APISERVER_IP__,
# __EXTRA_PARAMS__, __EXTRA_VOLS__) are template placeholders substituted
# by the deployment tooling before this manifest is applied.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: konnectivity-agent
  namespace: kube-system
  name: konnectivity-agent
spec:
  selector:
    matchLabels:
      k8s-app: konnectivity-agent
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: konnectivity-agent
    spec:
      priorityClassName: system-cluster-critical
      # Tolerate all NoExecute taints so the agent keeps running on
      # tainted/draining nodes.
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        - operator: "Exists"
          effect: "NoExecute"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - image: registry.k8s.io/kas-network-proxy/proxy-agent:v0.34.0
          name: konnectivity-agent
          command: ["/proxy-agent"]
          args: [
                  "--logtostderr=true",
                  "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
                  __EXTRA_PARAMS__
                  "--proxy-server-host=__APISERVER_IP__",
                  "--proxy-server-port=8132",
                  "--sync-interval=5s",
                  "--sync-interval-cap=30s",
                  "--probe-interval=5s",
                  "--keepalive-time=60s",
                  "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token",
                  "--agent-identifiers=ipv4=$(HOST_IP)"
                  ]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # Node IP, used to identify this agent to the proxy server.
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          # NOTE(review): cpu has only a request and memory only a limit
          # -- confirm this asymmetry is intentional.
          resources:
            requests:
              cpu: 50m
            limits:
              memory: 30Mi
          volumeMounts:
            __EXTRA_VOL_MNTS__
            - mountPath: /var/run/secrets/tokens
              name: konnectivity-agent-token
          livenessProbe:
            httpGet:
              port: 8093
              path: /healthz
            initialDelaySeconds: 15
            timeoutSeconds: 15
      serviceAccountName: konnectivity-agent
      volumes:
        __EXTRA_VOLS__
        # Bound, audience-scoped service-account token used to
        # authenticate to the konnectivity server.
        - name: konnectivity-agent-token
          projected:
            sources:
              - serviceAccountToken:
                  path: konnectivity-agent-token
                  audience: system:konnectivity-server
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import sys, math

import IECore
from IECore import *
class TestImageReadersAndWriters(unittest.TestCase):
"""
generic image reader and writer testing
"""
def testHandlers(self):
"""
test each reader and writer with grayscale (equal R,G,B channels)
image
"""
#print ''
image_handlers = ["CIN", "JPEG", "TIFF", "EXR"]
# construct the simple ImagePrimitive
width = 16
height = 16
channel = {}
channel["R"] = IECore.FloatVectorData(width * height)
for i in range(0, width*height):
channel["R"][i] = i / 255.0
channel["G"] = IECore.FloatVectorData(width * height)
for i in range(0, width*height):
channel["G"][i] = ((i + 128)%(width*height-1)) / 255.0
channel["B"] = IECore.FloatVectorData(width * height)
for i in range(0, width*height):
channel["B"][i] = ((i + 203)%(width*height-1)) / 255.0
b = IECore.Box2i(IECore.V2i(0, 0), IECore.V2i(width-1, height-1))
image = IECore.ImagePrimitive(b, b)
image["R"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel["R"])
image["G"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel["G"])
image["B"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel["B"])
# test the handlers
for image_type in image_handlers:
# write the file
writer = IECore.Writer.create(image, "test/generic_image.%s" % image_type.lower())
self.assert_( writer.isInstanceOf( "ImageWriter" ) )
self.assert_( writer["object"].isInstanceOf( "ObjectParameter" ) )
self.assertEqual( writer["object"].validTypes(), [IECore.TypeId.ImagePrimitive] )
self.assert_( writer.resultParameter().isInstanceOf( "ObjectParameter" ) )
self.assertEqual( writer.resultParameter().validTypes(), [IECore.TypeId.ImagePrimitive] )
writer.write()
# read the file
reader = IECore.Reader.create("test/generic_image.%s" % image_type.lower())
self.assert_( reader.isInstanceOf( "ImageReader" ) )
self.assert_( reader.resultParameter().isInstanceOf( "ObjectParameter" ) )
self.assertEqual( reader.resultParameter().validTypes(), [IECore.TypeId.ImagePrimitive] )
read_image = reader.read()
# write back out to verify
IECore.Writer.create(read_image, "test/generic_image.blab.%s" % image_type.lower()).write()
# compare pixel values
for cn in ["R", "G", "B"]:
read_channel = read_image[cn].data
sum_of_diff = 0.0
for i in range(0, width*height):
# big epsilon for now. this should depend on the encoding. for example,
# CIN will have some big errors because we quantize down to a 10-bit log space.
# JPEG hurts on these non-photolike images
epsilon = image_type == 'CIN' and 0.005 or image_type == 'JPEG' and 0.5 or 0.000001
diff = read_channel[i] - channel[cn][i]
sum_of_diff += diff
if math.fabs(diff) > epsilon:
print '%s channel %s: difference between write and read at %d: %f (%f read vs %f generated)' % (image_type, cn, i, diff, read_channel[i], channel[cn][i])
raise Exception("bad image write or read: encoding is %s" % image_type)
if math.fabs(sum_of_diff) > (image_type == 'JPEG' and 0.2 or 0.1):
print 'sum of differences:', sum_of_diff
raise Exception("bad image write or reader: encoding is %s" % image_type)
def testWindows(self):
"""
test each reader and writer combo for windowed (axis-aligned sub-rectangle) operations
"""
#print ''
image_handlers = ["CIN", "JPEG", "TIFF", "EXR"]
# construct the simple ImagePrimitive
width = 16
height = 16
channel = IECore.FloatVectorData(width * height)
for i in range(0, width*height):
channel[i] = i / 255.0
b = IECore.Box2i(IECore.V2i(0, 0), IECore.V2i(width-1, height-1))
image = IECore.ImagePrimitive(b, b)
image["R"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
image["G"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
image["B"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
# get smaller sub-window
sub_box = Box2iData(Box2i(V2i(0, 0), V2i(7, 7)))
# test the handlers
for image_type in image_handlers:
# write the file
IECore.Writer.create(image, "test/generic_image.window.%s" % image_type.lower()).write()
# read the file with a 1/4 area subwindow starting at origin
r = IECore.Reader.create("test/generic_image.window.%s" % image_type.lower())
r.parameters().dataWindow.setValue(sub_box)
read_image = r.read()
# write out the file
IECore.Writer.create(read_image, "test/generic_image_cut.%s" % image_type.lower()).write()
# assert proper image extents
# compare pixel values
for cn in ["R", "G", "B"]:
read_channel = read_image[cn].data
sum_of_diff = 0.0
# big epsilon for now. this should depend on the encoding. for example,
# CIN will have some big errors because we quantize down to a 10-bit log space.
epsilon = image_type == 'CIN' and 0.005 or image_type == 'JPEG' and 0.00025 or 0.000001
for y in range(0, 8):
for x in range(0, 8):
ci = 16*y + x
i = 8*y + x
diff = read_channel[i] - channel[ci]
sum_of_diff += diff
if math.fabs(diff) > epsilon:
print '%s channel %s: difference between write and read at %d: %f' % (image_type, cn, i, diff)
raise Exception("bad image write or read: encoding is %s" % image_type)
if math.fabs(sum_of_diff) > 0.1:
print 'sum of differences:', sum_of_diff
raise Exception("bad image write or reader: encoding is %s" % image_type)
def testOutsideWindows(self):
"""
test each reader and writer combo for windowed (axis-aligned sub-rectangle) operations
"""
#print ''
image_handlers = ["CIN", "JPEG", "TIFF", "EXR"]
# construct the simple ImagePrimitive
width = 16
height = 16
channel = IECore.FloatVectorData(width * height)
for i in range(0, width*height):
channel[i] = i / 255.0
b = IECore.Box2i(IECore.V2i(0, 0), IECore.V2i(width-1, height-1))
image = IECore.ImagePrimitive(b, b)
image["R"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
image["G"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
image["B"] = IECore.PrimitiveVariable(IECore.PrimitiveVariable.Interpolation.Vertex, channel)
# get smaller sub-window
sub_box = Box2iData(Box2i(V2i(-4, -4), V2i(21, 21)))
# test the handlers
for image_type in image_handlers:
# write the file
IECore.Writer.create(image, "test/generic_image.outside.%s" % image_type.lower()).write()
# read the file with a 1/4 area subwindow starting at origin
r = IECore.Reader.create("test/generic_image.outside.%s" % image_type.lower())
r.parameters().dataWindow.setValue(sub_box)
read_image = r.read()
# write out the file
IECore.Writer.create(read_image, "test/generic_image_outside.%s" % image_type.lower()).write()
# assert proper image extents
# ...
# compare pixel values
for cn in ["R", "G", "B"]:
read_channel = read_image[cn].data
sum_of_diff = 0.0
# big epsilon for now. this should depend on the encoding. for example,
# CIN will have some big errors because we quantize down to a 10-bit log space.
epsilon = image_type == 'CIN' and 0.005 or image_type == 'JPEG' and 0.00025 or 0.000001
for y in range(0, 26):
for x in range(0, 26):
i = 26*y + x
if y < 4 or x < 4 or y >= 4 + 16 or x >= 4 + 16:
if read_channel[i] > 0.0001:
raise Exception("non-zero channel value (%f) in outside region (x,y %d,%d) of windowed read (%s encoding)" % (read_channel[i], x, y, image_type))
else:
ci = 16*(y-4) + (x-4)
diff = read_channel[i] - channel[ci]
sum_of_diff += diff
if math.fabs(diff) > epsilon:
print '%s channel %s: difference between write and read at %d, %d: %f' % (image_type, cn, x, y, diff)
raise Exception("bad image write or read: encoding is %s" % image_type)
if math.fabs(sum_of_diff) > 0.1:
print 'sum of differences:', sum_of_diff
raise Exception("bad image write or reader: encoding is %s" % image_type)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by the Ansible documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# YAML documentation block rendered by `ansible-doc tower_label`.
DOCUMENTATION = '''
---
module: tower_label
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower label.
description:
    - Create, update, or destroy Ansible Tower labels. See
      U(https://www.ansible.com/tower) for an overview.
options:
    name:
      description:
        - Name to use for the label.
      required: True
      default: null
    organization:
      description:
        - Organization the label should be applied to.
      required: True
      default: null
    state:
      description:
        - Desired state of the resource.
      required: False
      default: "present"
      choices: ["present", "absent"]
extends_documentation_fragment: tower
'''

EXAMPLES = '''
- name: Add label to tower organization
  tower_label:
    name: Custom Label
    organization: My Organization
    state: present
    tower_config_file: "~/tower_cli.cfg"
'''

from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI

try:
    import tower_cli
    import tower_cli.utils.exceptions as exc

    from tower_cli.conf import settings
except ImportError:
    # HAS_TOWER_CLI (from module_utils) records whether tower-cli imported;
    # a missing dependency is reported in main() via module.fail_json.
    pass
def main():
    """Create or remove an Ansible Tower label via tower-cli.

    Reads ``name``, ``organization`` and ``state`` from the module
    parameters and exits through ``module.exit_json`` with the label id
    (when present) and a ``changed`` flag, or ``module.fail_json`` on any
    tower-cli error.
    """
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        organization=dict(required=True),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    name = module.params.get('name')
    organization = module.params.get('organization')
    state = module.params.get('state')

    json_output = {'label': name, 'state': state}

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        label = tower_cli.get_resource('label')

        try:
            org_res = tower_cli.get_resource('organization')
            org = org_res.get(name=organization)

            if state == 'present':
                result = label.modify(name=name, organization=org['id'], create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = label.delete(name=name, organization=org['id'])
        except exc.NotFound as excinfo:
            # raised by org_res.get when the organization does not exist
            module.fail_json(msg='Failed to update label, organization not found: {0}'.format(excinfo), changed=False)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            # bug fix: exc.NotFound was also listed here, but it is already
            # caught by the clause above, so it was unreachable
            module.fail_json(msg='Failed to update label: {0}'.format(excinfo), changed=False)

    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
# Imported after main() is defined, per the historical Ansible module
# layout; the import still executes before main() is called below.
from ansible.module_utils.basic import AnsibleModule

if __name__ == '__main__':
    main()
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Block Device Mapping Code.
"""
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova import test
from nova.tests.unit import matchers
class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
    """Test Case for Block Device Mapping."""

    def fake_ec2_vol_id_to_uuid(obj, ec2_id):
        # stand-in for ec2utils.ec2_vol_id_to_uuid, installed via stubs
        if ec2_id == 'vol-87654321':
            return '22222222-3333-4444-5555-666666666666'
        elif ec2_id == 'vol-98765432':
            return '77777777-8888-9999-0000-aaaaaaaaaaaa'
        else:
            return 'OhNoooo'

    def fake_ec2_snap_id_to_uuid(obj, ec2_id):
        # stand-in for ec2utils.ec2_snap_id_to_uuid, installed via stubs
        if ec2_id == 'snap-12345678':
            return '00000000-1111-2222-3333-444444444444'
        elif ec2_id == 'snap-23456789':
            return '11111111-2222-3333-4444-555555555555'
        else:
            return 'OhNoooo'

    def _assertApply(self, action, bdm_list):
        # apply `action` to each input and match against its expected dict
        for bdm, expected_result in bdm_list:
            self.assertThat(action(bdm), matchers.DictMatches(expected_result))

    def test_parse_block_device_mapping(self):
        self.stubs.Set(ec2utils,
                       'ec2_vol_id_to_uuid',
                       self.fake_ec2_vol_id_to_uuid)
        self.stubs.Set(ec2utils,
                       'ec2_snap_id_to_uuid',
                       self.fake_ec2_snap_id_to_uuid)

        bdm_list = [
            ({'device_name': '/dev/fake0',
              'ebs': {'snapshot_id': 'snap-12345678',
                      'volume_size': 1}},
             {'device_name': '/dev/fake0',
              'snapshot_id': '00000000-1111-2222-3333-444444444444',
              'volume_size': 1,
              'delete_on_termination': True}),

            ({'device_name': '/dev/fake1',
              'ebs': {'snapshot_id': 'snap-23456789',
                      'delete_on_termination': False}},
             {'device_name': '/dev/fake1',
              'snapshot_id': '11111111-2222-3333-4444-555555555555',
              'delete_on_termination': False}),

            ({'device_name': '/dev/fake2',
              'ebs': {'snapshot_id': 'vol-87654321',
                      'volume_size': 2}},
             {'device_name': '/dev/fake2',
              'volume_id': '22222222-3333-4444-5555-666666666666',
              'volume_size': 2,
              'delete_on_termination': True}),

            ({'device_name': '/dev/fake3',
              'ebs': {'snapshot_id': 'vol-98765432',
                      'delete_on_termination': False}},
             {'device_name': '/dev/fake3',
              'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
              'delete_on_termination': False}),

            ({'device_name': '/dev/fake4',
              'ebs': {'no_device': True}},
             {'device_name': '/dev/fake4',
              'no_device': True}),

            ({'device_name': '/dev/fake5',
              'virtual_name': 'ephemeral0'},
             {'device_name': '/dev/fake5',
              'virtual_name': 'ephemeral0'}),

            ({'device_name': '/dev/fake6',
              'virtual_name': 'swap'},
             {'device_name': '/dev/fake6',
              'virtual_name': 'swap'}),
        ]

        self._assertApply(cloud._parse_block_device_mapping, bdm_list)

    def test_format_block_device_mapping(self):
        bdm_list = [
            ({'device_name': '/dev/fake0',
              'snapshot_id': 0x12345678,
              'volume_size': 1,
              'delete_on_termination': True},
             {'deviceName': '/dev/fake0',
              'ebs': {'snapshotId': 'snap-12345678',
                      'volumeSize': 1,
                      'deleteOnTermination': True}}),

            ({'device_name': '/dev/fake1',
              'snapshot_id': 0x23456789},
             {'deviceName': '/dev/fake1',
              'ebs': {'snapshotId': 'snap-23456789'}}),

            ({'device_name': '/dev/fake2',
              'snapshot_id': 0x23456789,
              'delete_on_termination': False},
             {'deviceName': '/dev/fake2',
              'ebs': {'snapshotId': 'snap-23456789',
                      'deleteOnTermination': False}}),

            ({'device_name': '/dev/fake3',
              'volume_id': 0x12345678,
              'volume_size': 1,
              'delete_on_termination': True},
             {'deviceName': '/dev/fake3',
              'ebs': {'snapshotId': 'vol-12345678',
                      'volumeSize': 1,
                      'deleteOnTermination': True}}),

            ({'device_name': '/dev/fake4',
              'volume_id': 0x23456789},
             {'deviceName': '/dev/fake4',
              'ebs': {'snapshotId': 'vol-23456789'}}),

            ({'device_name': '/dev/fake5',
              'volume_id': 0x23456789,
              'delete_on_termination': False},
             {'deviceName': '/dev/fake5',
              'ebs': {'snapshotId': 'vol-23456789',
                      'deleteOnTermination': False}}),
        ]

        self._assertApply(cloud._format_block_device_mapping, bdm_list)

    def test_format_mapping(self):
        properties = {
            'mappings': [
                {'virtual': 'ami',
                 'device': 'sda1'},
                {'virtual': 'root',
                 'device': '/dev/sda1'},

                {'virtual': 'swap',
                 'device': 'sdb1'},
                {'virtual': 'swap',
                 'device': 'sdb2'},
                {'virtual': 'swap',
                 'device': 'sdb3'},
                {'virtual': 'swap',
                 'device': 'sdb4'},

                {'virtual': 'ephemeral0',
                 'device': 'sdc1'},
                {'virtual': 'ephemeral1',
                 'device': 'sdc2'},
                {'virtual': 'ephemeral2',
                 'device': 'sdc3'},
            ],

            'block_device_mapping': [
                # root
                {'device_name': '/dev/sda1',
                 'snapshot_id': 0x12345678,
                 'delete_on_termination': False},

                # overwrite swap
                {'device_name': '/dev/sdb2',
                 'snapshot_id': 0x23456789,
                 'delete_on_termination': False},
                {'device_name': '/dev/sdb3',
                 'snapshot_id': 0x3456789A},
                {'device_name': '/dev/sdb4',
                 'no_device': True},

                # overwrite ephemeral
                {'device_name': '/dev/sdc2',
                 'snapshot_id': 0x3456789A,
                 'delete_on_termination': False},
                {'device_name': '/dev/sdc3',
                 'snapshot_id': 0x456789AB},
                {'device_name': '/dev/sdc4',
                 'no_device': True},

                # volume
                {'device_name': '/dev/sdd1',
                 'snapshot_id': 0x87654321,
                 'delete_on_termination': False},
                {'device_name': '/dev/sdd2',
                 'snapshot_id': 0x98765432},
                {'device_name': '/dev/sdd3',
                 'snapshot_id': 0xA9875463},
                {'device_name': '/dev/sdd4',
                 'no_device': True}]}

        expected_result = {
            'blockDeviceMapping': [
                # root
                {'deviceName': '/dev/sda1',
                 'ebs': {'snapshotId': 'snap-12345678',
                         'deleteOnTermination': False}},

                # swap
                {'deviceName': '/dev/sdb1',
                 'virtualName': 'swap'},
                {'deviceName': '/dev/sdb2',
                 'ebs': {'snapshotId': 'snap-23456789',
                         'deleteOnTermination': False}},
                {'deviceName': '/dev/sdb3',
                 'ebs': {'snapshotId': 'snap-3456789a'}},

                # ephemeral
                {'deviceName': '/dev/sdc1',
                 'virtualName': 'ephemeral0'},
                {'deviceName': '/dev/sdc2',
                 'ebs': {'snapshotId': 'snap-3456789a',
                         'deleteOnTermination': False}},
                {'deviceName': '/dev/sdc3',
                 'ebs': {'snapshotId': 'snap-456789ab'}},

                # volume
                {'deviceName': '/dev/sdd1',
                 'ebs': {'snapshotId': 'snap-87654321',
                         'deleteOnTermination': False}},
                {'deviceName': '/dev/sdd2',
                 'ebs': {'snapshotId': 'snap-98765432'}},
                {'deviceName': '/dev/sdd3',
                 'ebs': {'snapshotId': 'snap-a9875463'}}]}

        result = {}
        cloud._format_mappings(properties, result)
        # Bug fix: list.sort() sorts in place and returns None, so the old
        # assertion compared None == None and could never fail (and .sort()
        # on a list of dicts raises TypeError on Python 3).  Compare the
        # lists themselves, ordered by device name.
        by_device = lambda bdm: bdm['deviceName']
        self.assertEqual(sorted(result['blockDeviceMapping'], key=by_device),
                         sorted(expected_result['blockDeviceMapping'], key=by_device))
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API.
 *
 * Internal declarations shared between the generic hash implementations.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _LOCAL_CRYPTO_HASH_H
#define _LOCAL_CRYPTO_HASH_H

#include <crypto/internal/hash.h>

#include "internal.h"

/* Shared crypto_type instance for shash algorithms. */
extern const struct crypto_type crypto_shash_type;

/* Common preparation/validation of a hash algorithm before registration. */
int hash_prepare_alg(struct hash_alg_common *alg);

#endif /* _LOCAL_CRYPTO_HASH_H */
from __future__ import absolute_import
from __future__ import unicode_literals
from functools import reduce
import six
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        # client: docker API client used for every operation below
        # dictionary: raw container JSON (ps- or inspect-shaped)
        # has_been_inspected: True when `dictionary` is full inspect output
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected
        self.log_stream = None  # set by attach_log_stream()

    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.
        Returns None when no usable name can be extracted.
        """
        name = get_container_name(dictionary)
        if name is None:
            return None

        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            'Name': '/' + name,
        }
        return cls(client, new_dictionary, **kwargs)

    @classmethod
    def from_id(cls, client, id):
        """Build a fully inspected Container from a container id."""
        return cls(client, client.inspect_container(id), has_been_inspected=True)

    @classmethod
    def create(cls, client, **options):
        """Create a container server-side and return its Container object."""
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])

    @property
    def id(self):
        return self.dictionary['Id']

    @property
    def image(self):
        return self.dictionary['Image']

    @property
    def image_config(self):
        return self.client.inspect_image(self.image)

    @property
    def short_id(self):
        # first 12 hex chars of the id, as shown by `docker ps`
        return self.id[:12]

    @property
    def name(self):
        # strip the leading '/' the API prepends to names
        return self.dictionary['Name'][1:]

    @property
    def service(self):
        return self.labels.get(LABEL_SERVICE)

    @property
    def name_without_project(self):
        # "<project>_<service>_<n>" -> "<service>_<n>"; otherwise unchanged
        project = self.labels.get(LABEL_PROJECT)

        if self.name.startswith('{0}_{1}'.format(project, self.service)):
            return '{0}_{1}'.format(self.service, self.number)
        else:
            return self.name

    @property
    def number(self):
        # per-service container index, taken from the compose label
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)

    @property
    def ports(self):
        # port data requires inspect output; ps output carries none
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}

    @property
    def human_readable_ports(self):
        def format_port(private, public):
            # unpublished ports render as just the private spec
            if not public:
                return private
            return '{HostIp}:{HostPort}->{private}'.format(
                private=private, **public[0])

        return ', '.join(format_port(*item)
                         for item in sorted(six.iteritems(self.ports)))

    @property
    def labels(self):
        return self.get('Config.Labels') or {}

    @property
    def stop_signal(self):
        return self.get('Config.StopSignal')

    @property
    def log_config(self):
        return self.get('HostConfig.LogConfig') or None

    @property
    def human_readable_state(self):
        if self.is_paused:
            return 'Paused'
        if self.is_restarting:
            return 'Restarting'
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')

    @property
    def human_readable_command(self):
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)

    @property
    def environment(self):
        def parse_env(var):
            # variables may be declared without a value ("FOO")
            if '=' in var:
                return var.split("=", 1)
            return var, None
        return dict(parse_env(var) for var in self.get('Config.Env') or [])

    @property
    def exit_code(self):
        return self.get('State.ExitCode')

    @property
    def is_running(self):
        return self.get('State.Running')

    @property
    def is_restarting(self):
        return self.get('State.Restarting')

    @property
    def is_paused(self):
        return self.get('State.Paused')

    @property
    def log_driver(self):
        return self.get('HostConfig.LogConfig.Type')

    @property
    def has_api_logs(self):
        # every configured log driver except 'none' serves logs via the API
        log_type = self.log_driver
        return not log_type or log_type != 'none'

    def attach_log_stream(self):
        """Attach a log stream when the configured log driver exposes logs
        through the API (i.e. any driver except 'none').
        """
        if self.has_api_logs:
            self.log_stream = self.attach(stdout=True, stderr=True, stream=True)

    def get(self, key):
        """Return a value from the container or None if the value is not set.

        :param key: a string using dotted notation for nested dictionary
                    lookups
        """
        self.inspect_if_not_inspected()

        def get_value(dictionary, key):
            # treat missing intermediate dicts as empty
            return (dictionary or {}).get(key)

        return reduce(get_value, key.split('.'), self.dictionary)

    def get_local_port(self, port, protocol='tcp'):
        """Return "host_ip:host_port" bound to a container port, or None."""
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None

    def get_mount(self, mount_dest):
        """Return the mount dict whose Destination matches, or None."""
        for mount in self.get('Mounts'):
            if mount['Destination'] == mount_dest:
                return mount
        return None

    def start(self, **options):
        return self.client.start(self.id, **options)

    def stop(self, **options):
        return self.client.stop(self.id, **options)

    def pause(self, **options):
        return self.client.pause(self.id, **options)

    def unpause(self, **options):
        return self.client.unpause(self.id, **options)

    def kill(self, **options):
        return self.client.kill(self.id, **options)

    def restart(self, **options):
        return self.client.restart(self.id, **options)

    def remove(self, **options):
        return self.client.remove_container(self.id, **options)

    def create_exec(self, command, **options):
        return self.client.exec_create(self.id, command, **options)

    def start_exec(self, exec_id, **options):
        return self.client.exec_start(exec_id, **options)

    def rename_to_tmp_name(self):
        """Rename the container to a hopefully unique temporary container name
        by prepending the short id.
        """
        self.client.rename(
            self.id,
            '%s_%s' % (self.short_id, self.name)
        )

    def inspect_if_not_inspected(self):
        if not self.has_been_inspected:
            self.inspect()

    def wait(self):
        return self.client.wait(self.id)

    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)

    def inspect(self):
        # refresh and cache the full inspect payload
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary

    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)

    def __repr__(self):
        return '<Container: %s (%s)>' % (self.name, self.id[:6])

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.id == other.id

    def __hash__(self):
        return self.id.__hash__()
def get_container_name(container):
    """Extract the container name from an inspect- or ps-style API dict.

    Returns None when neither a 'Name' (inspect output) nor a 'Names'
    (ps output) entry carries a usable value.
    """
    has_name = bool(container.get('Name'))
    has_names = bool(container.get('Names'))
    if not (has_name or has_names):
        return None
    # /containers/:id/json payloads carry a single 'Name'
    if 'Name' in container:
        return container['Name']
    # /containers/json payloads carry several aliases; pick the least
    # nested one and strip its leading path components
    by_depth = sorted(container['Names'], key=lambda alias: len(alias.split('/')))
    return by_depth[0].split('/')[-1]
from __future__ import division
import numbers
import re
import ezodf
import numpy as np
import logging
import typing
from openglider.airfoil import BezierProfile2D, Profile2D
from openglider.vector.spline import Bezier, SymmetricBezier, SymmetricBSpline
from openglider.vector import Interpolation
from openglider.glider.parametric.arc import ArcCurve
from openglider.glider.parametric.shape import ParametricShape
from openglider.glider.parametric.lines import UpperNode2D, LowerNode2D, BatchNode2D, Line2D, LineSet2D
from openglider.glider.rib import MiniRib
from openglider.glider.ballooning import BallooningBezier, BallooningBezierNeu
from openglider.utils.table import Table
logger = logging.getLogger(__name__)

# Column keywords expected for element groups parsed from the ods sheets.
# NOTE(review): the "a" entry looks like a leftover placeholder — confirm
# before relying on this mapping.
element_keywords = {
    "cuts": ["cells", "left", "right", "type"],
    "a": "",
}
def filter_elements_from_table(table: Table, key: str, length: int):
    """Collect every ``length``-column group whose header cell equals ``key``.

    Scans row 0 of ``table`` and appends each matching column group to a
    fresh Table, preserving left-to-right order.
    """
    result = Table()
    matching = (c for c in range(table.num_columns) if table[0, c] == key)
    for start in matching:
        result.append_right(table.get_columns(start, start + length - 1))
    return result
def import_ods_2d(Glider2D, filename, numpoints=4, calc_lineset_nodes=False):
    """Load a parametric glider definition from an .ods spreadsheet.

    :param Glider2D: parametric glider class to instantiate
    :param filename: path to the .ods document
    :param numpoints: kept for interface compatibility (not used here)
    :param calc_lineset_nodes: when True, build the 3d glider once and use
        it to place default 2d positions for the lineset nodes
    :return: a populated ``Glider2D`` instance
    """
    # Bug fixed: this used to log a literal placeholder (an f-string with
    # no interpolation) instead of the file being imported.
    logger.info(f"Import file: {filename}")
    ods = ezodf.opendoc(filename)
    sheets = ods.sheets
    tables = Table.load(filename)

    cell_sheet = tables[1]
    rib_sheet = tables[2]

    # file-version marker lives in cell A1 of the cell sheet ("V2", "V3", ...)
    file_version_match = re.match(r"V([0-9]*)", str(cell_sheet["A1"]))
    if file_version_match:
        file_version = int(file_version_match.group(1))
    else:
        file_version = 1
    logger.info(f"Loading file version {file_version}")
    # ------------

    # profiles are stored column-wise in sheet 3
    # profiles = [BezierProfile2D(profile) for profile in transpose_columns(sheets[3])]
    profiles = [Profile2D(profile, name) for name, profile in transpose_columns(sheets[3])]
    for foil in profiles:
        foil.normalize()

    if file_version > 2:
        # V3+: geometry is stored as parametric curves in its own table
        has_center_cell = not tables[0][0, 0] == 0
        cell_no = (tables[0].num_rows - 2) * 2 + has_center_cell
        geometry = get_geometry_parametric(tables[5], cell_no)
    else:
        # older files list every rib explicitly
        geometry = get_geometry_explicit(sheets[0])
        has_center_cell = geometry["shape"].has_center_cell

    balloonings = []
    for i, (name, balloon) in enumerate(transpose_columns(sheets[4])):
        ballooning_type = str(sheets[4][0, 2*i+1].value).upper()
        if balloon:
            if ballooning_type == "V1":
                # split at the apex where x stops increasing
                # (renamed the counter — it used to be "i", shadowing the
                # enumerate index above)
                apex = 0
                while balloon[apex + 1][0] > balloon[apex][0]:
                    apex += 1

                upper = balloon[:apex + 1]
                lower = [(x, -y) for x, y in balloon[apex + 1:]]
                ballooning = BallooningBezier(upper, lower, name=name)
                balloonings.append(BallooningBezierNeu.from_classic(ballooning))

            elif ballooning_type == "V2":
                # V2 stores the lower curve without sign flip
                apex = 0
                while balloon[apex + 1][0] > balloon[apex][0]:
                    apex += 1

                upper = balloon[:apex + 1]
                lower = balloon[apex + 1:]
                ballooning = BallooningBezier(upper, lower, name=name)
                balloonings.append(BallooningBezierNeu.from_classic(ballooning))

            elif ballooning_type == "V3":
                balloonings.append(BallooningBezierNeu(balloon))

            else:
                raise ValueError("No ballooning type specified")

    # generic key/value data from the last sheet (SPEED, GLIDE, ...)
    data = {}
    datasheet = tables[-1]
    for row in range(datasheet.num_rows):
        name = datasheet[row, 0]
        if name:
            data[name] = datasheet[row, 1]

    attachment_points_cell_table = filter_elements_from_table(cell_sheet, "ATP", 4)
    attachment_points_cell_table.append_right(filter_elements_from_table(cell_sheet, "AHP", 4))
    attachment_points_rib_table = filter_elements_from_table(rib_sheet, "AHP", 3)
    attachment_points_rib_table.append_right(filter_elements_from_table(rib_sheet, "ATP", 3))

    attachment_points = LineSet2D.read_attachment_point_table(
        cell_table=attachment_points_cell_table,
        rib_table=attachment_points_rib_table,
        half_cell_no=geometry["shape"].half_cell_num
    )
    attachment_points = {n.name: n for n in attachment_points}
    attachment_points_lower = get_lower_aufhaengepunkte(data)

    def get_grouped_elements(sheet, names, keywords):
        # collect all elements listed under any of `names` and group the
        # resulting dicts by the first keyword
        group_kw = keywords[0]
        elements = []
        for name in names:
            elements += read_elements(sheet, name, len_data=len(keywords)-1)

        element_dct = to_dct(elements, keywords)
        return group(element_dct, group_kw)

    # RIB HOLES
    rib_hole_keywords = ["ribs", "pos", "size"]
    rib_holes = read_elements(rib_sheet, "QUERLOCH", len_data=2)
    rib_holes += read_elements(rib_sheet, "HOLE", len_data=2)
    rib_holes = to_dct(rib_holes, rib_hole_keywords)
    rib_holes = group(rib_holes, "ribs")

    rigidfoil_keywords = ["ribs", "start", "end", "distance"]
    rigidfoils = read_elements(rib_sheet, "RIGIDFOIL", len_data=3)
    rigidfoils = to_dct(rigidfoils, rigidfoil_keywords)
    rigidfoils = group(rigidfoils, "ribs")

    cell_rigidfoils = get_grouped_elements(
        cell_sheet,
        ["RIGIDFOIL"],
        ["cells", "x_start", "x_end", "y"]
    )

    # CUTS
    def get_cuts(names, target_name):
        # read cut rows stored under any legacy/current keyword and tag
        # them with the canonical cut type
        objs = []
        for name_src in names:
            objs += read_elements(cell_sheet, name_src, len_data=2)

        cuts_this = [{"cells": cut[0], "left": float(cut[1]), "right": float(cut[2]), "type": target_name} for cut in
                     objs]

        return group(cuts_this, "cells")

    cuts = get_cuts(["EKV", "EKH", "folded"], "folded")
    cuts += get_cuts(["DESIGNM", "DESIGNO", "orthogonal"], "orthogonal")
    cuts += get_cuts(["CUT3D", "cut_3d"], "cut_3d")
    cuts += get_cuts(["singleskin"], "singleskin")

    # Diagonals: center_left, center_right, width_l, width_r, height_l, height_r
    diagonals = []
    for res in read_elements(cell_sheet, "QR", len_data=6):
        height1 = res[5]
        height2 = res[6]

        # migration
        if file_version == 1:
            # height (0,1) -> (-1,1)
            height1 = height1 * 2 - 1
            height2 = height2 * 2 - 1
        # ---------

        diagonals.append({"left_front": (res[1] - res[3] / 2, height1),
                          "left_back": (res[1] + res[3] / 2, height1),
                          "right_front": (res[2] - res[4] / 2, height2),
                          "right_back": (res[2] + res[4] / 2, height2),
                          "cells": res[0]})
    diagonals = group(diagonals, "cells")

    straps = []
    # (removed the unused `straps_keywords` local)
    for res in read_elements(cell_sheet, "VEKTLAENGE", len_data=2):
        # legacy straps carry no width; use the historic 2cm default
        straps.append({
            "left": res[1],
            "right": res[2],
            "width": 0.02,
            "cells": res[0]
        })

    for res in read_elements(cell_sheet, "STRAP", len_data=3):
        # [cell_no, x_left, x_right, width]
        straps.append({
            "left": res[1],
            "right": res[2],
            "width": res[3],
            "cells": res[0]
        })
    straps = group(straps, "cells")

    materials = get_material_codes(cell_sheet)

    # minirib -> y, start (x)
    miniribs = []
    for minirib in read_elements(cell_sheet, "MINIRIB", len_data=2):
        miniribs.append({
            "yvalue": minirib[1],
            "front_cut": minirib[2],
            "cells": minirib[0]
        })
    miniribs = group(miniribs, "cells")

    lineset_table = tables[6]
    lineset = LineSet2D.read_input_table(lineset_table, attachment_points_lower, attachment_points)

    glider_2d = Glider2D(elements={"cuts": cuts,
                                   "holes": rib_holes,
                                   "diagonals": diagonals,
                                   "rigidfoils": rigidfoils,
                                   "cell_rigidfoils": cell_rigidfoils,
                                   "straps": straps,
                                   "materials": materials,
                                   "miniribs": miniribs},
                         profiles=profiles,
                         balloonings=balloonings,
                         lineset=lineset,
                         speed=data["SPEED"],
                         glide=data["GLIDE"],
                         **geometry)

    if calc_lineset_nodes:
        glider_3d = glider_2d.get_glider_3d()
        glider_2d.lineset.set_default_nodes2d_pos(glider_3d)

    return glider_2d
def get_geometry_explicit(sheet):
    """Parse the pre-V3 geometry sheet, where every rib is listed explicitly.

    Walks the rows, integrating the per-rib arc angles into (y, z)
    positions, and fits symmetric splines through the samples.  Returns the
    dict of geometry keyword arguments consumed by the Glider2D constructor.
    """
    # All Lists
    front = []
    back = []
    cell_distribution = []  # NOTE(review): collected but never used below
    aoa = []
    arc = []
    profile_merge = []
    ballooning_merge = []
    zrot = []

    y = z = span_last = alpha = 0.
    for i in range(1, sheet.nrows()):
        line = [sheet.get_cell([i, j]).value for j in range(sheet.ncols())]

        if not line[0]:
            break  # skip empty line

        if not all(isinstance(c, numbers.Number) for c in line[:10]):
            raise ValueError("Invalid row ({}): {}".format(i, line))

        # Index, Choord, Span(x_2d), Front(y_2d=x_3d), d_alpha(next), aoa,
        chord = line[1]
        span = line[2]
        x = line[3]

        # integrate the arc: advance (y, z) by the span step rotated by alpha
        y += np.cos(alpha) * (span - span_last)
        z -= np.sin(alpha) * (span - span_last)
        alpha += line[4] * np.pi / 180  # angle after the rib

        aoa.append([span, line[5] * np.pi / 180])
        arc.append([y, z])
        front.append([span, -x])
        back.append([span, -x - chord])
        cell_distribution.append([span, i - 1])

        profile_merge.append([span, line[8]])
        ballooning_merge.append([span, line[9]])

        zrot.append([span, line[7] * np.pi / 180])

        span_last = span

    def symmetric_fit(data, bspline=True):
        # mirror the samples about x=0 (skipping a sample that sits exactly
        # on the centre line) and fit a symmetric spline through the result
        not_from_center = int(data[0][0] == 0)
        mirrored = [[-p[0], p[1]] for p in data[not_from_center:]][::-1] + data
        if bspline:
            return SymmetricBSpline.fit(mirrored)
        else:
            return SymmetricBezier.fit(mirrored)

    # a first sample off x=0 implies a cell straddling the centre line
    has_center_cell = not front[0][0] == 0
    cell_no = (len(front) - 1) * 2 + has_center_cell

    start = (2 - has_center_cell) / cell_no

    # map span positions to a normalized 0..1 rib distribution and fit it
    const_arr = [0.] + np.linspace(start, 1, len(front) - (not has_center_cell)).tolist()
    rib_pos = [0.] + [p[0] for p in front[not has_center_cell:]]
    rib_pos_int = Interpolation(zip(rib_pos, const_arr))
    rib_distribution = [[i, rib_pos_int(i)] for i in np.linspace(0, rib_pos[-1], 30)]
    rib_distribution = Bezier.fit(rib_distribution)

    parametric_shape = ParametricShape(symmetric_fit(front), symmetric_fit(back), rib_distribution, cell_no)
    arc_curve = ArcCurve(symmetric_fit(arc))

    return {
        "shape": parametric_shape,
        "arc": arc_curve,
        "aoa": symmetric_fit(aoa),
        "zrot": symmetric_fit(zrot),
        "profile_merge_curve": symmetric_fit(profile_merge, bspline=True),
        "ballooning_merge_curve": symmetric_fit(ballooning_merge, bspline=True)
    }
def get_geometry_parametric(table: Table, cell_num):
    """Build geometry curves from a parametric definition table.

    Each named header in `table` introduces a two-column block of x/y
    control points; returns the same dict layout as
    get_geometry_explicit().
    """
    curve_points = {}
    curve_names = ("front", "back", "rib_distribution", "arc", "zrot",
                   "aoa", "profile_merge_curve", "ballooning_merge_curve")
    for name in curve_names:
        match_col = None
        for col in range(table.num_columns):
            if table[0, col] == name:
                match_col = col
        if match_col is None:
            continue
        pairs = []
        for row in range(1, table.num_rows):
            x_val = table[row, match_col]
            if x_val is not None:
                pairs.append([x_val, table[row, match_col + 1]])
        curve_points[name] = pairs
    parametric_shape = ParametricShape(
        SymmetricBSpline(curve_points["front"]),
        SymmetricBSpline(curve_points["back"]),
        Bezier(curve_points["rib_distribution"]),
        cell_num
    )
    arc_curve = ArcCurve(SymmetricBSpline(curve_points["arc"]))
    return {
        "shape": parametric_shape,
        "arc": arc_curve,
        "aoa": SymmetricBSpline(curve_points["aoa"]),
        "zrot": SymmetricBSpline(curve_points["zrot"]),
        "profile_merge_curve": SymmetricBSpline(curve_points["profile_merge_curve"]),
        "ballooning_merge_curve": SymmetricBSpline(curve_points["ballooning_merge_curve"])
    }
def get_material_codes(sheet):
    """Read MATERIAL entries and group the codes per cell index.

    Returns a list whose i-th entry holds the material codes of cell i
    (empty list for cells without entries, up to the highest used index).
    """
    # Each entry is [cell_no, code]; peel off one cell index at a time.
    remaining = read_elements(sheet, "MATERIAL", len_data=1)
    grouped = []
    index = 0
    while remaining:
        grouped.append([entry[1] for entry in remaining if entry[0] == index])
        remaining = [entry for entry in remaining if entry[0] != index]
        index += 1
    return grouped
def get_lower_aufhaengepunkte(data):
    """Collect lower attachment points from AHP[XYZ]<name> keys in `data`.

    Returns {name: LowerNode2D} with the node's x/y/z filled from the
    matching AHPX/AHPY/AHPZ entries (missing components default to 0).
    """
    points = {}
    component = {"X": 0, "Y": 1, "Z": 2}
    pattern = re.compile("AHP([XYZ])(.*)")
    for key in data:
        if key is None:
            continue
        match = pattern.match(key)
        if match is None:
            continue
        axis, name = match.groups()
        points.setdefault(name, [0, 0, 0])
        points[name][component[axis]] = data[key]
    return {name: LowerNode2D([0, 0], pos, name)
            for name, pos in points.items()}
def transpose_columns(sheet, columnswidth=2):
    """Read the sheet as vertical blocks of `columnswidth` columns.

    Each block optionally starts with a (non-numeric) name in its first
    header cell; data rows are read downward until the first all-empty row.

    Returns a list of (name, rows) tuples where rows is a list of numeric
    lists of length `columnswidth`; unnamed blocks get the name "unnamed".

    Raises ValueError if a non-empty row contains a non-numeric cell.
    """
    num_columns = sheet.ncols()
    num_elems = num_columns // columnswidth
    # NOTE(review): a trailing partial block (num_columns % columnswidth)
    # is silently ignored, matching the historical behavior.
    result = []
    for col in range(num_elems):
        first_column = col * columnswidth
        columns = range(first_column, first_column + columnswidth)
        name = sheet[0, first_column].value
        if not isinstance(name, numbers.Number):  # py2/3: str!=unicode
            start = 1  # first row is the header
        else:
            name = "unnamed"
            start = 0  # numeric header cell: data starts at the top row
        element = []
        for i in range(start, sheet.nrows()):
            row = [sheet[i, j].value for j in columns]
            # Break at the first completely empty line.
            if all(j is None for j in row):
                break
            if not all(isinstance(j, numbers.Number) for j in row):
                raise ValueError("Invalid value at row {}: {}".format(i, row))
            element.append(row)
        result.append((name, element))
    return result
def read_elements(sheet: Table, keyword, len_data=2):
    """Collect element rows from every column block headed by `keyword`.

    Returns a list of [rib_or_cell_no, data0, data1, ...] entries, e.g.
    read_elements(sheet, "AHP", 2) -> [[rib_no, id, x], ...]
    """
    found = []
    col = 0
    while col < sheet.num_columns:
        if sheet[0, col] != keyword:
            col += 1
            continue
        for row in range(1, sheet.num_rows):
            entry = [sheet[row, col + offset] for offset in range(len_data)]
            # Rows whose first cell is empty/falsy are skipped.
            if entry[0]:
                found.append([row - 1] + entry)
        col += len_data
    return found
def to_dct(elems, keywords):
    """Pair each element's values with `keywords`, one dict per element."""
    return [dict(zip(keywords, elem)) for elem in elems]
def group(lst, keyword):
    """Merge dicts that differ only in `keyword`, collecting its values.

    Each input dict's `keyword` value is wrapped in a list (mutating the
    input dicts, as before); dicts equal in every other key are merged by
    concatenating those lists.
    """
    grouped = []

    def same_except_keyword(first, second):
        # Dicts must have identical key sets and identical values for
        # every key other than `keyword`.
        if first.keys() != second.keys():
            return False
        return all(first[k] == second[k] for k in first if k != keyword)

    for entry in lst:
        # Turn the grouping value into a one-element list so merges can
        # simply concatenate.
        entry[keyword] = [entry[keyword]]
        for existing in grouped:
            if same_except_keyword(entry, existing):
                existing[keyword] += entry[keyword]
                break
        else:
            # No matching group yet.
            grouped.append(entry)
    return grouped
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock_landed_costs
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#retriever
from retriever.lib.templates import DownloadOnlyTemplate
from pkg_resources import parse_version
try:
from retriever.lib.defaults import VERSION
except ImportError:
from retriever import VERSION
class main(DownloadOnlyTemplate):
    """Download-only dataset: the Fritz et al. (2009) mammal super tree."""

    def __init__(self, **kwargs):
        DownloadOnlyTemplate.__init__(self, **kwargs)
        self.title = "Mammal Super Tree"
        self.name = 'mammal-super-tree'
        self.ref = 'http://doi.org/10.1111/j.1461-0248.2009.01307.x'
        self.citation = "Fritz, S. A., Bininda-Emonds, O. R. P. and Purvis, A. (2009), Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters, 12: 538-549. doi:10.1111/j.1461-0248.2009.01307.x"
        self.description = "Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549"
        self.retriever_minimum_version = '2.0.dev'
        self.version = '1.2.1'
        self.urls = {'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'}
        # Retriever releases up to 2.0.0 looked datasets up by title.
        if parse_version(VERSION) <= parse_version("2.0.0"):
            self.shortname = self.name
            self.name = self.title
# Module-level instance picked up by the retriever framework when it
# loads this script.
SCRIPT = main()
from PeacockActor import PeacockActor
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
class ClippedActor(PeacockActor):
    """Renders the portion of another PeacockActor's mesh that lies on
    one side of an implicit clip plane."""

    def __init__(self, original_actor, plane):
        PeacockActor.__init__(self, original_actor.renderer)
        self.original_actor = original_actor
        self.plane = plane
        # Build the clip pipeline: mesh -> clipper -> mapper -> actor.
        # NOTE(review): SetInput is the VTK 5 pipeline API; VTK 6+ renamed
        # it to SetInputData/SetInputConnection — confirm targeted version.
        self.clipper = vtk.vtkTableBasedClipDataSet()
        self.clipper.SetInput(self.original_actor.mesh)
        self.clipper.SetClipFunction(self.plane)
        self.clipper.Update()
        self.clip_mapper = vtk.vtkDataSetMapper()
        self.clip_mapper.SetInput(self.clipper.GetOutput())
        self.clip_actor = vtk.vtkActor()
        self.clip_actor.SetMapper(self.clip_mapper)

    def getBounds(self):
        # Bounds come from the unclipped source actor.
        return self.original_actor.getBounds()

    def movePlane(self):
        # Hook called when the clip plane moves; currently a no-op
        # (no re-clip is performed here).
        pass

    def _show(self):
        self.original_actor.renderer.AddActor(self.clip_actor)

    def _hide(self):
        self.original_actor.renderer.RemoveActor(self.clip_actor)

    def _showEdges(self):
        self.clip_actor.GetProperty().EdgeVisibilityOn()

    def _hideEdges(self):
        self.clip_actor.GetProperty().EdgeVisibilityOff()

    def _goSolid(self):
        self.clip_actor.GetProperty().SetRepresentationToSurface()

    def _goWireframe(self):
        self.clip_actor.GetProperty().SetRepresentationToWireframe()

    def _setColor(self, color):
        self.clip_actor.GetProperty().SetColor(color)
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package server_test
import (
"context"
"fmt"
"sort"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/storage/fs"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
humanize "github.com/dustin/go-humanize"
"github.com/stretchr/testify/require"
)
// TestAddNewStoresToExistingNodes tests database behavior with
// multiple stores per node, in particular when new stores are
// added while nodes are shut down. This test starts a cluster with
// three nodes, shuts down all nodes and adds a store to each node,
// and ensures nodes start back up successfully. See #39415.
func TestAddNewStoresToExistingNodes(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// Nine stores is a lot of goroutines.
	skip.UnderStress(t, "too many new stores and nodes for stress")
	skip.UnderRace(t, "too many new stores and nodes for race")
	skip.UnderDeadlock(t, "too many new stores and nodes for deadlock")

	ctx := context.Background()

	ser := fs.NewStickyRegistry()

	const (
		numNodes                     = 3
		numStoresPerNodeInitially    = 1
		numStoresPerNodeAfterRestart = 3
	)

	mkClusterArgs := func(numNodes, numStoresPerNode int) base.TestClusterArgs {
		tcArgs := base.TestClusterArgs{
			// NB: it's important that this test wait for full replication. Otherwise,
			// with only a single voter on the range that allocates store IDs, it can
			// pass erroneously. StartTestCluster already calls it, but we call it
			// again explicitly.
			ReplicationMode:   base.ReplicationAuto,
			ServerArgsPerNode: map[int]base.TestServerArgs{},
			ServerArgs: base.TestServerArgs{
				DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
			},
		}
		for srvIdx := 0; srvIdx < numNodes; srvIdx++ {
			serverArgs := base.TestServerArgs{}
			serverArgs.Knobs.Server = &server.TestingKnobs{StickyVFSRegistry: ser}
			for storeIdx := 0; storeIdx < numStoresPerNode; storeIdx++ {
				id := fmt.Sprintf("s%d.%d", srvIdx+1, storeIdx+1)
				serverArgs.StoreSpecs = append(
					serverArgs.StoreSpecs,
					base.StoreSpec{InMemory: true, StickyVFSID: id},
				)
			}
			tcArgs.ServerArgsPerNode[srvIdx] = serverArgs
		}
		return tcArgs
	}

	tc := testcluster.StartTestCluster(t, numNodes, mkClusterArgs(numNodes, numStoresPerNodeInitially))
	clusterID := tc.Server(0).StorageClusterID()
	tc.Stopper().Stop(ctx)

	tcArgs := mkClusterArgs(numNodes, numStoresPerNodeAfterRestart)
	tcArgs.ReplicationMode = base.ReplicationManual // saves time, ok now

	// We need ParallelStart since this is an existing cluster. If
	// we started sequentially, then the first node would hang forever
	// waiting for the KV layer to become available, but that only
	// happens when the second node also starts.
	tcArgs.ParallelStart = true

	// Start all nodes with additional stores.
	tc = testcluster.StartTestCluster(t, numNodes, tcArgs)
	defer tc.Stopper().Stop(ctx)

	// Sanity check that we're testing what we wanted to test and didn't accidentally
	// bootstrap three single-node clusters (who knows).
	for _, srv := range tc.Servers {
		require.Equal(t, clusterID, srv.StorageClusterID())
	}

	// Ensure all nodes have all stores available, and each store has a unique
	// store ID.
	testutils.SucceedsSoon(t, func() error {
		var storeIDs []roachpb.StoreID
		// NB: the loop variable is named srv (not server) to avoid shadowing
		// the imported server package.
		for _, srv := range tc.Servers {
			var storeCount = 0
			if err := srv.GetStores().(*kvserver.Stores).VisitStores(
				func(s *kvserver.Store) error {
					storeCount++
					storeIDs = append(storeIDs, s.StoreID())
					return nil
				},
			); err != nil {
				return errors.Wrap(err, "failed to visit all nodes")
			}
			// Compare against the named constant rather than a magic 3 so this
			// check tracks numStoresPerNodeAfterRestart.
			if storeCount != numStoresPerNodeAfterRestart {
				return errors.Errorf("expected %d stores to be available on n%s, got %d stores instead",
					numStoresPerNodeAfterRestart, srv.NodeID(), storeCount)
			}
		}
		sort.Slice(storeIDs, func(i, j int) bool {
			return storeIDs[i] < storeIDs[j]
		})
		for i := range storeIDs {
			expStoreID := roachpb.StoreID(i + 1)
			if storeIDs[i] != expStoreID {
				t.Fatalf("expected the %s store to have storeID s%s, found s%s", humanize.Ordinal(i+1), expStoreID, storeIDs[i])
			}
		}
		return nil
	})
}
// TestMultiStoreIDAlloc validates that we don't accidentally re-use or
// skip-over allocated store IDs in multi-store setups.
func TestMultiStoreIDAlloc(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderStress(t, "too many new stores and nodes for stress")
ctx := context.Background()
numNodes := 3
numStoresPerNode := 3
var storeSpecs []base.StoreSpec
for i := 0; i < numStoresPerNode; i++ {
storeSpecs = append(storeSpecs, base.StoreSpec{InMemory: true})
}
tcArgs := base.TestClusterArgs{
ParallelStart: true,
ReplicationMode: base.ReplicationManual, // saves time
ServerArgsPerNode: map[int]base.TestServerArgs{
0: {StoreSpecs: storeSpecs},
1: {StoreSpecs: storeSpecs},
2: {StoreSpecs: storeSpecs},
},
}
tc := testcluster.StartTestCluster(t, numNodes, tcArgs)
defer tc.Stopper().Stop(ctx)
// Sanity check that we're testing what we wanted to test and didn't accidentally
// bootstrap three single-node clusters (who knows).
clusterID := tc.Server(0).StorageClusterID()
for _, srv := range tc.Servers {
require.Equal(t, clusterID, srv.StorageClusterID())
}
// Ensure all nodes have all stores available, and each store has a unique
// store ID.
testutils.SucceedsSoon(t, func() error {
var storeIDs []roachpb.StoreID
for _, server := range tc.Servers {
var storeCount = 0
if err := server.GetStores().(*kvserver.Stores).VisitStores(
func(s *kvserver.Store) error {
storeCount++
storeIDs = append(storeIDs, s.StoreID())
return nil
},
); err != nil {
return errors.Wrap(err, "failed to visit all nodes")
}
if storeCount != numStoresPerNode {
return errors.Errorf("expected %d stores to be available on n%s, got %d stores instead",
numStoresPerNode, server.NodeID(), storeCount)
}
}
sort.Slice(storeIDs, func(i, j int) bool {
return storeIDs[i] < storeIDs[j]
})
for i := range storeIDs {
expStoreID := roachpb.StoreID(i + 1)
if storeIDs[i] != expStoreID {
t.Fatalf("expected the %s store to have storeID s%s, found s%s", humanize.Ordinal(i+1), expStoreID, storeIDs[i])
}
}
return nil
})
} | go | github | https://github.com/cockroachdb/cockroach | pkg/server/multi_store_test.go |
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import json
import logging
import random
import sys
import time
from collections import defaultdict
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
            port: an object implementing port-specific behavior
                (filesystem, server doc roots, results directory, ...)
            options: a dictionary of command line options
            printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        # Populated in run() once the test list is known.
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
        # Optional per-test options (e.g. "slow") keyed by test path; the
        # file may be absent, in which case the dict is empty.
        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}
def _collect_tests(self, args):
return self._finder.find_tests(self._options, args)
def _is_http_test(self, test):
return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._is_web_platform_test(test)
def _is_websocket_test(self, test):
return self.WEBSOCKET_SUBDIR in test
def _is_web_platform_test(self, test):
return self.web_platform_test_subdir in test
def _custom_device_for_test(self, test):
for device_class in self._port.CUSTOM_DEVICE_CLASSES:
directory_suffix = device_class + self._port.TEST_PATH_SEPARATOR
if directory_suffix in test:
return device_class
return None
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
    def _prepare_lists(self, paths, test_names):
        """Split test_names into (tests_to_run, tests_to_skip).

        Skipped tests come from the finder/expectations; the remaining
        tests are ordered per --order and reduced to this run's chunk.
        """
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]
        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)
        # Tests assigned to other chunks are recorded as skipped here.
        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)
        return tests_to_run, tests_to_skip
    def _test_input_for_file(self, test_file):
        """Wrap a test path in a TestInput with the right timeout and flags."""
        return TestInput(test_file,
            # Slow tests get the longer timeout.
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file))
def _test_is_slow(self, test_file):
if self._expectations.model().has_modifier(test_file, test_expectations.SLOW):
return True
return "slow" in self._tests_options.get(test_file, [])
def _test_should_dump_jsconsolelog_in_stderr(self, test_file):
return self._expectations.model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)
def needs_servers(self, test_names):
return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http
def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return test_inputs
def _update_worker_count(self, test_names):
test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
self._options.child_processes = worker_count
    def _set_up_run(self, test_names, device_class=None):
        """Prepare the build, helper, and port for a test pass.

        Returns True on success, False if any precondition fails.
        """
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False
        self._options.device_class = device_class
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False
        self._update_worker_count(test_names)
        self._port.reset_preferences()
        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                # Stop the helper started above before bailing out.
                self._port.stop_helper()
                return False
        if self._options.clobber_old_results:
            self._clobber_old_results()
        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)
        self._port.setup_test_run(self._options.device_class)
        return True
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)
        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()
        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)
        default_device_tests = []
        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)
        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))
        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False
        # First pass: tests that run on the default device.
        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)
            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)
        # Then one sub-run per custom device class, merging results as we go.
        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)
                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)
                initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
                retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry
        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
    def _run_test_subset(self, tests_to_run, tests_to_skip):
        """Run one batch of tests plus an optional retry of unexpected
        failures.

        Returns (initial_results, retry_results_or_None,
        enabled_pixel_tests_in_retry); always cleans up the run.
        """
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False)
            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                # The retry pass is single-worker with no repeats.
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True)
                if enabled_pixel_tests_in_retry:
                    # Restore the pixel-tests setting we forced on above.
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()
        return (initial_results, retry_results, enabled_pixel_tests_in_retry)
    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        """Summarize, report, and upload the run's results.

        Returns the final RunDetails (exit code, summaries, raw results).
        """
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)
        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            # The results server wants passes and timings included too.
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)
        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)
            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                # Optionally pop up the HTML results before computing exit code.
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                                                   (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
needs_http = any((self._is_http_test(test) and not self._is_web_platform_test(test)) for test in tests_to_run)
needs_web_platform_test_server = any(self._is_web_platform_test(test) for test in tests_to_run)
needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)
return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, needs_web_platform_test_server, retrying)
    def _clean_up_run(self):
        """Flush output streams and restore the port after a test pass."""
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()
    def _force_pixel_tests_if_needed(self):
        """Enable pixel tests for the retry pass if they were off.

        Returns True if pixel tests were turned on here; the caller is
        then responsible for turning them back off afterwards.
        """
        if self._options.pixel_tests:
            return False
        _log.debug("Restarting helper")
        # The helper must be restarted with pixel tests enabled.
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
under stress do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
# Check if this crashing 'test' is already in list of crashed_processes, if not add it to the run_results
if not any(process[0] == test for process in crashed_processes):
result = test_results.TestResult(test)
result.type = test_expectations.CRASH
result.is_other_crash = True
run_results.add(result, expected=False, test_is_slow=False)
_log.debug("Adding results for other crash: " + str(test))
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
self._printer.write_update("Clobbering old results in %s" %
self._results_directory)
layout_tests_dir = self._port.layout_tests_dir()
possible_dirs = self._port.test_dirs()
for dirname in possible_dirs:
if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
def _tests_to_retry(self, run_results, include_crashes):
return [result.test_name for result in run_results.unexpected_results_by_name.values() if
((result.type != test_expectations.PASS) and
(result.type != test_expectations.MISSING) and
(result.type != test_expectations.CRASH or include_crashes))]
def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
"""Writes the results of the test run as JSON files into the results
dir and upload the files to the appengine server.
Args:
summarized_results: dict of results
initial_results: full summary object
"""
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
# We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
if results_including_passes:
json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)
generator = json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
self._options.build_number, self._results_directory,
self._expectations, initial_results,
self._options.test_results_server,
"layout-tests",
self._options.master_name)
if generator.generate_json_output():
_log.debug("Finished writing JSON file for the test results server.")
else:
_log.debug("Failed to generate JSON file for the test results server.")
return
json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
generator.upload_json_files(json_files)
if results_including_passes:
self.upload_results(results_json_path, start_time, end_time)
incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
# Remove these files from the results directory so they don't take up too much space on the buildbot.
# The tools use the version we uploaded to the results server anyway.
self._filesystem.remove(times_json_path)
self._filesystem.remove(incremental_results_path)
if results_including_passes:
self._filesystem.remove(results_json_path)
def upload_results(self, results_json_path, start_time, end_time):
hostname = self._options.results_server_host
if not hostname:
return
master_name = self._options.master_name
builder_name = self._options.builder_name
build_number = self._options.build_number
build_slave = self._options.build_slave
if not master_name or not builder_name or not build_number or not build_slave:
_log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
return
revisions = {}
# FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
for (name, path) in self._port.repository_paths():
scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
revision = scm.svn_revision(path)
revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}
_log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)
attrs = [
('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name), # FIXME: Pass in build.webkit.org.
('builder_name', builder_name),
('build_number', build_number),
('build_slave', build_slave),
('revisions', json.dumps(revisions)),
('start_time', str(start_time)),
('end_time', str(end_time)),
]
uploader = FileUploader("http://%s/api/report" % hostname, 360)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
if not response:
_log.error("JSON upload failed; no response returned")
return
if response.code != 200:
_log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
return
response_text = response.read()
try:
response_json = json.loads(response_text)
except ValueError, error:
_log.error("JSON upload failed; failed to parse the response: %s", response_text)
return
if response_json['status'] != 'OK':
_log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
return
_log.info("JSON uploaded.")
except Exception, error:
_log.error("Upload failed: %s" % error)
return
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
def _print_expectation_line_for_test(self, format_string, test):
line = self._expectations.model().get_expectation_line(test)
print format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or '')
def _print_expectations_for_subset(self, device_class, test_col_width, tests_to_run, tests_to_skip={}):
format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
if tests_to_skip:
print ''
print 'Tests to skip ({})'.format(len(tests_to_skip))
for test in sorted(tests_to_skip):
self._print_expectation_line_for_test(format_string, test)
print ''
print 'Tests to run{} ({})'.format(' for ' + device_class if device_class else '', len(tests_to_run))
for test in sorted(tests_to_run):
self._print_expectation_line_for_test(format_string, test)
def print_expectations(self, args):
self._printer.write_update("Collecting tests ...")
try:
paths, test_names = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return -1
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
self._expectations.parse_all_expectations()
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
test_col_width = len(max(tests_to_run + list(tests_to_skip), key=len)) + 1
default_device_tests = []
# Look for tests with custom device requirements.
custom_device_tests = defaultdict(list)
for test_file in tests_to_run:
custom_device = self._custom_device_for_test(test_file)
if custom_device:
custom_device_tests[custom_device].append(test_file)
else:
default_device_tests.append(test_file)
if custom_device_tests:
for device_class in custom_device_tests:
_log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))
self._print_expectations_for_subset(None, test_col_width, tests_to_run, tests_to_skip)
for device_class in custom_device_tests:
device_tests = custom_device_tests[device_class]
self._print_expectations_for_subset(device_class, test_col_width, device_tests)
return 0 | unknown | codeparrot/codeparrot-clean | ||
import {
createApp,
defineComponent,
getCurrentInstance,
h,
nodeOps,
render,
shallowReadonly,
} from '@vue/runtime-test'
import type {
ComponentInternalInstance,
ComponentOptions,
} from '../src/component'
describe('component: proxy', () => {
test('data', () => {
let instance: ComponentInternalInstance
let instanceProxy: any
const Comp = {
data() {
return {
foo: 1,
}
},
mounted() {
instance = getCurrentInstance()!
instanceProxy = this
},
render() {
return null
},
}
render(h(Comp), nodeOps.createElement('div'))
expect(instanceProxy.foo).toBe(1)
instanceProxy.foo = 2
expect(instance!.data.foo).toBe(2)
})
test('setupState', () => {
let instance: ComponentInternalInstance
let instanceProxy: any
const Comp = {
setup() {
return {
foo: 1,
}
},
mounted() {
instance = getCurrentInstance()!
instanceProxy = this
},
render() {
return null
},
}
render(h(Comp), nodeOps.createElement('div'))
expect(instanceProxy.foo).toBe(1)
instanceProxy.foo = 2
expect(instance!.setupState.foo).toBe(2)
})
test('should not expose non-declared props', () => {
let instanceProxy: any
const Comp = {
setup() {
return () => null
},
mounted() {
instanceProxy = this
},
}
render(h(Comp, { count: 1 }), nodeOps.createElement('div'))
expect('count' in instanceProxy).toBe(false)
})
test('public properties', async () => {
let instance: ComponentInternalInstance
let instanceProxy: any
const Comp = {
setup() {
return () => null
},
mounted() {
instance = getCurrentInstance()!
instanceProxy = this
},
}
render(h(Comp), nodeOps.createElement('div'))
expect(instanceProxy.$data).toBe(instance!.data)
expect(instanceProxy.$props).toBe(shallowReadonly(instance!.props))
expect(instanceProxy.$attrs).toBe(shallowReadonly(instance!.attrs))
expect(instanceProxy.$slots).toBe(shallowReadonly(instance!.slots))
expect(instanceProxy.$refs).toBe(shallowReadonly(instance!.refs))
expect(instanceProxy.$parent).toBe(
instance!.parent && instance!.parent.proxy,
)
expect(instanceProxy.$root).toBe(instance!.root.proxy)
expect(instanceProxy.$emit).toBe(instance!.emit)
expect(instanceProxy.$el).toBe(instance!.vnode.el)
expect(instanceProxy.$options).toBe(instance!.type as ComponentOptions)
expect(() => (instanceProxy.$data = {})).toThrow(TypeError)
expect(`Attempting to mutate public property "$data"`).toHaveBeenWarned()
const nextTickThis = await instanceProxy.$nextTick(function (this: any) {
return this
})
expect(nextTickThis).toBe(instanceProxy)
})
test('user attached properties', async () => {
let instance: ComponentInternalInstance
let instanceProxy: any
const Comp = {
setup() {
return () => null
},
mounted() {
instance = getCurrentInstance()!
instanceProxy = this
},
}
render(h(Comp), nodeOps.createElement('div'))
instanceProxy.foo = 1
expect(instanceProxy.foo).toBe(1)
expect(instance!.ctx.foo).toBe(1)
// should also allow properties that start with $
const obj = (instanceProxy.$store = {})
expect(instanceProxy.$store).toBe(obj)
expect(instance!.ctx.$store).toBe(obj)
})
test('globalProperties', () => {
let instance: ComponentInternalInstance
let instanceProxy: any
const Comp = {
setup() {
return () => null
},
mounted() {
instance = getCurrentInstance()!
instanceProxy = this
},
}
const app = createApp(Comp)
app.config.globalProperties.foo = 1
app.mount(nodeOps.createElement('div'))
expect(instanceProxy.foo).toBe(1)
// set should overwrite globalProperties with local
instanceProxy.foo = 2
// expect(instanceProxy.foo).toBe(2)
expect(instance!.ctx.foo).toBe(2)
// should not affect global
expect(app.config.globalProperties.foo).toBe(1)
})
test('has check', () => {
let instanceProxy: any
const Comp = {
render() {},
props: {
msg: String,
},
data() {
return {
foo: 0,
$foo: 0,
}
},
computed: {
cmp: () => {
throw new Error('value of cmp should not be accessed')
},
$cmp: () => {
throw new Error('value of $cmp should not be read')
},
},
setup() {
return {
bar: 1,
}
},
__cssModules: {
$style: {},
cssStyles: {},
},
mounted() {
instanceProxy = this
},
}
const app = createApp(Comp, { msg: 'hello' })
app.config.globalProperties.global = 1
app.config.globalProperties.$global = 1
app.mount(nodeOps.createElement('div'))
// props
expect('msg' in instanceProxy).toBe(true)
// data
expect('foo' in instanceProxy).toBe(true)
expect('$foo' in instanceProxy).toBe(false)
// setupState
expect('bar' in instanceProxy).toBe(true)
// ctx
expect('cmp' in instanceProxy).toBe(true)
expect('$cmp' in instanceProxy).toBe(true)
// public properties
expect('$el' in instanceProxy).toBe(true)
// CSS modules
expect('$style' in instanceProxy).toBe(true)
expect('cssStyles' in instanceProxy).toBe(true)
// global properties
expect('global' in instanceProxy).toBe(true)
expect('$global' in instanceProxy).toBe(true)
// non-existent
expect('$foobar' in instanceProxy).toBe(false)
expect('baz' in instanceProxy).toBe(false)
// #4962 triggering getter should not cause non-existent property to
// pass the has check
instanceProxy.baz
instanceProxy.$baz
expect('baz' in instanceProxy).toBe(false)
expect('$baz' in instanceProxy).toBe(false)
// set non-existent (goes into proxyTarget sink)
instanceProxy.baz = 1
expect('baz' in instanceProxy).toBe(true)
instanceProxy.$baz = 1
expect('$baz' in instanceProxy).toBe(true)
// dev mode ownKeys check for console inspection
// should only expose own keys
expect(Object.keys(instanceProxy)).toMatchObject([
'msg',
'bar',
'foo',
'cmp',
'$cmp',
'baz',
'$baz',
])
})
test('allow updating proxy with Object.defineProperty', () => {
let instanceProxy: any
const Comp = {
render() {},
setup() {
return {
isDisplayed: true,
}
},
mounted() {
instanceProxy = this
},
}
const app = createApp(Comp)
app.mount(nodeOps.createElement('div'))
Object.defineProperty(instanceProxy, 'isDisplayed', { value: false })
expect(instanceProxy.isDisplayed).toBe(false)
Object.defineProperty(instanceProxy, 'isDisplayed', { value: true })
expect(instanceProxy.isDisplayed).toBe(true)
Object.defineProperty(instanceProxy, 'isDisplayed', {
get() {
return false
},
})
expect(instanceProxy.isDisplayed).toBe(false)
Object.defineProperty(instanceProxy, 'isDisplayed', {
get() {
return true
},
})
expect(instanceProxy.isDisplayed).toBe(true)
})
test('allow test runner spying on proxy methods with Object.defineProperty', () => {
// #5417
let instanceProxy: any
const Comp = {
render() {},
setup() {
return {
toggle() {
return 'a'
},
}
},
mounted() {
instanceProxy = this
},
}
const app = createApp(Comp)
app.mount(nodeOps.createElement('div'))
// access 'toggle' to ensure key is cached
const v1 = instanceProxy.toggle()
expect(v1).toEqual('a')
// reconfigure "toggle" to be getter based.
let getCalledTimes = 0
Object.defineProperty(instanceProxy, 'toggle', {
get() {
getCalledTimes++
return () => 'b'
},
})
// getter should not be evaluated on initial definition
expect(getCalledTimes).toEqual(0)
// invoke "toggle" after "defineProperty"
const v2 = instanceProxy.toggle()
expect(v2).toEqual('b')
expect(getCalledTimes).toEqual(1)
// expect toggle getter not to be cached. it can't be
instanceProxy.toggle()
expect(getCalledTimes).toEqual(2)
// attaching spy, triggers the getter once, and override the property.
// also uses Object.defineProperty
const spy = vi.spyOn(instanceProxy, 'toggle')
expect(getCalledTimes).toEqual(3)
const v3 = instanceProxy.toggle()
expect(v3).toEqual('b')
expect(spy).toHaveBeenCalled()
expect(getCalledTimes).toEqual(3)
})
test('defineProperty on proxy property with value descriptor', () => {
// #5417
let instanceProxy: any
const Comp = {
render() {},
setup() {
return {
toggle: 'a',
}
},
mounted() {
instanceProxy = this
},
}
const app = createApp(Comp)
app.mount(nodeOps.createElement('div'))
const v1 = instanceProxy.toggle
expect(v1).toEqual('a')
Object.defineProperty(instanceProxy, 'toggle', {
value: 'b',
})
const v2 = instanceProxy.toggle
expect(v2).toEqual('b')
// expect null to be a settable value
Object.defineProperty(instanceProxy, 'toggle', {
value: null,
})
const v3 = instanceProxy.toggle
expect(v3).toBeNull()
})
test('defineProperty on public instance proxy should work with SETUP,DATA,CONTEXT,PROPS', () => {
// #5417
let instanceProxy: any
const Comp = {
props: ['fromProp'],
data() {
return { name: 'data.name' }
},
computed: {
greet() {
return 'Hi ' + (this as any).name
},
},
render() {},
setup() {
return {
fromSetup: true,
}
},
mounted() {
instanceProxy = this
},
}
const app = createApp(Comp, {
fromProp: true,
})
app.mount(nodeOps.createElement('div'))
expect(instanceProxy.greet).toEqual('Hi data.name')
// define property on data
Object.defineProperty(instanceProxy, 'name', {
get() {
return 'getter.name'
},
})
// computed is same still cached
expect(instanceProxy.greet).toEqual('Hi data.name')
// trigger computed
instanceProxy.name = ''
// expect "greet" to evaluated and use name from context getter
expect(instanceProxy.greet).toEqual('Hi getter.name')
// defineProperty on computed ( context )
Object.defineProperty(instanceProxy, 'greet', {
get() {
return 'Hi greet.getter.computed'
},
})
expect(instanceProxy.greet).toEqual('Hi greet.getter.computed')
// defineProperty on setupState
expect(instanceProxy.fromSetup).toBe(true)
Object.defineProperty(instanceProxy, 'fromSetup', {
get() {
return false
},
})
expect(instanceProxy.fromSetup).toBe(false)
// defineProperty on Props
expect(instanceProxy.fromProp).toBe(true)
Object.defineProperty(instanceProxy, 'fromProp', {
get() {
return false
},
})
expect(instanceProxy.fromProp).toBe(false)
})
// #864
test('should not warn declared but absent props', () => {
const Comp = {
props: ['test'],
render(this: any) {
return this.test
},
}
render(h(Comp), nodeOps.createElement('div'))
expect(
`was accessed during render but is not defined`,
).not.toHaveBeenWarned()
})
test('should allow symbol to access on render', () => {
const Comp = {
render() {
if ((this as any)[Symbol.unscopables]) {
return '1'
}
return '2'
},
}
const app = createApp(Comp)
app.mount(nodeOps.createElement('div'))
expect(
`Property ${JSON.stringify(
Symbol.unscopables,
)} was accessed during render ` + `but is not defined on instance.`,
).toHaveBeenWarned()
})
test('should prevent mutating script setup bindings', () => {
const Comp = defineComponent({
render() {},
setup() {
return {
__isScriptSetup: true,
foo: 1,
}
},
mounted() {
expect('foo' in this).toBe(false)
try {
this.foo = 123
} catch (e) {}
},
})
render(h(Comp), nodeOps.createElement('div'))
expect(`Cannot mutate <script setup> binding "foo"`).toHaveBeenWarned()
})
}) | typescript | github | https://github.com/vuejs/core | packages/runtime-core/__tests__/componentPublicInstance.spec.ts |
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_COFF_AUXILIARY_CLR_TOKEN_H
#define LIEF_COFF_AUXILIARY_CLR_TOKEN_H
#include <memory>
#include "LIEF/visibility.h"
#include "LIEF/COFF/AuxiliarySymbol.hpp"
namespace LIEF {
namespace COFF {
class Symbol;
class Parser;
/// Auxiliary symbol associated with the `CLR_TOKEN` storage class
class LIEF_API AuxiliaryCLRToken : public AuxiliarySymbol {
public:
friend class Parser;
LIEF_LOCAL static std::unique_ptr<AuxiliaryCLRToken>
parse(const std::vector<uint8_t>& payload);
AuxiliaryCLRToken() :
AuxiliarySymbol(AuxiliarySymbol::TYPE::CLR_TOKEN)
{}
AuxiliaryCLRToken(uint8_t aux_type, uint8_t reserved, uint32_t symbol_idx,
std::vector<uint8_t> rgb_reserved) :
AuxiliarySymbol(AuxiliarySymbol::TYPE::CLR_TOKEN),
aux_type_(aux_type),
reserved_(reserved),
symbol_idx_(symbol_idx),
rgb_reserved_(std::move(rgb_reserved))
{}
AuxiliaryCLRToken(const AuxiliaryCLRToken&) = default;
AuxiliaryCLRToken& operator=(const AuxiliaryCLRToken&) = default;
AuxiliaryCLRToken(AuxiliaryCLRToken&&) = default;
AuxiliaryCLRToken& operator=(AuxiliaryCLRToken&&) = default;
std::unique_ptr<AuxiliarySymbol> clone() const override {
return std::unique_ptr<AuxiliaryCLRToken>(new AuxiliaryCLRToken{*this});
}
/// `IMAGE_AUX_SYMBOL_TYPE` which should be `IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF` (1)
uint8_t aux_type() const {
return aux_type_;
}
/// Reserved value (should be 0)
uint8_t reserved() const {
return aux_type_;
}
/// Index in the symbol table
uint32_t symbol_idx() const {
return symbol_idx_;
}
/// Symbol referenced by symbol_idx() (if resolved)
const Symbol* symbol() const {
return sym_;
}
Symbol* symbol() {
return sym_;
}
/// Reserved (padding) values. Should be 0
span<const uint8_t> rgb_reserved() const {
return rgb_reserved_;
}
std::string to_string() const override;
static bool classof(const AuxiliarySymbol* sym) {
return sym->type() == AuxiliarySymbol::TYPE::CLR_TOKEN;
}
~AuxiliaryCLRToken() override = default;
private:
uint8_t aux_type_ = 0;
uint8_t reserved_ = 0;
uint32_t symbol_idx_ = 0;
std::vector<uint8_t> rgb_reserved_;
Symbol* sym_ = nullptr;
};
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/COFF/AuxiliarySymbols/AuxiliaryCLRToken.hpp |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class MicroversionsTest(test.NoDBTestCase):
header_name = 'X-OpenStack-Nova-API-Version'
def _test_microversions(self, app, req, ret_code, ret_header=None):
req.environ['CONTENT_TYPE'] = "application/json"
res = req.get_response(app)
self.assertEqual(ret_code, res.status_int)
if ret_header:
self.assertEqual(ret_header,
res.headers[self.header_name])
return res
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_no_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
self.assertEqual("2.1", res.headers[self.header_name])
self.assertEqual(self.header_name, res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header_non_default(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {self.header_name: '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
self.assertEqual("2.3", res.headers[self.header_name])
self.assertEqual(self.header_name, res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_return_header_fault(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.0")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {self.header_name: '3.0'}
res = req.get_response(app)
self.assertEqual(400, res.status_int)
self.assertEqual("3.0", res.headers[self.header_name])
self.assertEqual(self.header_name, res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def _check_microversion_response(self, url, req_version, resp_param,
mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest('2.3')
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank(url)
req.headers = {self.header_name: req_version}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual(resp_param, resp_json['param'])
def test_microversions_with_header(self):
self._check_microversion_response('/v2/fake/microversions',
'2.3', 'val2')
def test_microversions_with_header_exact_match(self):
self._check_microversion_response('/v2/fake/microversions',
'2.2', 'val2')
def test_microversions2_no_2_1_version(self):
self._check_microversion_response('/v2/fake/microversions2',
'2.3', 'controller2_val1')
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_later_version(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.1")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {self.header_name: '3.0'}
res = req.get_response(app)
self.assertEqual(202, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val2', resp_json['param'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_high(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {self.header_name: '3.2'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_low(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {self.header_name: '2.1'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_global_version_too_high(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {self.header_name: '3.7'}
res = req.get_response(app)
self.assertEqual(406, res.status_int)
res_json = jsonutils.loads(res.body)
self.assertEqual("Version 3.7 is not supported by the API. "
"Minimum is 2.1 and maximum is 3.5.",
res_json['computeFault']['message'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3')
req.method = 'POST'
req.headers = {self.header_name: '2.2'}
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dumps({'dummy': {'val': 'foo'}})
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('create_val1', resp_json['param'])
self.assertEqual("2.2", res.headers[self.header_name])
self.assertEqual(self.header_name, res.headers['Vary'])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_fail(self, mock_namespace, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3')
req.method = 'POST'
req.headers = {self.header_name: '2.2'}
req.environ['CONTENT_TYPE'] = "application/json"
req.body = jsonutils.dumps({'dummy': {'invalid_param': 'foo'}})
res = req.get_response(app)
self.assertEqual(400, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertTrue(resp_json['badRequest']['message'].startswith(
"Invalid input for field/attribute dummy."))
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_out_of_version_check(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3/1')
req.method = 'PUT'
req.headers = {self.header_name: '2.2'}
req.body = jsonutils.dumps({'dummy': {'inv_val': 'foo'}})
req.environ['CONTENT_TYPE'] = "application/json"
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
self.assertEqual("2.2", res.headers[self.header_name])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_schema_second_version(self, mock_namespace,
mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.3")
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions3/1')
req.headers = {self.header_name: '2.10'}
req.environ['CONTENT_TYPE'] = "application/json"
req.method = 'PUT'
req.body = jsonutils.dumps({'dummy': {'val2': 'foo'}})
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('update_val1', resp_json['param'])
self.assertEqual("2.10", res.headers[self.header_name])
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
            return_value='nova.api.v3.test_extensions')
def _test_microversions_inner_function(self, version, expected_resp,
                                       mock_namespace,
                                       mock_maxver):
    # Shared driver: POST to microversions4 at `version` and check the
    # response payload matches `expected_resp`.  The mock.patch decorators
    # append the mock arguments, so callers pass only (version,
    # expected_resp).
    mock_maxver.return_value = api_version.APIVersionRequest("2.2")
    app = fakes.wsgi_app_v21(init_only='test-microversions')
    req = fakes.HTTPRequest.blank('/v2/fake/microversions4')
    req.headers = {self.header_name: version}
    req.environ['CONTENT_TYPE'] = "application/json"
    req.method = 'POST'
    res = req.get_response(app)
    self.assertEqual(200, res.status_int)
    resp_json = jsonutils.loads(res.body)
    self.assertEqual(expected_resp, resp_json['param'])
    self.assertEqual(version, res.headers[self.header_name])
def test_microversions_inner_function_v22(self):
    # Version 2.2 dispatches to the newer inner-function implementation.
    self._test_microversions_inner_function('2.2', 'controller4_val2')
def test_microversions_inner_function_v21(self):
    # Version 2.1 dispatches to the base inner-function implementation.
    self._test_microversions_inner_function('2.1', 'controller4_val1')
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
            return_value='nova.api.v3.test_extensions')
def test_with_extends_decorator(self, mock_namespace, mock_maxver):
    # The response must merge the base controller's payload with the keys
    # contributed by both extending controllers — exactly three keys total.
    mock_maxver.return_value = api_version.APIVersionRequest('2.4')
    app = fakes.wsgi_app_v21(init_only='test-microversions')
    req = fakes.HTTPRequest.blank('/v2/fake/microversions5/item')
    req.headers = {'X-OpenStack-Nova-API-Version': '2.4'}
    res = req.get_response(app)
    self.assertEqual(200, res.status_int)
    expected_res = {
        "extend_ctrlr2": "val_2",
        "extend_ctrlr1": "val_1",
        "base_param": "base_val"}
    resp_json = jsonutils.loads(res.body)
    # Every returned key must be expected, with the expected value ...
    for param in resp_json:
        self.assertIn(param, expected_res)
        self.assertEqual(expected_res[param], resp_json[param])
    # ... and no expected key may be missing.
    self.assertEqual(3, len(resp_json))
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
            return_value='nova.api.v3.test_extensions')
def _test_microversions_actions(self, ret_code, ret_header, req_header,
                                mock_namespace,
                                mock_maxver):
    # Shared driver: POST an action at version `req_header` (or with no
    # version header when None) and expect status `ret_code` and version
    # header `ret_header`.  Delegates the response checks to
    # self._test_microversions (defined elsewhere in this class).
    mock_maxver.return_value = api_version.APIVersionRequest("2.3")
    app = fakes.wsgi_app_v21(init_only='test-microversions')
    req = fakes.HTTPRequest.blank('/v2/fake/microversions3/1/action')
    if req_header:
        req.headers = {self.header_name: req_header}
    req.method = 'POST'
    req.body = jsonutils.dumps({'foo': None})
    res = self._test_microversions(app, req, ret_code,
                                   ret_header=ret_header)
    # Only successful (202) actions carry a JSON body to validate.
    if ret_code == 202:
        resp_json = jsonutils.loads(res.body)
        self.assertEqual({'foo': 'bar'}, resp_json)
def test_microversions_actions(self):
    # Explicit 2.1 request succeeds with 202.
    self._test_microversions_actions(202, "2.1", "2.1")
def test_microversions_actions_too_high(self):
    # At 2.3 the action is not routed any more, so expect 404.
    self._test_microversions_actions(404, "2.3", "2.3")
def test_microversions_actions_no_header(self):
    # Without a version header the minimum version (2.1) is assumed.
    self._test_microversions_actions(202, "2.1", None)
from __future__ import division, absolute_import, print_function
import os
import re
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command, find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log
compilers = ['IBMFCompiler']
class IBMFCompiler(FCompiler):
    """numpy.distutils driver for the IBM XL Fortran compiler family."""

    compiler_type = 'ibm'
    description = 'IBM XL Fortran Compiler'
    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004

    executables = {
        'version_cmd'  : ["<F77>", "-qversion"],
        'compiler_f77' : ["xlf"],
        'compiler_fix' : ["xlf90", "-qfixed"],
        'compiler_f90' : ["xlf90"],
        'linker_so'    : ["xlf95"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    def get_version(self, *args, **kwds):
        """Return the compiler version string.

        Falls back to platform-specific probes when the generic
        ``version_cmd`` parsing yields nothing: ``lslpp`` on AIX, and the
        versioned install directories under /etc/opt/ibmcmp/xlf on Linux.
        """
        version = FCompiler.get_version(self, *args, **kwds)

        if version is None and sys.platform.startswith('aix'):
            # use lslpp to find out xlf version
            lslpp = find_executable('lslpp')
            xlf = find_executable('xlf')
            if os.path.exists(xlf) and os.path.exists(lslpp):
                s, o = exec_command(lslpp + ' -Lc xlfcmp')
                # Raw string: '\d' in a plain literal is an invalid escape
                # and warns on modern Python.
                m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                if m:
                    version = m.group('version')

        xlf_dir = '/etc/opt/ibmcmp/xlf'
        if version is None and os.path.isdir(xlf_dir):
            # linux:
            # If the output of xlf does not contain version info
            # (that's the case with xlf 8.1, for instance) then
            # let's try another method: pick the highest versioned
            # subdirectory that actually contains an xlf.cfg.
            l = sorted(os.listdir(xlf_dir), reverse=True)
            l = [d for d in l
                 if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
            if l:
                from distutils.version import LooseVersion
                self.version = version = LooseVersion(l[0])
        return version

    def get_flags(self):
        # Append underscores to external names for f2py compatibility.
        return ['-qextname']

    def get_flags_debug(self):
        return ['-g']

    def get_flags_linker_so(self):
        """Return linker flags for building shared objects.

        On Darwin the crt entry in xlf.cfg must point at bundle1.o instead
        of crt1.o, so a patched temporary config file is generated and
        passed via -F.
        """
        opt = []
        if sys.platform == 'darwin':
            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
        else:
            opt.append('-bshared')
        # Exit status 40 is also acceptable when querying the version here.
        version = self.get_version(ok_status=[0, 40])
        if version is not None:
            if sys.platform.startswith('aix'):
                xlf_cfg = '/etc/xlf.cfg'
            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
            log.info('Creating ' + new_cfg)
            fi = open(xlf_cfg, 'r')
            crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
            for line in fi:
                m = crt1_match(line)
                if m:
                    fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
                else:
                    fo.write(line)
            fi.close()
            fo.close()
            opt.append('-F' + new_cfg)
        return opt

    def get_flags_opt(self):
        return ['-O3']
if __name__ == '__main__':
    # Ad-hoc smoke test: customize the compiler and print the detected
    # XL Fortran version.
    log.set_verbosity(2)
    compiler = IBMFCompiler()
    compiler.customize()
    print(compiler.get_version())
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
  """A one-unit fully connected layer with sigmoid activation."""
  probabilities = layers.fully_connected(
      inputs, 1, activation_fn=math_ops.sigmoid)
  return probabilities
def batchnorm_classifier(inputs):
  """Batch-normalize the inputs, then apply a one-unit sigmoid layer."""
  normalized = layers.batch_norm(inputs, decay=0.1, fused=False)
  return layers.fully_connected(normalized, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
  """Tests for the gradient-norm clipping helpers in contrib.training."""

  def testClipGrads(self):
    # d(4x)/dx == 4, which exceeds the cap of 3.0, so the clipped copy
    # must evaluate to 3.0 while the original gradient stays 4.0.
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms(
        gradients_to_variables, 3.0)
    with self.test_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())

  def testClipGradsFn(self):
    # Same check as above, but through the factory that returns a
    # grads-transform function for use with create_train_op.
    xs = variables_lib2.Variable(0.0)
    ys = xs * 4.0
    grads = gradients_impl.gradients([ys], [xs])
    gradients_to_variables = list(zip(grads, [xs]))
    clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
        gradients_to_variables)
    with self.test_session() as session:
      session.run(variables_lib2.global_variables_initializer())
      self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
      self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
  """Tests for training.create_train_op: collections, update_ops,
  and global-step handling."""

  def setUp(self):
    np.random.seed(0)
    # Create an easy training set:
    self._inputs = np.random.rand(16, 4).astype(np.float32)
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

  def testTrainOpInCollection(self):
    with ops.Graph().as_default():
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      # Make sure the training op was recorded in the proper collection
      self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))

  def testUseUpdateOps(self):
    # By default create_train_op runs the UPDATE_OPS collection, so the
    # batch-norm moving statistics must converge toward the batch stats.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      expected_mean = np.mean(self._inputs, axis=(0))
      expected_var = np.var(self._inputs, axis=(0))

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(loss, optimizer)

      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[
          0]

      with self.test_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          session.run(train_op)

        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # After 10 updates with decay 0.1 moving_mean == expected_mean and
        # moving_variance == expected_var.
        self.assertAllClose(mean, expected_mean)
        self.assertAllClose(variance, expected_var)

  def testEmptyUpdateOps(self):
    # Passing update_ops=[] suppresses the batch-norm updates entirely.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, update_ops=[])

      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[
          0]

      with self.test_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())
        mean, variance = session.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          session.run(train_op)

        mean = moving_mean.eval()
        variance = moving_variance.eval()

        # Since we skip update_ops the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

  def testGlobalStepIsIncrementedByDefault(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      global_step = variables_lib.get_or_create_global_step()

      with self.test_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())

        for _ in range(10):
          session.run(train_op)

        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step.eval(), 10)

  def testGlobalStepNotIncrementedWhenSetToNone(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, global_step=None)

      global_step = variables_lib.get_or_create_global_step()

      with self.test_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())

        for _ in range(10):
          session.run(train_op)

        # Since train_op don't use global_step it shouldn't change.
        self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
  """End-to-end training of the batch-norm classifier via training.train."""

  def setUp(self):
    # Create an easy training set: one-hot-ish rows whose active column
    # is determined by the label, so the problem is linearly separable.
    np.random.seed(0)

    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      # Train for 300 steps without checkpointing or summaries; the final
      # loss should be close to zero on this separable data set.
      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1)
class TrainTest(test.TestCase):
  """Tests for training.train: convergence, checkpoint resume/restore,
  training variable subsets, and gradient transformations."""

  def setUp(self):
    # Create an easy training set: one-hot-ish rows whose active column
    # is determined by the label, so the problem is linearly separable.
    np.random.seed(0)

    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainWithLocalVariable(self):
    # A local (non-global) variable in the model must not break training.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      local_multiplier = variables_lib.local_variable(1.0)

      tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    # Train, then resume twice from the saved checkpoint; the loss should
    # stay low across resumptions.
    number_of_steps = [300, 1, 5]
    logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')

    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=1.0)

        train_op = training.create_train_op(total_loss, optimizer)

        saver = saver_lib.Saver()

        loss = training.train(
            train_op,
            logdir,
            hooks=[
                basic_session_run_hooks.StopAtStepHook(
                    num_steps=number_of_steps[i]),
                basic_session_run_hooks.CheckpointSaverHook(
                    logdir, save_steps=50, saver=saver),
            ],
            save_checkpoint_secs=None,
            save_summaries_steps=None)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)

  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    # Helper: build a logistic-classifier train op, optionally scaling all
    # gradients by `gradient_multiplier` via transform_grads_fn.
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    def transform_grads_fn(grads):
      if gradient_multiplier != 1.0:
        variables = variables_lib2.trainable_variables()
        gradient_multipliers = {var: gradient_multiplier for var in variables}

        with ops.name_scope('multiply_grads'):
          return training.multiply_gradients(grads, gradient_multipliers)
      else:
        return grads

    return training.create_train_op(
        total_loss, optimizer, transform_grads_fn=transform_grads_fn)

  def testTrainWithInitFromCheckpoint(self):
    logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
    logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')

    if gfile.Exists(logdir1):  # For running on jenkins.
      gfile.DeleteRecursively(logdir1)
    if gfile.Exists(logdir2):  # For running on jenkins.
      gfile.DeleteRecursively(logdir2)

    # First, train the model one step (make sure the error is high).
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=1, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=1),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .5)

    # Next, train the model to convergence.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      train_op = self.create_train_op()
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir1,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir1, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)

    # Finally, advance the model a single step and validate that the loss is
    # still low.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      train_op = self.create_train_op()

      model_variables = variables_lib2.global_variables()
      model_path = saver_lib.latest_checkpoint(logdir1)

      assign_fn = variables_lib.assign_from_checkpoint_fn(
          model_path, model_variables)

      def init_fn(_, session):
        assign_fn(session)

      loss = training.train(
          train_op,
          None,
          scaffold=monitored_session.Scaffold(init_fn=init_fn),
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
          save_checkpoint_secs=None,
          save_summaries_steps=None)

      self.assertIsNotNone(loss)
      self.assertLess(loss, .02)

  def ModelLoss(self):
    # Helper: build the logistic model and return its total loss tensor.
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    return losses.get_total_loss()

  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
    if gfile.Exists(logdir):  # For running on jenkins.
      gfile.DeleteRecursively(logdir)

    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights = variables_lib.get_variables_by_name('weights')

      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=weights)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=200, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=200),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Next, train the biases of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      biases = variables_lib.get_variables_by_name('biases')

      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=biases)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=300, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Finally, train both weights and bias to get lower loss.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=400),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

  def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights, biases = variables_lib.get_variables()

      train_op = training.create_train_op(total_loss, optimizer)
      train_weights = training.create_train_op(
          total_loss, optimizer, variables_to_train=[weights])
      train_biases = training.create_train_op(
          total_loss, optimizer, variables_to_train=[biases])

      with self.test_session() as session:
        # Initialize the variables.
        session.run(variables_lib2.global_variables_initializer())

        # Get the initial weights and biases values.
        weights_values, biases_values = session.run([weights, biases])
        self.assertGreater(np.linalg.norm(weights_values), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values), 0)

        # Update weights and biases.
        loss = session.run(train_op)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the weights and biases have been updated.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)

        weights_values, biases_values = new_weights, new_biases

        # Update only weights.
        loss = session.run(train_weights)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the weights have been updated, but biases have not.
        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
        self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
        weights_values = new_weights

        # Update only biases.
        loss = session.run(train_biases)
        self.assertGreater(loss, .5)
        new_weights, new_biases = session.run([weights, biases])

        # Check that the biases have been updated, but weights have not.
        self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)

  def testTrainWithAlteredGradients(self):
    # Use the same learning rate but different gradient multipliers
    # to train two models. Model with equivalently larger learning
    # rate (i.e., learning_rate * gradient_multiplier) has smaller
    # training loss.
    multipliers = [1., 1000.]
    number_of_steps = 10
    learning_rate = 0.001

    # First, train the model with equivalently smaller learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[0])

      loss0 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss0)
      self.assertGreater(loss0, .5)

    # Second, train the model with equivalently larger learning rate.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      train_op = self.create_train_op(
          learning_rate=learning_rate, gradient_multiplier=multipliers[1])

      loss1 = training.train(
          train_op,
          None,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
          ],
          save_checkpoint_secs=None,
          save_summaries_steps=None)
      self.assertIsNotNone(loss1)
      self.assertLess(loss1, .5)

    # The loss of the model trained with larger learning rate should
    # be smaller.
    self.assertGreater(loss0, loss1)
if __name__ == '__main__':
  # Run all test cases in this module.
  test.main()
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
#from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
#from ansible.plugins.connection.chroot import Connection as ChrootConnection
#from ansible.plugins.connection.funcd import Connection as FuncdConnection
#from ansible.plugins.connection.jail import Connection as JailConnection
#from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
#from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
class TestConnectionBaseClass(unittest.TestCase):
    """Smoke tests for Ansible connection plugins: the ConnectionBase ABC
    must reject incomplete subclasses, and each concrete plugin must be
    instantiable with a PlayContext and an input stream."""

    def setUp(self):
        self.play_context = PlayContext()
        self.in_stream = StringIO()

    def tearDown(self):
        pass

    def test_subclass_error(self):
        # Subclassing without implementing the abstract methods must fail
        # at instantiation time with TypeError.
        class ConnectionModule1(ConnectionBase):
            pass
        with self.assertRaises(TypeError):
            ConnectionModule1()

        # Implementing an unrelated method is not enough either.
        class ConnectionModule2(ConnectionBase):
            def get(self, key):
                super(ConnectionModule2, self).get(key)
        with self.assertRaises(TypeError):
            ConnectionModule2()

    def test_subclass_success(self):
        # A subclass providing every abstract member can be instantiated.
        class ConnectionModule3(ConnectionBase):
            @property
            def transport(self):
                pass
            def _connect(self):
                pass
            def exec_command(self):
                pass
            def put_file(self):
                pass
            def fetch_file(self):
                pass
            def close(self):
                pass
        self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)

# def test_accelerate_connection_module(self):
# self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
#
# def test_chroot_connection_module(self):
# self.assertIsInstance(ChrootConnection(), ChrootConnection)
#
# def test_funcd_connection_module(self):
# self.assertIsInstance(FuncdConnection(), FuncdConnection)
#
# def test_jail_connection_module(self):
# self.assertIsInstance(JailConnection(), JailConnection)
#
# def test_libvirt_lxc_connection_module(self):
# self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)

    def test_lxc_connection_module(self):
        self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)

    def test_local_connection_module(self):
        self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)

    def test_paramiko_connection_module(self):
        self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)

    def test_ssh_connection_module(self):
        self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)

    # NOTE(review): 'verison' in the mock argument names below is a typo;
    # harmless (the args are positional) but worth fixing at some point.
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_verison, mock_old_docker_version):
        # Docker < 1.3 must be rejected with an explicit error.
        self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')

    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
                              DockerConnection)

    # old version and new version fail
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
    def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
        # When neither version probe works the check itself must fail loudly.
        self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')

# def test_winrm_connection_module(self):
# self.assertIsInstance(WinRmConnection(), WinRmConnection)

    def test_network_cli_connection_module(self):
        # network_cli is also a ParamikoConnection subclass.
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), ParamikoConnection)
#._cv_part guppy.etc.RE
from guppy.etc.RE_Rect import chooserects
from guppy.etc.IterPermute import iterpermute
class InfiniteError(Exception):
    """Error for computations whose result would be infinite.
    (Raising sites are elsewhere in this module, not visible here.)"""
    pass
class WordsMemo:
    """Per-node cache of the words generated by a regular-expression node,
    keyed by word length."""

    def __init__(self, re, ch):
        self.re = re    # the RE node this memo belongs to
        self.ch = ch    # memo objects for the node's children
        self.xs = {}    # cache: word length -> list of words of that length
        self.N = 0

    def get_words_of_length(self, N):
        """Return the (cached) list of words of exactly length N."""
        try:
            return self.xs[N]
        except KeyError:
            words = self.re.get_words_of_length_memoized(N, self)
            self.xs[N] = words
            return words

    def get_words_of_length_upto(self, N):
        """Return [(length, words), ...] for lengths 0..N, omitting any
        length that yields no words."""
        pairs = []
        for length in range(N + 1):
            words = self.get_words_of_length(length)
            if words:
                pairs.append((length, words))
        return pairs
REBASE = tuple
class RE(REBASE):
    """Base class for regular-expression nodes, stored as tuples of children."""
    # The operators are chosen to be compatible with Pythonic standards:
    # o sets : using | for union
    # o strings, sequences : using + for concatenation.
    #
    # This differs from mathematical presentations of regular
    # expressions where + is the union, but it seemed more important
    # to not confuse the Python usage.
    # There are also operators for closure x*, x+ that can not be
    # represented directly in Python expressions and these were chosen
    # to use a function call syntax.
    # The following table summarizes the operators.
    # RE node expr     re lib    mathematical name
    # x + y            x y       x y    Concatenation
    # x | y            x | y     x + y  Union
    # x('*')           x*        x*     Kleene closure
    # x('+')           x+        x+     Positive closure
    # x('?')           x?

    # Characters with special meaning in regexp syntax.
    _re_special = r'.^$*+?{}\[]|()'

    def __add__(a, b):
        # Concatenation; non-RE operands are wrapped as single symbols.
        if isinstance(b, RE):
            return concat(a, b)
        else:
            return Concatenation(a, Single(b))

    def __call__(a, *args, **kwds):
        # x('*'), x('+'), x('?') build the closure variants.
        if not kwds:
            if args == ('*',):
                return KleeneClosure(a)
            elif args == ('+',):
                return PositiveClosure(a)
            elif args == ('?',):
                return EpsilonOrOne(a)
        raise ValueError, "Argument to regular expression must be '*' or '+' or '?'"

    def __eq__(a, b):
        # Equal when same node kind and same children.
        return (a._name == b._name and
                tuple(a) == tuple(b))

    def __lt__(a, b):
        # Order by node kind first, then by children.
        if a._name == b._name:
            return tuple(a) < tuple(b)
        else:
            return a._name < b._name

    def __or__(a, b):
        return Union(a, b)

    def get_num_closures(self):
        # Total number of closure nodes in the subtree.
        ns = 0
        for ch in self:
            ns += ch.get_num_closures()
        return ns

    def get_num_syms(self):
        # Total number of symbols in the subtree.
        ns = 0
        for ch in self:
            ns += ch.get_num_syms()
        return ns

    def get_sum_sym_lengths(self):
        # Sum of str() lengths of all symbols in the subtree.
        ns = 0
        for ch in self:
            ns += ch.get_sum_sym_lengths()
        return ns

    def get_words_memo(self):
        # Build a memo tree mirroring this expression tree.
        ch = [x.get_words_memo() for x in self]
        return WordsMemo(self, ch)

    def get_words_of_length(self, N):
        """Return the list of words of exactly length N denoted by this node."""
        xs = self.get_words_memo()
        return xs.get_words_of_length(N)

    def mapchildren(self, f):
        # Rebuild this node with f applied to each child.
        return self.__class__(*[f(x) for x in self])

    def regexpform(self):
        return self.mappedrepr(regexpname)

    def reversed(self):
        # Reverse of the denoted language (recursively reverse children).
        return self.mapchildren(lambda x:x.reversed())

    def rempretup(self):
        # Strip a leading tuple tag from every non-empty Seq in the tree.
        def f(x):
            if isinstance(x, Seq):
                if x is not Epsilon and isinstance(x[0], tuple):
                    ws = x[1:]
                    return Seq(*ws)
                else:
                    return x
            return x.mapchildren(f)
        return f(self)

    def seqatoms(self):
        # Flatten into a list of atomic sequence elements.
        sa = []
        self.apseqatoms(sa.append)
        return sa

    def sequni(self):
        # Union of the distinct sequences this expression denotes.
        d = {}
        us = []
        def ap(x):
            if x not in d:
                d[x] = 1
                us.append(x)
        self.apseq(ap)
        return Union(*us)

    def shform(self, conc = ' '):
        # Regexp-like rendering; `conc` replaces the space separator.
        r = self.mappedrepr(regexpname)
        if conc != ' ':
            r = conc.join(r.split(' '))
        return r

    def simplified(self, *a, **k):
        # Default: node is already simplified; subclasses override.
        return self

    def simulform(self):
        # Rendering used for simulation output; empty symbol shown as '()'.
        def f(x):
            if x == '':
                return '()'
            return str(x)
        return self.mappedrepr(f)
def regexpname(s):
    """Render symbol s for regexp display; regexp-special characters,
    quotes, commas and spaces are dropped. The empty symbol shows as '()'."""
    if s == '':
        return '()'
    dropped = RE._re_special + "', "
    return ''.join([c for c in str(s) if c not in dropped])
def re_compare(a, b):
    # cmp()-style comparison delegating to the node's __cmp__ (Python 2).
    # NOTE(review): RE defines __eq__/__lt__ but no explicit __cmp__;
    # presumably relies on Python 2 fallback behavior -- verify before reuse.
    return a.__cmp__(b)
class Seq(RE):
    """A literal word: a plain sequence of symbols (leaf of the RE tree)."""
    _priority = 0
    _name = 'Seq'

    def __new__(clas, *symbols):
        # The empty sequence is the shared Epsilon singleton.
        if not symbols:
            return Epsilon
        return REBASE.__new__(clas, symbols)

    def __repr__(self):
        return '%s(%s)'%(self.__class__.__name__, ', '.join(['%r'%(x,) for x in self]))

    def __hash__(self):
        # Hash via repr so equal sequences hash equally despite custom __eq__.
        return hash(repr(self))

    def apseq(self, ap):
        # A Seq denotes exactly one word: itself.
        ap(self)

    def apseqatoms(self, ap):
        # Atoms of a word are its one-symbol sequences.
        for x in self:
            ap(Single(x))

    def get_num_closures(self):
        return 0

    def get_num_syms(self):
        return len(self)

    def get_sum_sym_lengths(self):
        s = 0
        for x in self:
            s += len(str(x))
        return s

    def get_words_memo(self):
        # A leaf has no children to memoize.
        return WordsMemo(self, ())

    def get_words_of_length_memoized(self, N, memo):
        # The only word is self, of length len(self).
        if N == len(self):
            return [self]
        else:
            return []

    def limited(self, N):
        # A literal word needs no length limiting.
        return self

    def mappedrepr(self, f):
        if not self:
            return f('')
        return ' '.join(['%s'%(f(x),) for x in self])

    def reversed(self):
        r = list(self)
        r.reverse()
        return self.__class__(*r)

    def unionsplitted(self):
        return [self]
def Single(symbol):
    # Build a one-symbol Seq directly via tuple.__new__, bypassing
    # Seq.__new__'s empty-argument handling.
    return REBASE.__new__(Seq, (symbol,))

# The empty word: the unique zero-length Seq instance.
Epsilon = REBASE.__new__(Seq, ())
def concat(*args):
    """Concatenate RE nodes, dropping Epsilon operands; collapses to
    Epsilon or the single remaining operand when fewer than two are left."""
    parts = [a for a in args if a is not Epsilon]
    if len(parts) >= 2:
        return REBASE.__new__(Concatenation, parts)
    if parts:
        return parts[0]
    return Epsilon
class Concatenation(RE):
    """Concatenation node: children matched one after another."""
    _priority = 2
    _name = 'Concat'

    def __new__(clas, *args):
        #assert Epsilon not in args
        # Collapse 0 / 1 arguments to Epsilon / the single child.
        if len(args) < 2:
            if not args:
                return Epsilon
            return args[0]
        return REBASE.__new__(clas, args)

    def __repr__(self):
        rs = []
        for ch in self:
            r = '%r'%(ch,)
            if ch._priority > self._priority:
                r = '(%s)'%(r,)
            rs.append(r)
        return ' + '.join(rs)

    def apseq(self, ap):
        # Enumerate the cross product of the children's sequence unions,
        # stepping the index vector ixs like an odometer.
        uns = [x.sequni() for x in self]
        ixs = [0]*len(uns)
        while 1:
            xs = []
            for (i, us) in enumerate(uns):
                for x in us[ixs[i]]:
                    if x is not Epsilon:
                        xs.append(x)
            ap(Seq(*xs))
            j = 0
            for j, ix in enumerate(ixs):
                ix += 1
                if ix >= len(uns[j]):
                    ix = 0
                ixs[j] = ix
                if ix != 0:
                    break
            else:
                # Odometer wrapped around completely: all combinations done.
                break

    def apseqatoms(self, ap):
        for x in self:
            x.apseqatoms(ap)

    def get_words_of_length_memoized(self, N, memo):
        # Distribute the target length N over the children; each distinct
        # concatenation of child words is produced exactly once.
        chxs = []
        for ch in memo.ch:
            chxs.append(ch.get_words_of_length_upto(N))
        xs = []
        seen = {}
        def ads(xx, i, n):
            if i == len(chxs):
                if n == N:
                    for toconc in iterpermute(*xx):
                        conc = simple_Concatenation(toconc)
                        if conc not in seen:
                            xs.append(conc)
                            seen[conc] = 1
            else:
                for m, x in chxs[i]:
                    if n + m <= N:
                        ads(xx + [x], i + 1, n + m)
        ads([], 0, 0)
        return xs

    def limited(self, N):
        return Concatenation(*[x.limited(N) for x in self])

    def mappedrepr(self, f):
        rs = []
        for ch in self:
            r = ch.mappedrepr(f)
            if ch._priority > self._priority:
                r = '(%s)'%(r,)
            rs.append(r)
        return ' '.join(rs)

    def reversed(self):
        # Reverse each child, then reverse the order of the children.
        r = [x.reversed() for x in self]
        r.reverse()
        return self.__class__(*r)

    def simplified(self, *a, **k):
        conc = [x.simplified(*a, **k) for x in self]
        sa = []
        for c in conc:
            for a in c.seqatoms():
                sa.append(a)
        return simple_Concatenation(sa)

    def unionsplitted(self):
        # Expand unions among the children into a list of union-free
        # concatenations (cartesian product, odometer-style).
        runs = []
        uns = []
        for (i, x) in enumerate(self):
            us = x.unionsplitted()
            if len(us) > 1:
                uns.append((i, us))
        if not uns:
            return [self]
        ixs = [0]*len(uns)
        ch = list(self)
        while 1:
            xs = []
            i0 = 0
            for j, (i, us) in enumerate(uns):
                xs.extend(ch[i0:i])
                ix = ixs[j]
                xs.append(us[ix])
                i0 = i + 1
            xs.extend(ch[i0:])
            runs.append( concat(*xs) )
            j = 0
            for j, ix in enumerate(ixs):
                ix += 1
                if ix >= len(uns[j][1]):
                    ix = 0
                ixs[j] = ix
                if ix != 0:
                    break
            else:
                return runs
class SimplifiedConcatenation(Concatenation):
    """Marker subclass: an already-simplified concatenation."""
    def simplified(self, *a, **k):
        # Already simplified; avoid re-running the simplifier.
        return self
def conclosure(conc):
    # Simplification noted Mar 5 2005
    # Simplify ... b b* ... or ... b* b ... to ... b+ ...
    # conc is a sequence of regular expressions; returns a new list.
    seen = {}
    nconc = []
    w0 = None
    for w in conc:
        if w0 is not None:
            if (w._name == '*' and # Not isinstance(KleeneClosure), would catch PositiveClosure
                w[0] == w0):
                # b b* -> b+ : merge previous element into the closure.
                w = PositiveClosure(w0)
            elif (w0._name == '*' and
                  w0[0] == w):
                # b* b -> b+
                w = PositiveClosure(w)
            else:
                if w0 is not None:
                    nconc.append(w0)
        w0 = w
    if w0 is not None:
        nconc.append(w0)
    return nconc
def simple_Concatenation(conc):
    """Build a concatenation from the list `conc`, applying the b b* -> b+
    rewrite and fusing runs of adjacent Seq elements into single Seqs."""
    if len(conc) > 1:
        conc0 = conc
        conc = conclosure(conc)
    nconc = []
    i = 0
    j = 0
    while i < len(conc):
        e = conc[i]
        if not isinstance(e, Seq):
            i += 1
            nconc.append(e)
            continue
        # Find the maximal run conc[i:j] of consecutive Seq elements.
        j = i
        while j < len(conc):
            if not isinstance(conc[j], Seq):
                break
            j += 1
        if j == i + 1:
            nconc.append(e)
        else:
            # Fuse the run into a single Seq of all their symbols.
            syms = []
            for k in range(i, j):
                e = conc[k]
                syms.extend(list(e))
            nconc.append(Seq(*syms))
        i = j
    if len(nconc) > 1:
        return Concatenation(*nconc)
    elif nconc:
        return nconc[0]
    else:
        return Epsilon
# Cost functions used by chooserects to rank candidate simplifications:
# fewer symbols first, then fewer closures, then shorter symbol text.
gauges = [
    lambda x:x.get_num_syms(),
    lambda x:x.get_num_closures(),
    lambda x:x.get_sum_sym_lengths()
    ]
def simpleunion(lines, trace=''):
    """Build a simplified union of `lines` (each a list of sequence atoms)
    by repeatedly factoring out common prefixes/suffixes via chooserects."""
    choosen = chooserects(lines, gauges, trace)
    have_epsilon = 0
    while 1:
        if len(choosen) == 1 and (choosen[0].width == 0 or len(choosen[0].lines) == 1):
            # Nothing more to factor: collect the remaining lines.
            us = []
            for line in choosen[0].lines:
                if line:
                    us.append(line)
                else:
                    have_epsilon = 1
            break
        us = []
        for r in choosen:
            # Factor out the common part; recursively simplify the rest.
            conc = r.get_common_part()
            olines = r.get_uncommons()
            u = simpleunion(olines)
            if u is not Epsilon:
                if r.dir == -1:
                    conc = [u]+conc
                else:
                    conc = conc + [u]
            if conc:
                us.append(conc)
            else:
                have_epsilon = 1
            assert not isinstance(us[-1], str)
        choosen = chooserects(us, gauges, trace)
    if len(us) > 1:
        nus = [simple_Concatenation(line) for line in us]
        u = SimplifiedUnion(*nus)
    elif us:
        u = simple_Concatenation(us[0])
    else:
        u = None
    if have_epsilon:
        # The empty word is part of the union: wrap in '?', or yield Epsilon.
        if u is not None:
            u = simple_EpsilonOrOne(u)
        else:
            u = Epsilon
    return u
class Union(RE):
    """Union node: alternative branches (the `|` operator)."""
    _priority = 3
    _name = 'Union'

    def __new__(clas, *args):
        return REBASE.__new__(clas, args)

    def __repr__(self):
        rs = []
        for ch in self:
            r = '%r'%(ch,)
            if ch._priority > self._priority:
                r = '(%s)'%r
            rs.append(r)
        return ' | '.join(rs)

    def apseq(self, ap):
        for c in self:
            c.apseq(ap)

    def apseqatoms(self, ap):
        for x in self:
            x.apseqatoms(ap)

    def get_words_of_length_memoized(self, N, memo):
        # Union of the children's words, deduplicated, order preserved.
        xs = []
        seen = {}
        for ch in memo.ch:
            for x in ch.get_words_of_length(N):
                if x not in seen:
                    seen[x] = 1
                    xs.append(x)
        return xs

    def limited(self, N):
        # Rebuild only if some child actually changed.
        uni = [x.limited(N) for x in self]
        for i, x in enumerate(uni):
            if x is not self[i]:
                return self.__class__(*uni)
        return self

    def mappedrepr(self, f):
        rs = []
        for ch in self:
            r = '%s'%(ch.mappedrepr(f),)
            if ch._priority > self._priority:
                r = '(%s)'%r
            rs.append(r)
        return ' | '.join(rs)

    def simplified(self, args=None, trace='', *a, **k):
        if args is None:
            args = [x.simplified() for x in self.unionsplitted()]
            #args = [x for x in self.unionsplitted()]
        # Create a simplified union
        # Assuming args are simplified, non-unions
        ch = [a.seqatoms() for a in args]
        return simpleunion(ch, trace)

    def unionsplitted(self):
        # Flatten nested unions into one list of alternatives.
        us = []
        for x in self:
            us.extend(list(x.unionsplitted()))
        return us
class SimplifiedUnion(Union):
    """Marker subclass: an already-simplified union."""
    def simplified(self, *a, **k):
        # Already simplified; avoid re-running the simplifier.
        return self
class Called(RE):
    """Base for unary postfix operators built with call syntax: x('*'),
    x('+'), x('?'). Holds exactly one child; `_name` is the operator."""
    _priority = 1

    def __new__(clas, arg):
        return REBASE.__new__(clas, (arg,))

    def __repr__(self):
        ch = self[0]
        r = '%r'%(ch,)
        if ch._priority > self._priority:
            r = '(%s)'%r
        return "%s(%r)"%(r, self._name)

    def apseqatoms(self, ap):
        # A closure is itself an atom for sequence purposes.
        ap(self)

    def get_num_closures(self):
        return 1 + self[0].get_num_closures()

    def mappedrepr(self, f):
        ch = self[0]
        r = ch.mappedrepr(f)
        # Parenthesize lower-precedence children and multi-symbol words.
        if (ch._priority > self._priority
            or isinstance(ch, Seq) and len(ch) > 1):
            r = '(%s)'%r
        return "%s%s"%(r, self._name)

    def simplified(self, *a, **k):
        return self.__class__(self[0].simplified(*a, **k))
class Closure(Called):
    """Common word enumeration for Kleene and positive closures."""

    def get_words_of_length_memoized(self, N, memo):
        if N == 0:
            return [Epsilon]
        if N == 1:
            return memo.ch[0].get_words_of_length(1)
        # Words of length N are concatenations of shorter closure words
        # (split at every i), plus the child's own words of length N.
        xs = []
        seen = {}
        for i in range(1, N):
            a = memo.get_words_of_length(i)
            b = memo.get_words_of_length(N-i)
            for ai in a:
                for bi in b:
                    aibi = simple_Concatenation((ai, bi))
                    if aibi not in seen:
                        xs.append(aibi)
                        seen[aibi] = 1
        for x in memo.ch[0].get_words_of_length(N):
            if x not in seen:
                xs.append(x)
                seen[x] = 1
        return xs

    def unionsplitted(self):
        # Closures are opaque to union splitting.
        return [self]
class KleeneClosure(Closure):
    """Kleene closure x*: zero or more repetitions of the child."""
    _name = '*'

    def apseq(self, ap):
        # Infinitely many words: sequence enumeration is impossible.
        raise InfiniteError, 'apseq: Regular expression is infinite: contains a Kleene Closure'

    def limited(self, N):
        # Finite approximation: union of 0..N repetitions of the limited child.
        if N == 0:
            return Epsilon
        cl = self[0].limited(N)
        uni = []
        for i in range(N+1):
            toconc = [cl]*i
            uni.append(Concatenation(*toconc))
        return Union(*uni)

    def simplified(self, *a, **k):
        return simple_KleeneClosure(self[0].simplified(*a, **k))
def simple_KleeneClosure(x):
    """Kleene closure constructor with the (b+)* -> b* rewrite applied."""
    # (b+)* -> b*
    if x._name == '+':
        return simple_KleeneClosure(x[0])
    return KleeneClosure(x)
class PositiveClosure(Closure):
    """Positive closure x+: one or more repetitions of the child."""
    _name = '+'

    def apseq(self, ap):
        # Infinitely many words: sequence enumeration is impossible.
        raise InfiniteError, 'apseq: Regular expression is infinite: contains a Positive Closure'

    def apseqatoms(self, ap):
        # x+ == x x*, so emit the child's atoms followed by x* as an atom.
        self[0].apseqatoms(ap)
        simple_KleeneClosure(self[0]).apseqatoms(ap)

    def get_words_of_length_memoized(self, N, memo):
        if N <= 1:
            return memo.ch[0].get_words_of_length(N)
        return Closure.get_words_of_length_memoized(self, N, memo)

    def limited(self, N):
        # Finite approximation of x+ as (limited x) (limited x*).
        a = self[0].limited(N)
        b = KleeneClosure(self[0]).limited(N)
        return Concatenation(a, b)
class EpsilonOrOne(Called):
    """Optional operator x?: the empty word or one occurrence of the child."""
    _name = '?'

    def apseq(self, ap):
        ap(Epsilon)
        self[0].apseq(ap)

    def get_words_of_length_memoized(self, N, memo):
        if N == 0:
            return [Epsilon]
        return memo.ch[0].get_words_of_length(N)

    def limited(self, N):
        # Rebuild only if limiting actually changed the child.
        x = self[0].limited(N)
        if x is not self[0]:
            self = self.__class__(x)
        return self

    def simplified(self, *a, **k):
        return simple_EpsilonOrOne(self[0].simplified(*a, **k))

    def unionsplitted(self):
        return [Epsilon] + list(self[0].unionsplitted())
def simple_EpsilonOrOne(x):
    """Optional-operator constructor with closure rewrites applied."""
    # (a+)? -> a*
    if x._name == '+':
        return simple_KleeneClosure(x)
    # (a*)? -> a*
    if x._name == '*':
        return x
    return EpsilonOrOne(x)
class RegularSystem:
    """System of regular equations derived from a finite state automaton.

    solve() eliminates state variables one by one (Arden-style) and
    returns the regular expression for the language of the automaton.
    """

    def __init__(self, table, Start, final_states):
        # table: {state: {symbol: next_state}} transition table.
        self.table = table
        self.Start = Start
        # Unique sentinel naming the synthetic final state.
        self.Final = '358f0eca5c34bacdfbf6a8ac0ccf84bc'
        self.final_states = final_states

    def pp(self):
        """Pretty-print the equation system, one equation per line."""
        def statename(state):
            try:
                name = self.names[state]
            except KeyError:
                name = str(state)
            return name
        def transname(trans):
            name = trans.simulform()
            if trans._priority > 1:
                name = '(%s)'%(name,)
            return name
        self.setup_names()
        X = self.X
        xs = [self.Start]+self.order
        xs.append(self.Final)
        for Xk in xs:
            if Xk not in X:
                continue
            print '%3s = '%(statename(Xk),),
            Tk = X[Xk]
            es = []
            for Xj in xs:
                if Xj in Tk:
                    es.append('%s %s'%(transname(Tk[Xj]), statename(Xj)))
            if es:
                print ' | '.join(es)
            else:
                print

    def setup_equations(self):
        """Build X: {state: {next_state: coefficient-RE}} from the table."""
        table = self.table
        final_states = self.final_states
        Final = self.Final
        self.X = X = {Final:{}}
        for Xi, transitions in table.items():
            X[Xi] = Ti = {}
            for (symbol, Xj) in transitions.items():
                Ti.setdefault(Xj, []).append(Single(symbol))
            for Xj, Aij in Ti.items():
                # Several symbols to the same target become a union.
                if len(Aij) > 1:
                    Aij.sort()
                    Aij = Union(*Aij)
                else:
                    Aij = Aij[0]
                Ti[Xj] = Aij
            if Xi in final_states:
                # Final states can terminate with the empty word.
                Ti[Final] = Epsilon

    def setup_order(self):
        """Choose the elimination order of the state variables."""
        def dists(X, start):
            # BFS distance of every reachable state from `start`.
            i = 0
            S = {start:i}
            news = [start]
            while news:
                oldnews = news
                news = []
                i += 1
                for s in oldnews:
                    if s not in X:
                        continue
                    for t in X[s]:
                        if t not in S:
                            news.append(t)
                            S[t] = i
            return S
        def start_distance(x):
            return start_dists[x]
        def sumt(f):
            # Lift f to the (memoized) sum of f over a state's successors.
            memo = {}
            def g(x):
                if x in memo:
                    return memo[x]
                s = 0.0
                for y in X[x]:
                    s += f(y)
                memo[x] = s
                return s
            return g
        def cmp3(x, y):
            # Comparison for the sorting of equation solving order
            # First in list = solved last
            if x is y:
                return 0
            c = cmp(len(X[y]), len(X[x])) # Equations with more terms are resolved later
            if c:
                return c
            # The equations with terms more distant from start node will be resolved earlier
            i = 0
            while i < 10: # 4 was enough with tests so far at Feb 24 2005
                try:
                    f = sumdists[i]
                except:
                    # Lazily extend the hierarchy of summed distance measures.
                    f = sumt(sumdists[i-1])
                    sumdists.append(f)
                c = cmp(f(x), f(y))
                if c:
                    return c
                i += 1
            #pdb.set_trace()
            return cmp(x, y)
        sumdists = [start_distance]
        X = self.X
        Start = self.Start
        Final = self.Final
        start_dists = dists(X, Start)
        order = [x for x in start_dists if x is not Start and x is not Final]
        order.sort(cmp3)
        self.order = order

    def setup_names(self):
        """Assign display names X0, X1, ... to the states for pp()."""
        try:
            self.order
        except AttributeError:
            self.setup_order()
        self.names = {}
        self.names[self.Start] = 'X0'
        for i, s in enumerate(self.order):
            self.names[s] = 'X%d'%(i+1)
        self.names[self.Final] = 'Final'

    def solve(self):
        """Solve the equation system; return the RE for the automaton."""
        # Set up equation system
        self.setup_equations()
        self.setup_order()
        X = self.X
        Start = self.Start
        Final = self.Final
        todo = list(self.order)
        # Solve equation system
        while todo:
            Xk = todo.pop()
            Tk = X[Xk]
            if Xk in Tk:
                # Recursive equation
                # Eliminate Akk Xk, using Adler's theorem
                # Given:
                # Xk = Ak0 X0 | ... Akk Xk |.. Akn Xkn
                # we get:
                # Xk = Akk* (Ak0 X0 | ... <no Xk> ... | Akn Xn)
                # which we evaluate to:
                # Xk = Bk0 X0 | ... Bkn Xn
                # where coefficients get the new values
                # Bki := Akk* Aki
                Akk = Tk[Xk]
                del Tk[Xk]
                AkkStar = Akk('*')
                for Xi, Aki in Tk.items():
                    Bki = AkkStar + Aki
                    Tk[Xi] = Bki
            # Substitute Xk in each other equation in X
            # containing Xk, except eqv. Xk itself, which will not be used any more..
            del X[Xk]
            for Xj, Tj in X.items():
                Bjk = Tj.get(Xk)
                if Bjk is None:
                    continue
                del Tj[Xk]
                for Xji, Tk_Xji in Tk.items():
                    Cji = (Bjk + Tk_Xji)
                    Bji = Tj.get(Xji)
                    if Bji is not None:
                        Cji = Bji | Cji
                    Tj[Xji] = Cji
        # The equation system is now solved
        # The result is in Final term of Start equation
        return X[Start][Final]
# The empty union: a regular expression denoting no word at all.
Nothing = Union()
def SolveFSA(fsa):
    """Convert a finite state automaton to an equivalent regular expression
    by solving its system of regular equations."""
    RS = RegularSystem(fsa.table, fsa.start_state, fsa.final_states)
    return RS.solve()
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
//! The Assets module allows you to read files that have been bundled by tauri
//! during both compile time and runtime.
#[doc(hidden)]
pub use phf;
use std::{
borrow::Cow,
path::{Component, Path},
};
/// The token used for script nonces.
pub const SCRIPT_NONCE_TOKEN: &str = "__TAURI_SCRIPT_NONCE__";
/// The token used for style nonces.
pub const STYLE_NONCE_TOKEN: &str = "__TAURI_STYLE_NONCE__";
/// Assets iterator.
pub type AssetsIter<'a> = dyn Iterator<Item = (Cow<'a, str>, Cow<'a, [u8]>)> + 'a;
/// Represent an asset file path in a normalized way.
///
/// The following rules are enforced and added if needed:
/// * Unix path component separators
/// * Has a root directory
/// * No trailing slash - directories are not included in assets
///
/// Construct one via the blanket `From<impl AsRef<Path>>` impl below.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AssetKey(String);
impl From<AssetKey> for String {
fn from(key: AssetKey) -> Self {
key.0
}
}
impl AsRef<str> for AssetKey {
  /// Borrow the normalized path string.
  fn as_ref(&self) -> &str {
    self.0.as_str()
  }
}
impl<P: AsRef<Path>> From<P> for AssetKey {
  fn from(path: P) -> Self {
    // TODO: change this to utilize `Cow` to prevent allocating an intermediate `PathBuf` when not necessary
    let path = path.as_ref().to_owned();

    // add in root to mimic how it is used from a server url
    let path = if path.has_root() {
      path
    } else {
      Path::new(&Component::RootDir).join(path)
    };

    let buf = if cfg!(windows) {
      // on Windows, rebuild the path component by component so the
      // separators become `/` regardless of the native `\` separator
      let mut buf = String::new();
      for component in path.components() {
        match component {
          Component::RootDir => buf.push('/'),
          Component::CurDir => buf.push_str("./"),
          Component::ParentDir => buf.push_str("../"),
          Component::Prefix(prefix) => buf.push_str(&prefix.as_os_str().to_string_lossy()),
          Component::Normal(s) => {
            buf.push_str(&s.to_string_lossy());
            buf.push('/')
          }
        }
      }

      // remove the last slash
      if buf != "/" {
        buf.pop();
      }

      buf
    } else {
      // on Unix the separators are already `/`
      path.to_string_lossy().to_string()
    };

    AssetKey(buf)
  }
}
/// A Content-Security-Policy hash value for a specific directive.
/// For more information see [the MDN page](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy#directives).
// `non_exhaustive` so new directives can be added without a breaking change.
#[non_exhaustive]
#[derive(Debug, Clone, Copy)]
pub enum CspHash<'a> {
  /// The `script-src` directive.
  Script(&'a str),
  /// The `style-src` directive.
  Style(&'a str),
}
impl CspHash<'_> {
  /// The Content-Security-Policy directive this hash applies to.
  pub fn directive(&self) -> &'static str {
    match self {
      Self::Script(_) => "script-src",
      Self::Style(_) => "style-src",
    }
  }

  /// The value of the Content-Security-Policy hash.
  pub fn hash(&self) -> &str {
    match self {
      Self::Script(hash) => hash,
      Self::Style(hash) => hash,
    }
  }
}
/// [`Assets`] implementation that only contains compile-time compressed and embedded assets.
pub struct EmbeddedAssets {
  /// Raw asset bytes keyed by normalized asset path.
  assets: phf::Map<&'static str, &'static [u8]>,
  /// Hashes that must be injected to the CSP of every HTML file.
  global_hashes: &'static [CspHash<'static>],
  /// Hashes that are associated to the CSP of the HTML file identified by the map key (the HTML asset key).
  html_hashes: phf::Map<&'static str, &'static [CspHash<'static>]>,
}
/// Temporary struct that overrides the Debug formatting for the `assets` field.
///
/// It reduces the output size compared to the default, as that would format the binary
/// data as a slice of numbers like `[65, 66, 67]` for "ABC". This instead shows the length
/// of the slice.
///
/// For example: `{"/index.html": [u8; 1835], "/index.js": [u8; 212]}`
struct DebugAssetMap<'a>(&'a phf::Map<&'static str, &'static [u8]>);
impl std::fmt::Debug for DebugAssetMap<'_> {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let mut map = f.debug_map();
    for (k, v) in self.0.entries() {
      map.key(k);
      // Show only the byte length, not the full binary contents.
      map.value(&format_args!("[u8; {}]", v.len()));
    }
    map.finish()
  }
}
impl std::fmt::Debug for EmbeddedAssets {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("EmbeddedAssets")
      // Wrapped so the asset bytes print as lengths, not raw data.
      .field("assets", &DebugAssetMap(&self.assets))
      .field("global_hashes", &self.global_hashes)
      .field("html_hashes", &self.html_hashes)
      .finish()
  }
}
impl EmbeddedAssets {
/// Creates a new instance from the given asset map and script hash list.
pub const fn new(
map: phf::Map<&'static str, &'static [u8]>,
global_hashes: &'static [CspHash<'static>],
html_hashes: phf::Map<&'static str, &'static [CspHash<'static>]>,
) -> Self {
Self {
assets: map,
global_hashes,
html_hashes,
}
}
/// Get an asset by key.
#[cfg(feature = "compression")]
pub fn get(&self, key: &AssetKey) -> Option<Cow<'_, [u8]>> {
self
.assets
.get(key.as_ref())
.map(|&(mut asdf)| {
// with the exception of extremely small files, output should usually be
// at least as large as the compressed version.
let mut buf = Vec::with_capacity(asdf.len());
brotli::BrotliDecompress(&mut asdf, &mut buf).map(|()| buf)
})
.and_then(Result::ok)
.map(Cow::Owned)
}
/// Get an asset by key.
#[cfg(not(feature = "compression"))]
pub fn get(&self, key: &AssetKey) -> Option<Cow<'_, [u8]>> {
self
.assets
.get(key.as_ref())
.copied()
.map(|a| Cow::Owned(a.to_vec()))
}
/// Iterate on the assets.
pub fn iter(&self) -> Box<AssetsIter<'_>> {
Box::new(
self
.assets
.into_iter()
.map(|(k, b)| (Cow::Borrowed(*k), Cow::Borrowed(*b))),
)
}
/// CSP hashes for the given asset.
pub fn csp_hashes(&self, html_path: &AssetKey) -> Box<dyn Iterator<Item = CspHash<'_>> + '_> {
Box::new(
self
.global_hashes
.iter()
.chain(
self
.html_hashes
.get(html_path.as_ref())
.copied()
.into_iter()
.flatten(),
)
.copied(),
)
}
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-utils/src/assets.rs |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin.internals;
import org.apache.kafka.clients.admin.AbortTransactionSpec;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.TransactionCoordinatorFencedException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.message.WriteTxnMarkersRequestData;
import org.apache.kafka.common.message.WriteTxnMarkersResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.WriteTxnMarkersRequest;
import org.apache.kafka.common.requests.WriteTxnMarkersResponse;
import org.apache.kafka.common.utils.LogContext;
import org.junit.jupiter.api.Test;
import java.util.Set;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class AbortTransactionHandlerTest {
private final LogContext logContext = new LogContext();
private final TopicPartition topicPartition = new TopicPartition("foo", 5);
private final AbortTransactionSpec abortSpec = new AbortTransactionSpec(
topicPartition, 12345L, (short) 15, 4321);
private final Node node = new Node(1, "host", 1234);
@Test
public void testInvalidBuildRequestCall() {
AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
assertThrows(IllegalArgumentException.class, () -> handler.buildRequest(1,
emptySet()));
assertThrows(IllegalArgumentException.class, () -> handler.buildRequest(1,
Set.of(new TopicPartition("foo", 1))));
assertThrows(IllegalArgumentException.class, () -> handler.buildRequest(1,
Set.of(topicPartition, new TopicPartition("foo", 1))));
}
@Test
public void testValidBuildRequestCall() {
AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
WriteTxnMarkersRequest.Builder request = handler.buildBatchedRequest(1, singleton(topicPartition));
assertEquals(1, request.data.markers().size());
WriteTxnMarkersRequestData.WritableTxnMarker markerRequest = request.data.markers().get(0);
assertEquals(abortSpec.producerId(), markerRequest.producerId());
assertEquals(abortSpec.producerEpoch(), markerRequest.producerEpoch());
assertEquals(abortSpec.coordinatorEpoch(), markerRequest.coordinatorEpoch());
assertEquals(1, markerRequest.topics().size());
WriteTxnMarkersRequestData.WritableTxnMarkerTopic topicRequest = markerRequest.topics().get(0);
assertEquals(abortSpec.topicPartition().topic(), topicRequest.name());
assertEquals(singletonList(abortSpec.topicPartition().partition()), topicRequest.partitionIndexes());
}
@Test
public void testInvalidHandleResponseCall() {
AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
WriteTxnMarkersResponseData response = new WriteTxnMarkersResponseData();
assertThrows(IllegalArgumentException.class, () -> handler.handleResponse(node,
emptySet(), new WriteTxnMarkersResponse(response)));
assertThrows(IllegalArgumentException.class, () -> handler.handleResponse(node,
Set.of(new TopicPartition("foo", 1)), new WriteTxnMarkersResponse(response)));
assertThrows(IllegalArgumentException.class, () -> handler.handleResponse(node,
Set.of(topicPartition, new TopicPartition("foo", 1)), new WriteTxnMarkersResponse(response)));
}
@Test
public void testInvalidResponse() {
AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
WriteTxnMarkersResponseData response = new WriteTxnMarkersResponseData();
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
WriteTxnMarkersResponseData.WritableTxnMarkerResult markerResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerResult();
response.markers().add(markerResponse);
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
markerResponse.setProducerId(abortSpec.producerId());
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult topicResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult();
markerResponse.topics().add(topicResponse);
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
topicResponse.setName(abortSpec.topicPartition().topic());
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult partitionResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult();
topicResponse.partitions().add(partitionResponse);
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
partitionResponse.setPartitionIndex(abortSpec.topicPartition().partition());
topicResponse.setName(abortSpec.topicPartition().topic() + "random");
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
topicResponse.setName(abortSpec.topicPartition().topic());
markerResponse.setProducerId(abortSpec.producerId() + 1);
assertFailed(KafkaException.class, topicPartition, handler.handleResponse(node, singleton(topicPartition),
new WriteTxnMarkersResponse(response)));
}
@Test
public void testSuccessfulResponse() {
assertCompleted(abortSpec.topicPartition(), handleWithError(abortSpec, Errors.NONE));
}
@Test
public void testRetriableErrors() {
assertUnmapped(abortSpec.topicPartition(), handleWithError(abortSpec, Errors.NOT_LEADER_OR_FOLLOWER));
assertUnmapped(abortSpec.topicPartition(), handleWithError(abortSpec, Errors.UNKNOWN_TOPIC_OR_PARTITION));
assertUnmapped(abortSpec.topicPartition(), handleWithError(abortSpec, Errors.REPLICA_NOT_AVAILABLE));
assertUnmapped(abortSpec.topicPartition(), handleWithError(abortSpec, Errors.BROKER_NOT_AVAILABLE));
}
@Test
public void testFatalErrors() {
assertFailed(ClusterAuthorizationException.class, abortSpec.topicPartition(),
handleWithError(abortSpec, Errors.CLUSTER_AUTHORIZATION_FAILED));
assertFailed(InvalidProducerEpochException.class, abortSpec.topicPartition(),
handleWithError(abortSpec, Errors.INVALID_PRODUCER_EPOCH));
assertFailed(TransactionCoordinatorFencedException.class, abortSpec.topicPartition(),
handleWithError(abortSpec, Errors.TRANSACTION_COORDINATOR_FENCED));
assertFailed(UnknownServerException.class, abortSpec.topicPartition(),
handleWithError(abortSpec, Errors.UNKNOWN_SERVER_ERROR));
}
private AdminApiHandler.ApiResult<TopicPartition, Void> handleWithError(
AbortTransactionSpec abortSpec,
Errors error
) {
AbortTransactionHandler handler = new AbortTransactionHandler(abortSpec, logContext);
WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult partitionResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult()
.setPartitionIndex(abortSpec.topicPartition().partition())
.setErrorCode(error.code());
WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult topicResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult()
.setName(abortSpec.topicPartition().topic());
topicResponse.partitions().add(partitionResponse);
WriteTxnMarkersResponseData.WritableTxnMarkerResult markerResponse =
new WriteTxnMarkersResponseData.WritableTxnMarkerResult()
.setProducerId(abortSpec.producerId());
markerResponse.topics().add(topicResponse);
WriteTxnMarkersResponseData response = new WriteTxnMarkersResponseData();
response.markers().add(markerResponse);
return handler.handleResponse(node, singleton(abortSpec.topicPartition()),
new WriteTxnMarkersResponse(response));
}
private void assertUnmapped(
TopicPartition topicPartition,
AdminApiHandler.ApiResult<TopicPartition, Void> result
) {
assertEquals(emptySet(), result.completedKeys.keySet());
assertEquals(emptySet(), result.failedKeys.keySet());
assertEquals(singletonList(topicPartition), result.unmappedKeys);
}
private void assertCompleted(
TopicPartition topicPartition,
AdminApiHandler.ApiResult<TopicPartition, Void> result
) {
assertEquals(emptySet(), result.failedKeys.keySet());
assertEquals(emptyList(), result.unmappedKeys);
assertEquals(singleton(topicPartition), result.completedKeys.keySet());
assertNull(result.completedKeys.get(topicPartition));
}
/**
 * Asserts that the handler failed for exactly the given partition with an
 * exception of the expected type, and produced no completed or unmapped keys.
 */
private void assertFailed(
Class<? extends Throwable> expectedExceptionType,
TopicPartition topicPartition,
AdminApiHandler.ApiResult<TopicPartition, Void> result
) {
assertEquals(emptySet(), result.completedKeys.keySet());
assertEquals(emptyList(), result.unmappedKeys);
assertEquals(singleton(topicPartition), result.failedKeys.keySet());
assertInstanceOf(expectedExceptionType, result.failedKeys.get(topicPartition));
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/clients/admin/internals/AbortTransactionHandlerTest.java |
# Some useful functions to extract data out of emails
# Copyright (C) 2002-2015 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import email
from email.Parser import Parser as MailParser
def get_message_date(content, header='Date'):
    """Parse a raw mail message and return the timestamp of a date header.

    :param content: full message source as a string.
    :param header: the header to extract the date from.
    :returns: Unix timestamp, or `None` when the header is missing or
        cannot be parsed.
    """
    message = MailParser().parsestr(content, True)
    # parsedate_tz() yields a 10-tuple suitable for mktime_tz(), or None
    # when the header is absent or not in a valid format.  Note that
    # indexes 6, 7, and 8 of the tuple are not usable.
    parsed = email.utils.parsedate_tz(message.get(header))
    if parsed is None:
        return None
    return email.utils.mktime_tz(parsed)
"""
parser.http.utils module (imdb package).
This module provides miscellaneous utilities used by
the imdb.parser.http classes.
Copyright 2004-2012 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import logging
import warnings
from imdb._exceptions import IMDbError
from imdb.utils import flatten, _Container
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.Character import Character
# Year, imdbIndex and kind: matches strings like "(1999/II) (TV)", used to
# carry the year/kind information along with title references.
re_yearKind_index = re.compile(r'(\([0-9\?]{4}(?:/[IVXLCDM]+)?\)(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)')
# Match imdb ids in href tags
re_imdbid = re.compile(r'(title/tt|name/nm|character/ch|company/co)([0-9]+)')
def analyze_imdbid(href):
    """Extract the numeric imdbID embedded in an URL; return it as a
    string, or None when the URL is empty or contains no known id."""
    if not href:
        return None
    found = re_imdbid.search(href)
    if found is None:
        return None
    return str(found.group(2))
# Keys whose values are allowed to receive (qv) reference substitutions.
_modify_keys = list(Movie.keys_tomodify_list) + list(Person.keys_tomodify_list)
def _putRefs(d, re_titles, re_names, re_characters, lastKey=None):
    """Iterate over the strings inside list items or dictionary values,
    substitutes movie titles and person names with the (qv) references.

    The mutation is done in place; *lastKey* tracks the enclosing dict key
    so that only values under keys in _modify_keys are rewritten."""
    if isinstance(d, list):
        for i in xrange(len(d)):
            if isinstance(d[i], (unicode, str)):
                if lastKey in _modify_keys:
                    if re_names:
                        d[i] = re_names.sub(ur"'\1' (qv)", d[i])
                    if re_titles:
                        d[i] = re_titles.sub(ur'_\1_ (qv)', d[i])
                    if re_characters:
                        d[i] = re_characters.sub(ur'#\1# (qv)', d[i])
            elif isinstance(d[i], (list, dict)):
                # Recurse into nested containers, keeping the current key.
                _putRefs(d[i], re_titles, re_names, re_characters,
                         lastKey=lastKey)
    elif isinstance(d, dict):
        for k, v in d.items():
            lastKey = k
            if isinstance(v, (unicode, str)):
                if lastKey in _modify_keys:
                    if re_names:
                        d[k] = re_names.sub(ur"'\1' (qv)", v)
                    if re_titles:
                        d[k] = re_titles.sub(ur'_\1_ (qv)', v)
                    if re_characters:
                        d[k] = re_characters.sub(ur'#\1# (qv)', v)
            elif isinstance(v, (list, dict)):
                _putRefs(d[k], re_titles, re_names, re_characters,
                         lastKey=lastKey)
# Handle HTML/XML/SGML entities.
from htmlentitydefs import entitydefs
entitydefs = entitydefs.copy()
entitydefsget = entitydefs.get
entitydefs['nbsp'] = ' '
# SGML-reserved entities that must stay escaped in the output.
sgmlentity = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\'', 'ndash': '-'}
sgmlentityget = sgmlentity.get
_sgmlentkeys = sgmlentity.keys()
# Mapping from both named and numeric ("#NNN") references to their
# unicode values, excluding the SGML-reserved entities above.
entcharrefs = {}
entcharrefsget = entcharrefs.get
for _k, _v in entitydefs.items():
    if _k in _sgmlentkeys: continue
    if _v[0:2] == '&#':
        # The entity expands to a numeric char reference: decode it.
        dec_code = _v[1:-1]
        _v = unichr(int(_v[2:-1]))
        entcharrefs[dec_code] = _v
    else:
        # The entity expands to a raw latin-1 character.
        dec_code = '#' + str(ord(_v))
        _v = unicode(_v, 'latin_1', 'replace')
        entcharrefs[dec_code] = _v
    entcharrefs[_k] = _v
del _sgmlentkeys, _k, _v
entcharrefs['#160'] = u' '
entcharrefs['#xA0'] = u' '
entcharrefs['#xa0'] = u' '
entcharrefs['#XA0'] = u' '
entcharrefs['#x22'] = u'"'
entcharrefs['#X22'] = u'"'
# convert &x26; to &, to make BeautifulSoup happy; beware that this
# leaves lone '&' in the html broken, but I assume this is better than
# the contrary...
entcharrefs['#38'] = u'&'
entcharrefs['#x26'] = u'&'
# NOTE(review): the next line repeats the previous assignment verbatim;
# harmless duplicate, kept as-is.
entcharrefs['#x26'] = u'&'
re_entcharrefs = re.compile('&(%s|\#160|\#\d{1,5}|\#x[0-9a-f]{1,4});' %
                            '|'.join(map(re.escape, entcharrefs)), re.I)
re_entcharrefssub = re_entcharrefs.sub
sgmlentity.update(dict([('#34', u'"'), ('#38', u'&'),
                        ('#60', u'<'), ('#62', u'>'), ('#39', u"'")]))
re_sgmlref = re.compile('&(%s);' % '|'.join(map(re.escape, sgmlentity)))
re_sgmlrefsub = re_sgmlref.sub
# Matches XML-only single tags, like <br/> ; they are invalid in HTML,
# but widely used by IMDb web site. :-/
re_xmltags = re.compile('<([a-zA-Z]+)/>')
def _replXMLRef(match):
    """Replace the matched XML/HTML entities and references;
    replace everything except sgml entities like &lt;, &gt;, ..."""
    ref = match.group(1)
    value = entcharrefsget(ref)
    if value is None:
        if ref[0] == '#':
            ref_code = ref[1:]
            if ref_code in ('34', '38', '60', '62', '39'):
                # SGML-reserved codes: keep the reference untouched.
                return match.group(0)
            elif ref_code[0].lower() == 'x':
                #if ref[2:] == '26':
                #    # Don't convert &x26; to &, to make BeautifulSoup happy.
                #    return '&'
                # Hexadecimal char reference.
                return unichr(int(ref[2:], 16))
            else:
                # Decimal char reference.
                return unichr(int(ref[1:]))
        else:
            # Unknown named entity: return it unchanged (without '&'/';').
            return ref
    return value
def subXMLRefs(s):
    """Return the given html string with entity and char references
    replaced with their unicode values."""
    return re_entcharrefs.sub(_replXMLRef, s)
# XXX: no more used here; move it to mobile (they are imported by helpers, too)?
def _replSGMLRefs(match):
    """Return the replacement for a matched SGML entity (the entity
    itself, when unknown)."""
    name = match.group(1)
    return sgmlentity.get(name, name)
def subSGMLRefs(s):
    """Return the given html string with sgml entity and char references
    replaced."""
    return re_sgmlref.sub(_replSGMLRefs, s)
# Module-level logger used by build_person(), created only once.
_b_p_logger = logging.getLogger('imdbpy.parser.http.build_person')
def build_person(txt, personID=None, billingPos=None,
                 roleID=None, accessSystem='http', modFunct=None):
    """Return a Person instance from the typical <tr>...</tr> strings
    found in the IMDb's web site.

    :param txt: the raw "name .... role (notes)" text to parse.
    :param personID: the personID (converted to a string, if not None).
    :param billingPos: the billing position, passed through to Person.
    :param roleID: a characterID, or a list of them for multiple roles.
    :param accessSystem: passed through to the Person instance.
    :param modFunct: passed through to the Person instance.
    """
    notes = u''
    role = u''
    # Search the (optional) separator between name and role/notes.
    if txt.find('....') != -1:
        sep = '....'
    elif txt.find('...') != -1:
        sep = '...'
    else:
        sep = '...'
        # Replace the first parenthesis, assuming there are only
        # notes, after.
        # Rationale: no imdbIndex is (ever?) showed on the web site.
        txt = txt.replace('(', '...(', 1)
    txt_split = txt.split(sep, 1)
    name = txt_split[0].strip()
    if len(txt_split) == 2:
        role_comment = txt_split[1].strip()
        # Strip common endings.
        if role_comment[-4:] == ' and':
            role_comment = role_comment[:-4].rstrip()
        elif role_comment[-2:] == ' &':
            role_comment = role_comment[:-2].rstrip()
        elif role_comment[-6:] == '& ....':
            role_comment = role_comment[:-6].rstrip()
        # Get the notes.
        if roleID is not None:
            if not isinstance(roleID, list):
                cmt_idx = role_comment.find('(')
                if cmt_idx != -1:
                    role = role_comment[:cmt_idx].rstrip()
                    notes = role_comment[cmt_idx:]
                else:
                    # Just a role, without notes.
                    role = role_comment
            else:
                role = role_comment
        else:
            # We're managing something that doesn't have a 'role', so
            # everything are notes.
            notes = role_comment
    if role == '....':
        role = u''
    roleNotes = []
    # Manages multiple roleIDs.
    if isinstance(roleID, list):
        rolesplit = role.split('/')
        role = []
        for r in rolesplit:
            nidx = r.find('(')
            if nidx != -1:
                role.append(r[:nidx].rstrip())
                roleNotes.append(r[nidx:])
            else:
                role.append(r)
                roleNotes.append(None)
        lr = len(role)
        lrid = len(roleID)
        if lr > lrid:
            # BUGFIX: pad roleID up to the number of parsed roles.  The
            # previous code multiplied by the negative (lrid - lr), which
            # always produced an empty list and never padded anything
            # (and could later raise IndexError on roleID[0]).
            roleID += [None] * (lr - lrid)
        elif lr < lrid:
            roleID = roleID[:lr]
        for i, rid in enumerate(roleID):
            if rid is not None:
                roleID[i] = str(rid)
        if lr == 1:
            role = role[0]
            roleID = roleID[0]
            notes = roleNotes[0] or u''
    elif roleID is not None:
        roleID = str(roleID)
    if personID is not None:
        personID = str(personID)
    if (not name) or (personID is None):
        # Set to 'debug', since build_person is expected to receive some crap.
        _b_p_logger.debug('empty name or personID for "%s"', txt)
    # XXX: return None if something strange is detected?
    person = Person(name=name, personID=personID, currentRole=role,
                    roleID=roleID, notes=notes, billingPos=billingPos,
                    modFunct=modFunct, accessSystem=accessSystem)
    # Attach per-role notes; guard against a scalar None roleID (a case
    # that used to crash before the padding fix above).
    if roleNotes and roleID is not None and len(roleNotes) == len(roleID):
        for idx, role in enumerate(person.currentRole):
            if roleNotes[idx]:
                role.notes = roleNotes[idx]
    return person
# Matches a 7-digit characterID inside a string.
_re_chrIDs = re.compile('[0-9]{7}')
# Module-level logger used by build_movie().
_b_m_logger = logging.getLogger('imdbpy.parser.http.build_movie')
# To shrink spaces.
re_spaces = re.compile(r'\s+')
def build_movie(txt, movieID=None, roleID=None, status=None,
                accessSystem='http', modFunct=None, _parsingCharacter=False,
                _parsingCompany=False, year=None, chrRoles=None,
                rolesNoChar=None, additionalNotes=None):
    """Given a string as normally seen on the "categorized" page of
    a person on the IMDb's web site, returns a Movie instance.

    :param txt: the raw "title .... role (notes)" text to parse.
    :param movieID: the movieID (converted to a string, if not None).
    :param roleID: a characterID, or a list of them for multiple roles.
    :param status: production status, stored in the Movie if present.
    :param year: release year, appended to the title when given.
    :param chrRoles: '@@'-separated character names (character parsing).
    :param rolesNoChar: '/'-separated roles without a characterID.
    :param additionalNotes: extra notes appended to the parsed ones.
    """
    # FIXME: Oook, lets face it: build_movie and build_person are now
    # two horrible sets of patches to support the new IMDb design. They
    # must be rewritten from scratch.
    if _parsingCharacter:
        _defSep = ' Played by '
    elif _parsingCompany:
        _defSep = ' ... '
    else:
        _defSep = ' .... '
    title = re_spaces.sub(' ', txt).strip()
    # Split the role/notes from the movie title.
    tsplit = title.split(_defSep, 1)
    role = u''
    notes = u''
    roleNotes = []
    if len(tsplit) == 2:
        title = tsplit[0].rstrip()
        role = tsplit[1].lstrip()
    if title[-9:] == 'TV Series':
        title = title[:-9].rstrip()
    elif title[-14:] == 'TV mini-series':
        title = title[:-14] + ' (mini)'
    if title and title.endswith(_defSep.rstrip()):
        title = title[:-len(_defSep)+1]
    # Try to understand where the movie title ends.
    while True:
        if year:
            break
        if title[-1:] != ')':
            # Ignore the silly "TV Series" notice.
            if title[-9:] == 'TV Series':
                title = title[:-9].rstrip()
                continue
            else:
                # Just a title: stop here.
                break
        # Try to match paired parentheses; yes: sometimes there are
        # parentheses inside comments...
        nidx = title.rfind('(')
        while (nidx != -1 and
                title[nidx:].count('(') != title[nidx:].count(')')):
            nidx = title[:nidx].rfind('(')
        # Unbalanced parentheses: stop here.
        if nidx == -1:
            break
        # The last item in parentheses seems to be a year: stop here.
        first4 = title[nidx+1:nidx+5]
        if (first4.isdigit() or first4 == '????') and \
                title[nidx+5:nidx+6] in (')', '/'):
            break
        # The last item in parentheses is a known kind: stop here.
        if title[nidx+1:-1] in ('TV', 'V', 'mini', 'VG', 'TV movie',
                                'TV series', 'short'):
            break
        # Else, in parentheses there are some notes.
        # XXX: should the notes in the role half be kept separated
        #      from the notes in the movie title half?
        if notes:
            notes = '%s %s' % (title[nidx:], notes)
        else:
            notes = title[nidx:]
        title = title[:nidx].rstrip()
    if year:
        year = year.strip()
        if title[-1:] == ')':
            fpIdx = title.rfind('(')
            if fpIdx != -1:
                if notes:
                    notes = '%s %s' % (title[fpIdx:], notes)
                else:
                    notes = title[fpIdx:]
                title = title[:fpIdx].rstrip()
        title = u'%s (%s)' % (title, year)
    if _parsingCharacter and roleID and not role:
        roleID = None
    if not roleID:
        roleID = None
    elif len(roleID) == 1:
        roleID = roleID[0]
    if not role and chrRoles and isinstance(roleID, (str, unicode)):
        roleID = _re_chrIDs.findall(roleID)
        role = ' / '.join(filter(None, chrRoles.split('@@')))
    # Manages multiple roleIDs.
    if isinstance(roleID, list):
        tmprole = role.split('/')
        role = []
        for r in tmprole:
            nidx = r.find('(')
            if nidx != -1:
                role.append(r[:nidx].rstrip())
                roleNotes.append(r[nidx:])
            else:
                role.append(r)
                roleNotes.append(None)
        lr = len(role)
        lrid = len(roleID)
        if lr > lrid:
            # BUGFIX: pad roleID up to the number of parsed roles.  The
            # previous code multiplied by the negative (lrid - lr), which
            # always produced an empty list and never padded anything.
            roleID += [None] * (lr - lrid)
        elif lr < lrid:
            roleID = roleID[:lr]
        for i, rid in enumerate(roleID):
            if rid is not None:
                roleID[i] = str(rid)
        if lr == 1:
            role = role[0]
            roleID = roleID[0]
    elif roleID is not None:
        roleID = str(roleID)
    if movieID is not None:
        movieID = str(movieID)
    if (not title) or (movieID is None):
        _b_m_logger.error('empty title or movieID for "%s"', txt)
    if rolesNoChar:
        rolesNoChar = filter(None, [x.strip() for x in rolesNoChar.split('/')])
        if not role:
            role = []
        elif not isinstance(role, list):
            role = [role]
        role += rolesNoChar
    notes = notes.strip()
    if additionalNotes:
        additionalNotes = re_spaces.sub(' ', additionalNotes).strip()
        if notes:
            notes += u' '
        notes += additionalNotes
    # Drop a trailing role that is just a duplicate of the notes.
    if role and isinstance(role, list) and notes.endswith(role[-1].replace('\n', ' ')):
        role = role[:-1]
    m = Movie(title=title, movieID=movieID, notes=notes, currentRole=role,
              roleID=roleID, roleIsPerson=_parsingCharacter,
              modFunct=modFunct, accessSystem=accessSystem)
    # Attach per-role notes; guard against a scalar None roleID.
    if roleNotes and roleID is not None and len(roleNotes) == len(roleID):
        for idx, role in enumerate(m.currentRole):
            try:
                if roleNotes[idx]:
                    role.notes = roleNotes[idx]
            except IndexError:
                break
    # Status can't be checked here, and must be detected by the parser.
    if status:
        m['status'] = status
    return m
class DOMParserBase(object):
    """Base parser to handle HTML data from the IMDb's web server.

    Subclasses define `preprocessors` and `extractors` (Extractor/Attribute
    rules) and may override the _init/_reset/preprocess_*/postprocess_data
    hooks; the main entry point is parse()."""
    _defGetRefs = False
    _containsObjects = False
    preprocessors = []
    extractors = []
    usingModule = None
    _logger = logging.getLogger('imdbpy.parser.http.domparser')

    def __init__(self, useModule=None):
        """Initialize the parser. useModule can be used to force it
        to use 'BeautifulSoup' or 'lxml'; by default, it's auto-detected,
        using 'lxml' if available and falling back to 'BeautifulSoup'
        otherwise."""
        # Module to use.
        if useModule is None:
            useModule = ('lxml', 'BeautifulSoup')
        if not isinstance(useModule, (tuple, list)):
            useModule = [useModule]
        self._useModule = useModule
        nrMods = len(useModule)
        _gotError = False
        # Try each candidate module in order, stopping at the first that
        # imports cleanly.
        for idx, mod in enumerate(useModule):
            mod = mod.strip().lower()
            try:
                if mod == 'lxml':
                    from lxml.html import fromstring
                    from lxml.etree import tostring
                    self._is_xml_unicode = False
                    self.usingModule = 'lxml'
                elif mod == 'beautifulsoup':
                    from bsouplxml.html import fromstring
                    from bsouplxml.etree import tostring
                    self._is_xml_unicode = True
                    self.usingModule = 'beautifulsoup'
                else:
                    self._logger.warn('unknown module "%s"' % mod)
                    continue
                self.fromstring = fromstring
                self._tostring = tostring
                if _gotError:
                    warnings.warn('falling back to "%s"' % mod)
                break
            except ImportError, e:
                if idx+1 >= nrMods:
                    # Raise the exception, if we don't have any more
                    # options to try.
                    raise IMDbError('unable to use any parser in %s: %s' % \
                                    (str(useModule), str(e)))
                else:
                    warnings.warn('unable to use "%s": %s' % (mod, str(e)))
                    _gotError = True
                continue
        else:
            # for/else: the loop never hit 'break', so no module was usable.
            raise IMDbError('unable to use parsers in %s' % str(useModule))
        # Fall-back defaults.
        self._modFunct = None
        self._as = 'http'
        self._cname = self.__class__.__name__
        self._init()
        self.reset()

    def reset(self):
        """Reset the parser."""
        # Names and titles references.
        self._namesRefs = {}
        self._titlesRefs = {}
        self._charactersRefs = {}
        self._reset()

    def _init(self):
        """Subclasses can override this method, if needed."""
        pass

    def _reset(self):
        """Subclasses can override this method, if needed."""
        pass

    def parse(self, html_string, getRefs=None, **kwds):
        """Return the dictionary generated from the given html string;
        getRefs can be used to force the gathering of movies/persons/characters
        references."""
        self.reset()
        if getRefs is not None:
            self.getRefs = getRefs
        else:
            self.getRefs = self._defGetRefs
        # Useful only for the testsuite.
        if not isinstance(html_string, unicode):
            html_string = unicode(html_string, 'latin_1', 'replace')
        html_string = subXMLRefs(html_string)
        # Temporary fix: self.parse_dom must work even for empty strings.
        html_string = self.preprocess_string(html_string)
        html_string = html_string.strip()
        if self.usingModule == 'beautifulsoup':
            # Doubled quotes in tag attributes (the result of decoded
            # &#34; entities, e.g. title=""Family Guy"") confuse
            # BeautifulSoup: collapse them.
            html_string = html_string.replace('""', '"')
            # Browser-specific escapes create problems to BeautifulSoup.
            html_string = html_string.replace('<!--[if IE]>', '"')
            html_string = html_string.replace('<![endif]-->', '"')
        #print html_string.encode('utf8')
        if html_string:
            dom = self.get_dom(html_string)
            #print self.tostring(dom).encode('utf8')
            try:
                dom = self.preprocess_dom(dom)
            except Exception, e:
                self._logger.error('%s: caught exception preprocessing DOM',
                                   self._cname, exc_info=True)
            if self.getRefs:
                try:
                    self.gather_refs(dom)
                except Exception, e:
                    self._logger.warn('%s: unable to gather refs: %s',
                                      self._cname, exc_info=True)
            data = self.parse_dom(dom)
        else:
            data = {}
        try:
            data = self.postprocess_data(data)
        except Exception, e:
            self._logger.error('%s: caught exception postprocessing data',
                               self._cname, exc_info=True)
        if self._containsObjects:
            self.set_objects_params(data)
        data = self.add_refs(data)
        return data

    def _build_empty_dom(self):
        # Fallback DOM used when parsing fails completely.
        from bsouplxml import _bsoup
        return _bsoup.BeautifulSoup('')

    def get_dom(self, html_string):
        """Return a dom object, from the given string."""
        try:
            dom = self.fromstring(html_string)
            if dom is None:
                dom = self._build_empty_dom()
                self._logger.error('%s: using a fake empty DOM', self._cname)
            return dom
        except Exception, e:
            self._logger.error('%s: caught exception parsing DOM',
                               self._cname, exc_info=True)
            return self._build_empty_dom()

    def xpath(self, element, path):
        """Return elements matching the given XPath."""
        try:
            xpath_result = element.xpath(path)
            if self._is_xml_unicode:
                return xpath_result
            result = []
            for item in xpath_result:
                if isinstance(item, str):
                    item = unicode(item)
                result.append(item)
            return result
        except Exception, e:
            self._logger.error('%s: caught exception extracting XPath "%s"',
                               self._cname, path, exc_info=True)
            return []

    def tostring(self, element):
        """Convert the element to a string."""
        if isinstance(element, (unicode, str)):
            return unicode(element)
        else:
            try:
                return self._tostring(element, encoding=unicode)
            except Exception, e:
                self._logger.error('%s: unable to convert to string',
                                   self._cname, exc_info=True)
                return u''

    def clone(self, element):
        """Clone an element."""
        return self.fromstring(self.tostring(element))

    def preprocess_string(self, html_string):
        """Here we can modify the text, before it's parsed."""
        if not html_string:
            return html_string
        # Remove silly » and – chars.
        html_string = html_string.replace(u' \xbb', u'')
        html_string = html_string.replace(u'–', u'-')
        try:
            preprocessors = self.preprocessors
        except AttributeError:
            return html_string
        # Each preprocessor is a (src, sub) pair: a compiled regex, a plain
        # string or a callable.
        for src, sub in preprocessors:
            # re._pattern_type is present only since Python 2.5.
            if callable(getattr(src, 'sub', None)):
                html_string = src.sub(sub, html_string)
            elif isinstance(src, str):
                html_string = html_string.replace(src, sub)
            elif callable(src):
                try:
                    html_string = src(html_string)
                except Exception, e:
                    _msg = '%s: caught exception preprocessing html'
                    self._logger.error(_msg, self._cname, exc_info=True)
                    continue
        ##print html_string.encode('utf8')
        return html_string

    def gather_refs(self, dom):
        """Collect references."""
        grParser = GatherRefs(useModule=self._useModule)
        grParser._as = self._as
        grParser._modFunct = self._modFunct
        refs = grParser.parse_dom(dom)
        refs = grParser.postprocess_data(refs)
        self._namesRefs = refs['names refs']
        self._titlesRefs = refs['titles refs']
        self._charactersRefs = refs['characters refs']

    def preprocess_dom(self, dom):
        """Last chance to modify the dom, before the rules in self.extractors
        are applied by the parse_dom method."""
        return dom

    def parse_dom(self, dom):
        """Parse the given dom according to the rules specified
        in self.extractors."""
        result = {}
        for extractor in self.extractors:
            ##print extractor.label
            if extractor.group is None:
                elements = [(extractor.label, element)
                            for element in self.xpath(dom, extractor.path)]
            else:
                # Grouped extraction: the key of each element comes from
                # its enclosing group node.
                groups = self.xpath(dom, extractor.group)
                elements = []
                for group in groups:
                    group_key = self.xpath(group, extractor.group_key)
                    if not group_key: continue
                    group_key = group_key[0]
                    # XXX: always tries the conversion to unicode:
                    #      BeautifulSoup.NavigableString is a subclass
                    #      of unicode, and so it's never converted.
                    group_key = self.tostring(group_key)
                    normalizer = extractor.group_key_normalize
                    if normalizer is not None:
                        if callable(normalizer):
                            try:
                                group_key = normalizer(group_key)
                            except Exception, e:
                                _m = '%s: unable to apply group_key normalizer'
                                self._logger.error(_m, self._cname,
                                                   exc_info=True)
                    group_elements = self.xpath(group, extractor.path)
                    elements.extend([(group_key, element)
                                     for element in group_elements])
            for group_key, element in elements:
                for attr in extractor.attrs:
                    if isinstance(attr.path, dict):
                        # Multi-field attribute: one sub-xpath per field.
                        data = {}
                        for field in attr.path.keys():
                            path = attr.path[field]
                            value = self.xpath(element, path)
                            if not value:
                                data[field] = None
                            else:
                                # XXX: use u'' , to join?
                                data[field] = ''.join(value)
                    else:
                        data = self.xpath(element, attr.path)
                        if not data:
                            data = None
                        else:
                            data = attr.joiner.join(data)
                    if not data:
                        continue
                    attr_postprocess = attr.postprocess
                    if callable(attr_postprocess):
                        try:
                            data = attr_postprocess(data)
                        except Exception, e:
                            _m = '%s: unable to apply attr postprocess'
                            self._logger.error(_m, self._cname, exc_info=True)
                    key = attr.key
                    if key is None:
                        key = group_key
                    elif key.startswith('.'):
                        # assuming this is an xpath
                        try:
                            key = self.xpath(element, key)[0]
                        except IndexError:
                            self._logger.error('%s: XPath returned no items',
                                               self._cname, exc_info=True)
                    elif key.startswith('self.'):
                        # 'self.attrname' keys are resolved on the parser.
                        key = getattr(self, key[5:])
                    if attr.multi:
                        if key not in result:
                            result[key] = []
                        result[key].append(data)
                    else:
                        if isinstance(data, dict):
                            result.update(data)
                        else:
                            result[key] = data
        return result

    def postprocess_data(self, data):
        """Here we can modify the data."""
        return data

    def set_objects_params(self, data):
        """Set parameters of Movie/Person/... instances, since they are
        not always set in the parser's code."""
        for obj in flatten(data, yieldDictKeys=True, scalar=_Container):
            obj.accessSystem = self._as
            obj.modFunct = self._modFunct

    def add_refs(self, data):
        """Modify data according to the expected output."""
        if self.getRefs:
            titl_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                           in self._titlesRefs.keys()])
            if titl_re != ur'()': re_titles = re.compile(titl_re, re.U)
            else: re_titles = None
            nam_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                          in self._namesRefs.keys()])
            if nam_re != ur'()': re_names = re.compile(nam_re, re.U)
            else: re_names = None
            chr_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                          in self._charactersRefs.keys()])
            if chr_re != ur'()': re_characters = re.compile(chr_re, re.U)
            else: re_characters = None
            _putRefs(data, re_titles, re_names, re_characters)
        return {'data': data, 'titlesRefs': self._titlesRefs,
                'namesRefs': self._namesRefs,
                'charactersRefs': self._charactersRefs}
class Extractor(object):
    """Instruct the DOM parser about how to parse a document."""

    def __init__(self, label, path, attrs, group=None, group_key=None,
                 group_key_normalize=None):
        """Initialize an Extractor object, used to instruct the DOM parser
        about how to parse a document."""
        # The label is rarely (never?) used, mostly for debugging purposes.
        self.label = label
        self.group = group
        if group_key is not None:
            self.group_key = group_key
        else:
            # Default key: the text content of the group node.
            self.group_key = ".//text()"
        self.group_key_normalize = group_key_normalize
        self.path = path
        # A single Attribute is normalized to a one-element list.
        if isinstance(attrs, Attribute):
            attrs = [attrs]
        self.attrs = attrs

    def __repr__(self):
        """String representation of an Extractor object."""
        return '<Extractor id:%s (label=%s, path=%s, attrs=%s, group=%s, ' \
               'group_key=%s group_key_normalize=%s)>' % (
                   id(self), self.label, self.path, repr(self.attrs),
                   self.group, self.group_key, self.group_key_normalize)
class Attribute(object):
    """The attribute to consider, for a given node."""

    def __init__(self, key, multi=False, path=None, joiner=None,
                 postprocess=None):
        """Initialize an Attribute object, used to specify the
        attribute to consider, for a given node."""
        # The key under which information will be saved; can be a string or an
        # XPath. If None, the label of the containing extractor will be used.
        self.key = key
        self.multi = multi
        self.path = path
        if joiner is None:
            # An empty joiner concatenates the extracted pieces directly.
            joiner = ''
        self.joiner = joiner
        # Post-process this set of information.
        self.postprocess = postprocess

    def __repr__(self):
        """String representation of an Attribute object."""
        return '<Attribute id:%s (key=%s, multi=%s, path=%s, joiner=%s, ' \
               'postprocess=%s)>' % (
                   id(self), self.key, self.multi, repr(self.path),
                   self.joiner, repr(self.postprocess))
def _parse_ref(text, link, info):
    """Build a (text, link) pair for a reference, enriching title links
    with the year/kind information found right after them."""
    if '/title/tt' in link:
        matched = re_yearKind_index.match(info)
        if matched and matched.start() == 0:
            text += ' %s' % info[:matched.end()]
    return (text.replace('\n', ' '), link)
class GatherRefs(DOMParserBase):
    """Parser used to gather references to movies, persons and characters."""
    # Shared attribute rule: collect text, href and the text immediately
    # following each anchor (used for the year/kind of titles).
    _attrs = [Attribute(key=None, multi=True,
                        path={
                            'text': './text()',
                            'link': './@href',
                            'info': './following::text()[1]'
                        },
                        postprocess=lambda x: _parse_ref(x.get('text') or u'', x.get('link') or '',
                                                         (x.get('info') or u'').strip()))]
    # The string-length filters select only canonical anchors like
    # "/name/nm0000001/" (16 chars), "/title/tt0000001/" (17) and
    # "/character/ch0000001/" (21).
    extractors = [
        Extractor(label='names refs',
                  path="//a[starts-with(@href, '/name/nm')][string-length(@href)=16]",
                  attrs=_attrs),
        Extractor(label='titles refs',
                  path="//a[starts-with(@href, '/title/tt')]" \
                       "[string-length(@href)=17]",
                  attrs=_attrs),
        Extractor(label='characters refs',
                  path="//a[starts-with(@href, '/character/ch')]" \
                       "[string-length(@href)=21]",
                  attrs=_attrs),
    ]

    def postprocess_data(self, data):
        """Turn the raw (text, link) pairs into {label: Person/Movie/Character}
        mappings keyed by the anchor text."""
        result = {}
        for item in ('names refs', 'titles refs', 'characters refs'):
            result[item] = {}
            for k, v in data.get(item, []):
                k = k.strip()
                v = v.strip()
                if not (k and v):
                    continue
                if not v.endswith('/'): continue
                imdbID = analyze_imdbid(v)
                if item == 'names refs':
                    obj = Person(personID=imdbID, name=k,
                                 accessSystem=self._as, modFunct=self._modFunct)
                elif item == 'titles refs':
                    obj = Movie(movieID=imdbID, title=k,
                                accessSystem=self._as, modFunct=self._modFunct)
                else:
                    obj = Character(characterID=imdbID, name=k,
                                    accessSystem=self._as, modFunct=self._modFunct)
                # XXX: companies aren't handled: are they ever found in text,
                #      as links to their page?
                result[item][k] = obj
        return result

    def add_refs(self, data):
        # References must not be added to the references themselves.
        return data
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import pathlib
import sys
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = pathlib.Path(__file__).parents[4] / 'docs/templates/man.j2'
# from https://www.python.org/dev/peps/pep-0257/
def trim_docstring(docstring):
    """Normalize a docstring per PEP 257: expand tabs, strip the common
    leading indentation of continuation lines, and drop blank border lines."""
    if not docstring:
        return ''
    raw_lines = docstring.expandtabs().splitlines()
    # Minimum indentation of the non-blank continuation lines (the first
    # line never counts).
    margins = [len(ln) - len(ln.lstrip())
               for ln in raw_lines[1:] if ln.lstrip()]
    cleaned = [raw_lines[0].strip()]
    if margins:
        margin = min(margins)
        cleaned.extend(ln[margin:].rstrip() for ln in raw_lines[1:])
    # Strip off trailing and leading blank lines.
    while cleaned and not cleaned[-1]:
        cleaned.pop()
    while cleaned and not cleaned[0]:
        cleaned.pop(0)
    return '\n'.join(cleaned)
def get_options(optlist):
    """Return a list of dicts describing the given argparse actions.

    Each dict carries 'desc' and 'options'; plain store actions also get
    an 'arg' placeholder (the upper-cased dest).  Actions with neither
    option strings nor a store dest are skipped."""
    results = []
    for action in optlist:
        entry = {
            'desc': action.help,
            'options': action.option_strings
        }
        if isinstance(action, argparse._StoreAction):
            entry['arg'] = action.dest.upper()
        elif not entry['options']:
            continue
        results.append(entry)
    return results
def dedupe_groups(parser):
    """Return the parser's action groups with duplicates (groups whose
    _actions compare equal to an earlier group's) removed, keeping order."""
    unique = []
    for group in parser._action_groups:
        if not any(seen._actions == group._actions for seen in unique):
            unique.append(group)
    return unique
def get_option_groups(option_parser):
    """Return a list of dicts describing each non-default option group
    (the first, auto-created group is skipped)."""
    groups = []
    for action_group in dedupe_groups(option_parser)[1:]:
        groups.append({
            'desc': action_group.description,
            'options': action_group._actions,
            'group_obj': action_group,
        })
    return groups
def opt_doc_list(parser):
    """Collect option docs from every non-default group of the parser,
    followed by the parser's own actions."""
    docs = []
    for group in dedupe_groups(parser)[1:]:
        docs.extend(get_options(group._actions))
    docs.extend(get_options(parser._actions))
    return docs
def opts_docs(cli_class_name, cli_module_name):
    """Generate a doc structure (a dict) from the options of an Ansible CLI.

    :param cli_class_name: name of the CLI class (e.g. 'AdHocCLI').
    :param cli_module_name: module name under ansible.cli (e.g. 'adhoc').
    :returns: dict with the cli name, usage, descriptions, options,
        option groups and, recursively, its actions/subcommands.
    """
    cli_name = 'ansible-%s' % cli_module_name
    if cli_module_name == 'adhoc':
        cli_name = 'ansible'
    # Instantiate the cli class and ask for its options.
    cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
                                   fromlist=[cli_class_name]), cli_class_name)
    cli = cli_klass([cli_name])
    # Parse the common options; failures are not fatal here, we only need
    # the (partially) populated parser.
    try:
        cli.init_parser()
    except Exception:
        pass
    # base/common cli info
    docs = {
        'cli': cli_module_name,
        'cli_name': cli_name,
        'usage': cli.parser.format_usage(),
        'short_desc': cli.parser.description,
        'long_desc': trim_docstring(cli.__doc__),
        'actions': {},
        'content_depth': 2,
    }
    option_info = {'option_names': [],
                   'options': [],
                   'groups': []}
    # BUGFIX: iterate over a tuple of attribute names.  The original code
    # iterated over the *string* 'ARGUMENTS' (i.e. its characters), so the
    # 'arguments' entry was never populated.
    for extras in ('ARGUMENTS',):
        if hasattr(cli, extras):
            docs[extras.lower()] = getattr(cli, extras)
    common_opts = opt_doc_list(cli.parser)
    groups_info = get_option_groups(cli.parser)
    # Option aliases shared by all subcommands; per-action docs omit them.
    shared_opt_names = []
    for opt in common_opts:
        shared_opt_names.extend(opt.get('options', []))
    option_info['options'] = common_opts
    option_info['option_names'] = shared_opt_names
    option_info['groups'].extend(groups_info)
    docs.update(option_info)

    def get_actions(parser, docs):
        """Recursively populate docs['actions'] from the parser's
        subparsers; return the nesting depth found."""
        try:
            subparser = parser._subparsers._group_actions[0].choices
        except AttributeError:
            subparser = {}
        depth = 0
        for action, parser in subparser.items():
            action_info = {'option_names': [],
                           'options': [],
                           'actions': {}}
            action_info['name'] = action
            action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
            action_doc_list = opt_doc_list(parser)
            uncommon_options = []
            for action_doc in action_doc_list:
                option_aliases = action_doc.get('options', [])
                for option_alias in option_aliases:
                    if option_alias in shared_opt_names:
                        continue
                    # TODO: use set
                    if option_alias not in action_info['option_names']:
                        action_info['option_names'].append(option_alias)
                    # NOTE(review): this checks the still-empty
                    # action_info['options'] list, so an action_doc with
                    # several uncommon aliases is appended once per alias;
                    # left as-is to preserve the emitted structure.
                    if action_doc in action_info['options']:
                        continue
                    uncommon_options.append(action_doc)
            action_info['options'] = uncommon_options
            # NOTE(review): only the depth of the last subcommand is kept
            # (presumably a max() was intended); preserved as-is.
            depth = 1 + get_actions(parser, action_info)
            docs['actions'][action] = action_info
        return depth

    action_depth = get_actions(cli.parser, docs)
    docs['content_depth'] = action_depth + 1
    docs['options'] = opt_doc_list(cli.parser)
    return docs
class GenerateMan(Command):
    """Build-command that renders man/rst documentation for the ansible CLIs
    from their parsers' option data, via a jinja2 template."""
    name = 'generate-man'

    @classmethod
    def init_parser(cls, add_parser):
        # register this subcommand and its arguments on the shared build parser
        parser = add_parser(name=cls.name,
                            description='Generate cli documentation from cli docstrings')
        parser.add_argument("-t", "--template-file", action="store", dest="template_file",
                            default=DEFAULT_TEMPLATE_FILE, help="path to jinja2 template")
        parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
                            default='/tmp/', help="Output directory for rst files")
        parser.add_argument("-f", "--output-format", action="store", dest="output_format",
                            default='man',
                            help="Output format for docs (the default 'man' or 'rst')")
        parser.add_argument('cli_modules', help='CLI module name(s)', metavar='MODULE_NAME', nargs='*')

    @staticmethod
    def main(args):
        template_file = args.template_file
        template_path = os.path.expanduser(template_file)
        template_dir = os.path.abspath(os.path.dirname(template_path))
        template_basename = os.path.basename(template_file)

        output_dir = os.path.abspath(args.output_dir)
        output_format = args.output_format

        cli_modules = args.cli_modules

        # various cli parsing things checks sys.argv if the 'args' that are passed in are []
        # so just remove any args so the cli modules dont try to parse them resulting in warnings
        sys.argv = [sys.argv[0]]

        allvars = {}
        output = {}
        cli_list = []
        cli_bin_name_list = []

        # for binary in os.listdir('../../lib/ansible/cli'):
        for cli_module_name in cli_modules:
            binary = os.path.basename(os.path.expanduser(cli_module_name))

            # only real cli modules; package marker is skipped
            if not binary.endswith('.py'):
                continue
            elif binary == '__init__.py':
                continue

            cli_name = os.path.splitext(binary)[0]

            # 'adhoc' maps to the bare 'ansible' binary; everything else is ansible-<name>
            if cli_name == 'adhoc':
                cli_class_name = 'AdHocCLI'
                # myclass = 'AdHocCLI'
                output[cli_name] = 'ansible.1.rst.in'
                cli_bin_name = 'ansible'
            else:
                # myclass = "%sCLI" % libname.capitalize()
                cli_class_name = "%sCLI" % cli_name.capitalize()
                output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
                cli_bin_name = 'ansible-%s' % cli_name

            # FIXME:
            allvars[cli_name] = opts_docs(cli_class_name, cli_name)
            cli_bin_name_list.append(cli_bin_name)

        cli_list = allvars.keys()

        doc_name_formats = {'man': '%s.1.rst.in',
                            'rst': '%s.rst'}

        for cli_name in cli_list:

            # template it!
            env = Environment(loader=FileSystemLoader(template_dir))
            template = env.get_template(template_basename)

            # add rest to vars
            tvars = allvars[cli_name]
            tvars['cli_list'] = cli_list
            tvars['cli_bin_name_list'] = cli_bin_name_list
            tvars['cli'] = cli_name
            # NOTE(review): tvars['options'] is a list of option-doc dicts, so
            # this membership test looks like it can never be true — verify intent.
            if '-i' in tvars['options']:
                print('uses inventory')

            manpage = template.render(tvars)
            filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
update_file_if_different(filename, to_bytes(manpage))
# Echo each command as it runs, for Evergreen task logs.
set -o verbose

# Remove build/test residue so the next task starts from a clean host:
# database files, diskstats logs, archived artifacts, cloud credential
# files, the Python virtualenv, and the install/multiversion trees.
rm -rf \
    /data/db/* \
    mongo-diskstats* \
    mongo-*.tgz \
    mongo-*.zst \
    ~/.aws \
    ~/.boto \
    venv \
    /data/install \
    /data/multiversion
exit 0
#!/usr/bin/python -u
import sys
import libxml2
#memory debug specific
# enable libxml2's allocation accounting so leaks can be reported at exit
libxml2.debugMemory(1)

# name of the XPath function last resolved via the evaluation context
# (set from inside foo(); checked at the end of the script)
called = ""
def foo(ctx, x):
    """XPath extension function: records the name of the function being
    evaluated (via the parser/evaluation contexts) and returns x + 1."""
    global called

    #
    # test that access to the XPath evaluation contexts
    #
    # ctx is the raw parser-context pointer libxml2 hands to extension
    # functions; wrap it, climb to the evaluation context, and capture the
    # name of the XPath function currently being invoked.
    pctxt = libxml2.xpathParserContext(_obj=ctx)
    ctxt = pctxt.context()
    called = ctxt.function()
    return x + 1
def bar(ctxt, x):
    """XPath extension function: ignores its context argument and returns
    the decimal string representation of x + 2."""
    total = x + 2
    return "%d" % total
# Parse the fixture and sanity-check the node set returned by a plain query.
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
    print("xpath query: wrong node set size")
    sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
    print("xpath query: wrong node set value")
    sys.exit(1)

# Register the Python extension functions in the default namespace.
libxml2.registerXPathFunction(ctxt._o, "foo", None, foo)
libxml2.registerXPathFunction(ctxt._o, "bar", None, bar)

# Hammer both extension functions to shake out refcount/memory errors.
i = 10000
while i > 0:
    res = ctxt.xpathEval("foo(1)")
    if res != 2:
        print("xpath extension failure")
        sys.exit(1)
    i = i - 1
i = 10000
while i > 0:
    res = ctxt.xpathEval("bar(1)")
    if res != "3":
        # BUG FIX: the format string had no argument, so the literal '%s'
        # was printed; interpolate the actual result.
        print("xpath extension failure got %s expecting '3'" % res)
        sys.exit(1)
    i = i - 1
doc.freeDoc()
ctxt.xpathFreeContext()

# foo() must have been able to read its own name off the evaluation context.
if called != "foo":
    print("xpath function: failed to access the context")
    print("xpath function: %s" % (called))
    sys.exit(1)

#memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv, fields
from openerp.tools.translate import _
class ir_logging(osv.Model):
    """Persistent log records reported either by clients or by the server."""
    _name = 'ir.logging'
    _order = 'id DESC'

    # where the log entry originated
    EXCEPTIONS_TYPE = [
        ('client', 'Client'),
        ('server', 'Server')
    ]

    _columns = {
        'create_date': fields.datetime('Create Date', readonly=True),
        'create_uid': fields.integer('Uid', readonly=True),  # Integer not m2o is intentionnal
        'name': fields.char('Name', required=True),
        'type': fields.selection(EXCEPTIONS_TYPE, string='Type', required=True, select=True),
        'dbname': fields.char('Database Name', select=True),
        'level': fields.char('Level', select=True),
        'message': fields.text('Message', required=True),
        # code location that emitted the record
        'path': fields.char('Path', required=True),
        'func': fields.char('Function', required=True),
        'line': fields.char('Line', required=True),
}
# Generated by h2py from /usr/include/netinet/in.h
# (Constants and macro translations; the two functions that carried
# untranslated C-style '(uint)' casts have been fixed — see BUID_7F_SRVAL
# and BAT_ESEG below.)

# Included from net/nh.h

# Included from sys/machine.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
DEFAULT_GPR = 0xDEADBEEF
MSR_EE = 0x8000
MSR_PR = 0x4000
MSR_FP = 0x2000
MSR_ME = 0x1000
MSR_FE = 0x0800
MSR_FE0 = 0x0800
MSR_SE = 0x0400
MSR_BE = 0x0200
MSR_IE = 0x0100
MSR_FE1 = 0x0100
MSR_AL = 0x0080
MSR_IP = 0x0040
MSR_IR = 0x0020
MSR_DR = 0x0010
MSR_PM = 0x0004
DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
CR_LT = 0x80000000
CR_GT = 0x40000000
CR_EQ = 0x20000000
CR_SO = 0x10000000
CR_FX = 0x08000000
CR_FEX = 0x04000000
CR_VX = 0x02000000
CR_OX = 0x01000000
XER_SO = 0x80000000
XER_OV = 0x40000000
XER_CA = 0x20000000
def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)

def XER_LENGTH(xer): return (xer & 0x0000007F)

DSISR_IO = 0x80000000
DSISR_PFT = 0x40000000
DSISR_LOCK = 0x20000000
DSISR_FPIO = 0x10000000
DSISR_PROT = 0x08000000
DSISR_LOOP = 0x04000000
DSISR_DRST = 0x04000000
DSISR_ST = 0x02000000
DSISR_SEGB = 0x01000000
DSISR_DABR = 0x00400000
DSISR_EAR = 0x00100000
SRR_IS_PFT = 0x40000000
SRR_IS_ISPEC = 0x20000000
SRR_IS_IIO = 0x10000000
SRR_IS_GUARD = 0x10000000
SRR_IS_PROT = 0x08000000
SRR_IS_LOOP = 0x04000000
SRR_PR_FPEN = 0x00100000
SRR_PR_INVAL = 0x00080000
SRR_PR_PRIV = 0x00040000
SRR_PR_TRAP = 0x00020000
SRR_PR_IMPRE = 0x00010000
# BUG FIX: the original carried the C cast '((uint)(raddr))' verbatim, which
# raised NameError at call time ('uint' is undefined in Python). The cast's
# intent — treat raddr as an unsigned 32-bit value — is expressed by masking.
def BUID_7F_SRVAL(raddr): return (0x87F00000 | ((raddr & 0xFFFFFFFF) >> 28))

BT_256M = 0x1FFC
BT_128M = 0x0FFC
BT_64M = 0x07FC
BT_32M = 0x03FC
BT_16M = 0x01FC
BT_8M = 0x00FC
BT_4M = 0x007C
BT_2M = 0x003C
BT_1M = 0x001C
BT_512K = 0x000C
BT_256K = 0x0004
BT_128K = 0x0000
BT_NOACCESS = 0x0
BT_RDONLY = 0x1
BT_WRITE = 0x2
BT_VS = 0x2
BT_VP = 0x1
# BUG FIX: same untranslated '(uint)' cast as BUID_7F_SRVAL; masked instead.
def BAT_ESEG(dbatu): return ((dbatu & 0xFFFFFFFF) >> 28)

MIN_BAT_SIZE = 0x00020000
MAX_BAT_SIZE = 0x10000000
# Big-endian host: network byte order conversions are the identity.
def ntohl(x): return (x)

def ntohs(x): return (x)

def htonl(x): return (x)

def htons(x): return (x)

IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_LOCAL = 63
IPPROTO_EON = 80
IPPROTO_BIP = 0x53
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_TIMESERVER = 37
def IN_CLASSA(i): return (((int)(i) & 0x80000000) == 0)

IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((int)(i) & 0xc0000000) == 0x80000000)

IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((int)(i) & 0xe0000000) == 0xc0000000)

IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((int)(i) & 0xf0000000) == 0xe0000000)

def IN_MULTICAST(i): return IN_CLASSD(i)

IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
def IN_EXPERIMENTAL(i): return (((int)(i) & 0xe0000000) == 0xe0000000)

def IN_BADCLASS(i): return (((int)(i) & 0xf0000000) == 0xf0000000)

INADDR_ANY = 0x00000000
INADDR_BROADCAST = 0xffffffff
INADDR_LOOPBACK = 0x7f000001
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
/*
* Copyright (C) 2016 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.graph;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.graph.GraphConstants.INNER_CAPACITY;
import static com.google.common.graph.GraphConstants.INNER_LOAD_FACTOR;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multiset;
import com.google.errorprone.annotations.concurrent.LazyInit;
import java.lang.ref.Reference;
import java.lang.ref.SoftReference;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.jspecify.annotations.Nullable;
/**
* An implementation of {@link NetworkConnections} for directed networks with parallel edges.
*
* @author James Sexton
* @param <N> Node parameter type
* @param <E> Edge parameter type
*/
final class DirectedMultiNetworkConnections<N, E> extends AbstractDirectedNetworkConnections<N, E> {

  private DirectedMultiNetworkConnections(
      Map<E, N> inEdges, Map<E, N> outEdges, int selfLoopCount) {
    super(inEdges, outEdges, selfLoopCount);
  }

  /** Returns new, empty, mutable connections with the standard inner sizing. */
  static <N, E> DirectedMultiNetworkConnections<N, E> of() {
    return new DirectedMultiNetworkConnections<>(
        new HashMap<E, N>(INNER_CAPACITY, INNER_LOAD_FACTOR),
        new HashMap<E, N>(INNER_CAPACITY, INNER_LOAD_FACTOR),
        0);
  }

  /** Returns connections backed by immutable copies of the given edge maps. */
  static <N, E> DirectedMultiNetworkConnections<N, E> ofImmutable(
      Map<E, N> inEdges, Map<E, N> outEdges, int selfLoopCount) {
    return new DirectedMultiNetworkConnections<>(
        ImmutableMap.copyOf(inEdges), ImmutableMap.copyOf(outEdges), selfLoopCount);
  }

  // Softly-referenced cache of the multiset of predecessor nodes (values of
  // inEdgeMap). Rebuilt on demand; kept in sync by the add/remove hooks below.
  @LazyInit private transient @Nullable Reference<Multiset<N>> predecessorsReference;

  @Override
  public Set<N> predecessors() {
    return Collections.unmodifiableSet(predecessorsMultiset().elementSet());
  }

  // Returns the cached predecessor multiset, recomputing it if the soft
  // reference was never set or has been cleared by the garbage collector.
  private Multiset<N> predecessorsMultiset() {
    Multiset<N> predecessors = getReference(predecessorsReference);
    if (predecessors == null) {
      predecessors = HashMultiset.create(inEdgeMap.values());
      predecessorsReference = new SoftReference<>(predecessors);
    }
    return predecessors;
  }

  // Softly-referenced cache of the multiset of successor nodes (values of
  // outEdgeMap); same lifecycle as predecessorsReference.
  @LazyInit private transient @Nullable Reference<Multiset<N>> successorsReference;

  @Override
  public Set<N> successors() {
    return Collections.unmodifiableSet(successorsMultiset().elementSet());
  }

  private Multiset<N> successorsMultiset() {
    Multiset<N> successors = getReference(successorsReference);
    if (successors == null) {
      successors = HashMultiset.create(outEdgeMap.values());
      successorsReference = new SoftReference<>(successors);
    }
    return successors;
  }

  @Override
  public Set<E> edgesConnecting(N node) {
    // Live view; its size is the multiplicity of `node` among successors.
    return new MultiEdgesConnecting<E>(outEdgeMap, node) {
      @Override
      public int size() {
        return successorsMultiset().count(node);
      }
    };
  }

  @Override
  public N removeInEdge(E edge, boolean isSelfLoop) {
    N node = super.removeInEdge(edge, isSelfLoop);
    // Keep the cached multiset (if still alive) consistent with inEdgeMap.
    Multiset<N> predecessors = getReference(predecessorsReference);
    if (predecessors != null) {
      checkState(predecessors.remove(node));
    }
    return node;
  }

  @Override
  public N removeOutEdge(E edge) {
    N node = super.removeOutEdge(edge);
    Multiset<N> successors = getReference(successorsReference);
    if (successors != null) {
      checkState(successors.remove(node));
    }
    return node;
  }

  @Override
  public void addInEdge(E edge, N node, boolean isSelfLoop) {
    super.addInEdge(edge, node, isSelfLoop);
    Multiset<N> predecessors = getReference(predecessorsReference);
    if (predecessors != null) {
      checkState(predecessors.add(node));
    }
  }

  @Override
  public void addOutEdge(E edge, N node) {
    super.addOutEdge(edge, node);
    Multiset<N> successors = getReference(successorsReference);
    if (successors != null) {
      checkState(successors.add(node));
    }
  }

  // Dereferences, returning null when the reference was never set or has
  // already been cleared by the garbage collector.
  private static <T> @Nullable T getReference(@Nullable Reference<T> reference) {
    return (reference == null) ? null : reference.get();
  }
}
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import not_
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
from nailgun.db.sqlalchemy.models.node import Role
class ReleaseOrchestratorData(Base):
    """Orchestrator source settings (repos, puppet locations) for a release."""
    __tablename__ = 'release_orchestrator_data'

    id = Column(Integer, primary_key=True)
    release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
    # JSON blob describing the package repositories for this release
    repo_metadata = Column(JSON, nullable=False)
    puppet_manifests_source = Column(Text, nullable=False)
    puppet_modules_source = Column(Text, nullable=False)
class Release(Base):
    """A deployable distribution, unique per (name, version) pair, together
    with its metadata and related roles/clusters."""
    __tablename__ = 'releases'
    __table_args__ = (
        UniqueConstraint('name', 'version'),
    )

    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100), nullable=False)
    version = Column(String(30), nullable=False)
    # list of versions this release can be upgraded from
    can_update_from_versions = Column(JSON, default=[],
                                      nullable=False, server_default='[]')
    description = Column(Unicode)
    operating_system = Column(String(50), nullable=False)
    state = Column(
        Enum(
            *consts.RELEASE_STATES,
            name='release_state'
        ),
        nullable=False,
        default='not_available'
    )
    networks_metadata = Column(JSON, default=[])
    attributes_metadata = Column(JSON, default={})
    volumes_metadata = Column(JSON, default={})
    modes_metadata = Column(JSON, default={})
    roles_metadata = Column(JSON, default={})
    wizard_metadata = Column(JSON, default={})
    role_list = relationship(
        "Role",
        backref="release",
        cascade="all,delete",
        order_by="Role.id"
    )
    clusters = relationship(
        "Cluster",
        primaryjoin="Release.id==Cluster.release_id",
        backref="release",
        cascade="all,delete"
    )
    orchestrator_data = relationship("ReleaseOrchestratorData",
                                     uselist=False,
                                     cascade="delete")

    #TODO(enchantner): get rid of properties
    @property
    def roles(self):
        # role names come from the related Role rows, ordered by Role.id
        return [role.name for role in self.role_list]

    @roles.setter
    def roles(self, new_roles):
        # delete roles of this release that are no longer in new_roles...
        db().query(Role).filter(
            not_(Role.name.in_(new_roles))
        ).filter(
            Role.release_id == self.id
        ).delete(synchronize_session='fetch')

        # ...then create any names not already present
        added_roles = self.roles
        for role in new_roles:
            if role not in added_roles:
                new_role = Role(
                    name=role,
                    release=self
                )
                db().add(new_role)
added_roles.append(role)
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from nova.i18n import _LE
# Branding defaults; an optional "release" config file found on the oslo
# config path may override them at runtime.
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None  # OS distro package version suffix

# guards the one-time release-file load
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
    """Override NOVA_VENDOR/NOVA_PRODUCT/NOVA_PACKAGE from an optional
    "release" file found via oslo.config.

    Runs at most once (guarded by the module-level ``loaded`` flag); any
    failure while parsing is logged and the defaults are kept.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    from six.moves import configparser

    from oslo_config import cfg

    from oslo_log import log as logging

    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # BUG FIX: the parser was previously bound to the name ``cfg``,
        # shadowing the oslo_config module imported above within this scope.
        parser = configparser.RawConfigParser()
        parser.read(cfgfile)

        if parser.has_option("Nova", "vendor"):
            NOVA_VENDOR = parser.get("Nova", "vendor")

        if parser.has_option("Nova", "product"):
            NOVA_PRODUCT = parser.get("Nova", "product")

        if parser.has_option("Nova", "package"):
            NOVA_PACKAGE = parser.get("Nova", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_LE("Failed to load %(cfgfile)s: %(ex)s"),
                  {'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
    """Return the vendor name, honouring any release-file override."""
    _load_config()
    return NOVA_VENDOR
def product_string():
    """Return the product name, honouring any release-file override."""
    _load_config()
    return NOVA_PRODUCT
def package_string():
    """Return the distro package suffix, or None when not configured."""
    _load_config()
    return NOVA_PACKAGE
def version_string_with_package():
    """Return the version string, suffixed with the distro package version
    when one is configured in the release file."""
    if package_string() is None:
        return version_info.version_string()
    else:
return "%s-%s" % (version_info.version_string(), package_string())
#!/usr/bin/env python
"""
@file pois2inductionLoops.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2010-02-18
@version $Id: pois2inductionLoops.py 13811 2013-05-01 20:31:43Z behrisch $
Converts a given pois located on lanes into induction loop detectors;
Each poi is replicated to cover all lanes of the road.
The detectors are named <POINAME>__l<LANE_INDEX>
Call: pois2inductionLoops.py <NET> <POIS> <OUTPUT>
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2010-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib
if len(sys.argv) < 4:
print "Usage: " + sys.argv[0] + " <NET> <POIS> <OUTPUT>"
sys.exit()
parser = make_parser()
print "Reading net..."
net = sumolib.net.readNet(sys.argv[1])
print "Reading PoIs..."
pois = sumolib.poi.readPois(sys.argv[2])
fdo = open(sys.argv[3], "w")
print >> fdo, '<additional>'
for poi in pois:
if not poi._lane:
print "Error: poi '%s' is not on a lane" % poi._id
continue
edge = poi._lane[:poi._lane.rfind('_')]
edge = net._id2edge[edge]
for i, l in enumerate(edge._lanes):
print >> fdo, ' <e1Detector id="%s__l%s" lane="%s" pos="%s" freq="60" file="e1_output.xml"/>' % (poi._id, i, l.getID(), poi._pos)
print >> fdo, ''
print >> fdo, '</additional>'
fdo.close()
# -*- coding: utf-8 -*-
'''
Open Facebook allows you to use Facebook's open graph API with simple python code
**Features**
* Supported and maintained
* Tested so people can contribute
* Facebook exceptions are mapped
* Logging
**Basic examples**::
facebook = OpenFacebook(access_token)
# Getting info about me
facebook.get('me')
# Learning some more about fashiolista
facebook.get('fashiolista')
# Writing your first comment
facebook.set('fashiolista/comments', message='I love Fashiolista!')
# Posting to a users wall
facebook.set('me/feed', message='check out fashiolista',
url='http://www.fashiolista.com')
# Liking a page
facebook.set('fashiolista/likes')
# Getting who likes cocacola
facebook.set('cocacola/likes')
# Use fql to retrieve your name
facebook.fql('SELECT name FROM user WHERE uid = me()')
# Executing fql in batch
facebook.batch_fql([
'SELECT uid, name, pic_square FROM user WHERE uid = me()',
'SELECT uid, rsvp_status FROM event_member WHERE eid=12345678',
])
# Uploading pictures
photo_urls = [
'http://e.fashiocdn.com/images/entities/0/7/B/I/9/0.365x365.jpg',
'http://e.fashiocdn.com/images/entities/0/5/e/e/r/0.365x365.jpg',
]
for photo in photo_urls:
print facebook.set('me/feed', message='Check out Fashiolista',
picture=photo, url='http://www.fashiolista.com')
**Getting an access token**
Once you get your access token, Open Facebook gives you access to the Facebook API
There are 3 ways of getting a facebook access_token and these are currently
implemented by Django Facebook.
1. code is passed as request parameter and traded for an
access_token using the api
2. code is passed through a signed cookie and traded for an access_token
3. access_token is passed directly (retrieved through javascript, which
would be bad security, or through one of the mobile flows.)
If you are looking to develop your own flow for a different framework have a look at
Facebook's documentation:
http://developers.facebook.com/docs/authentication/
Also have a look at the :class:`.FacebookRequired` decorator and :func:`get_persistent_graph` function to
understand the required functionality
**Api docs**:
'''
from django.http import QueryDict
from django.utils import six
from django.utils.http import urlencode
from django_facebook import settings as facebook_settings
from open_facebook import exceptions as facebook_exceptions
from open_facebook.utils import json, encode_params, send_warning, memoized, \
stop_statsd, start_statsd
import logging
from django_facebook.utils import to_int
import ssl
import re
try:
# python 2 imports
from urlparse import urlparse
from urllib2 import build_opener, HTTPError, URLError
except ImportError:
# python 3 imports
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from urllib.request import build_opener
# module-level logger shared by all connection classes below
logger = logging.getLogger(__name__)

# base timeout, actual timeout will increase when requests fail
REQUEST_TIMEOUT = 10
# two retries was too little, sometimes facebook is a bit flaky
REQUEST_ATTEMPTS = 3
class FacebookConnection(object):
    '''
    Shared utility class implementing the sending of requests to Facebook
    and the parsing of Facebook API responses (including error mapping).
    '''
    api_url = 'https://graph.facebook.com/'
    # this older url is still used for fql requests
    old_api_url = 'https://api.facebook.com/method/'

    @classmethod
    def request(cls, path='', post_data=None, old_api=False, **params):
        '''
        Main function for sending the request to facebook

        **Example**::

            FacebookConnection.request('me')

        :param path:
            The path to request, examples: /me/friends/, /me/likes/
        :param post_data:
            A dictionary of data to post
        :param parms:
            The get params to include
        '''
        api_base_url = cls.old_api_url if old_api else cls.api_url
        if getattr(cls, 'access_token', None):
            params['access_token'] = cls.access_token
        url = '%s%s?%s' % (api_base_url, path, urlencode(params))
        response = cls._request(url, post_data)
        return response

    @classmethod
    def _request(cls, url, post_data=None, timeout=REQUEST_TIMEOUT,
                 attempts=REQUEST_ATTEMPTS):
        """Send the actual HTTP request, with retries and statsd timing.

        Retries with a linearly growing timeout on transport errors, parses
        the response (json or querystring), and raises a mapped
        OpenFacebookException subclass for API-level errors.
        """
        # change fb__explicitly_shared to fb:explicitly_shared
        if post_data:
            post_data = dict(
                (k.replace('__', ':'), v) for k, v in post_data.items())

        logger.info('requesting url %s with post data %s', url, post_data)
        post_request = (post_data is not None or 'method=post' in url)

        if post_request and facebook_settings.FACEBOOK_READ_ONLY:
            logger.info('running in readonly mode')
            response = dict(id=123456789, setting_read_only=True)
            return response

        # nicely identify ourselves before sending the request
        opener = build_opener()
        opener.addheaders = [('User-agent', 'Open Facebook Python')]

        # get the statsd path to track response times with
        path = urlparse(url).path
        statsd_path = path.replace('.', '_')

        # give it a few shots, connection is buggy at times
        timeout_mp = 0
        while attempts:
            # gradually increase the timeout upon failure
            timeout_mp += 1
            extended_timeout = timeout * timeout_mp
            response_file = None
            encoded_params = encode_params(post_data) if post_data else None
            post_string = (urlencode(encoded_params).encode('utf-8')
                           if post_data else None)
            try:
                start_statsd('facebook.%s' % statsd_path)
                try:
                    response_file = opener.open(
                        url, post_string, timeout=extended_timeout)
                    response = response_file.read().decode('utf8')
                except (HTTPError,) as e:
                    response_file = e
                    response = response_file.read().decode('utf8')
                    # Facebook sends error codes for many of their flows
                    # we still want the json to allow for proper handling
                    msg_format = 'FB request, error type %s, code %s'
                    logger.warn(msg_format, type(e), getattr(e, 'code', None))
                    # detect if its a server or application error
                    server_error = cls.is_server_error(e, response)
                    if server_error:
                        # trigger a retry
                        raise URLError(
                            'Facebook is down %s' % response)
                break
            except (HTTPError, URLError, ssl.SSLError) as e:
                # These are often temporary errors, so we will retry before
                # failing
                error_format = 'Facebook encountered a timeout (%ss) or error %s'
                logger.warn(error_format, extended_timeout, str(e))
                attempts -= 1
                if not attempts:
                    # if we have no more attempts actually raise the error
                    error_instance = facebook_exceptions.convert_unreachable_exception(
                        e)
                    error_msg = 'Facebook request failed after several retries, raising error %s'
                    logger.warn(error_msg, error_instance)
                    raise error_instance
            finally:
                if response_file:
                    response_file.close()
                stop_statsd('facebook.%s' % statsd_path)

        # Facebook response is either
        # Valid json
        # A string which is a querydict (a=b&c=d...etc)
        # A html page stating FB is having trouble (but that shouldnt reach
        # this part of the code)
        try:
            parsed_response = json.loads(response)
            logger.info('facebook send response %s' % parsed_response)
        except Exception as e:
            # using exception because we need to support multiple json libs :S
            parsed_response = QueryDict(response, True)
            logger.info('facebook send response %s' % parsed_response)

        if parsed_response and isinstance(parsed_response, dict):
            # of course we have two different syntaxes
            if parsed_response.get('error'):
                cls.raise_error(parsed_response['error']['type'],
                                parsed_response['error']['message'],
                                parsed_response['error'].get('code'))
            elif parsed_response.get('error_code'):
                cls.raise_error(parsed_response['error_code'],
                                parsed_response['error_msg'])

        return parsed_response

    @classmethod
    def is_server_error(cls, e, response):
        '''
        Checks an HTTPError to see if Facebook is down or we are using the
        API in the wrong way

        Facebook doesn't clearly distinguish between the two, so this is a bit
        of a hack
        '''
        from open_facebook.utils import is_json
        server_error = False
        if hasattr(e, 'code') and e.code == 500:
            server_error = True

        # Facebook status codes are used for application logic
        # http://fbdevwiki.com/wiki/Error_codes#User_Permission_Errors
        # The only way I know to detect an actual server error is to check if
        # it looks like their error page
        # TODO: think of a better solution....
        error_matchers = [
            '<title>Facebook | Error</title>',
            'Sorry, something went wrong.'
        ]
        is_error_page = all(
            [matcher in response for matcher in error_matchers])
        if is_error_page:
            server_error = True

        # if it looks like json, facebook is probably not down
        if is_json(response):
            server_error = False

        return server_error

    @classmethod
    def raise_error(cls, error_type, message, error_code=None):
        '''
        Lookup the best error class for the error and raise it

        **Example**::

            FacebookConnection.raise_error(10, 'OAuthException')

        :param error_type:
            the error type from the facebook api call
        :param message:
            the error message from the facebook api call
        :param error_code:
            optionally the error code which facebook send
        '''
        default_error_class = facebook_exceptions.OpenFacebookException

        # get the error code
        error_code = error_code or cls.get_code_from_message(message)

        # also see http://fbdevwiki.com/wiki/Error_codes#User_Permission_Errors
        logger.info('Trying to match error code %s to error class', error_code)

        # lookup by error code takes precedence
        error_class = cls.match_error_code(error_code)

        # try to get error class by direct lookup
        if not error_class:
            if not isinstance(error_type, int):
                error_class = getattr(facebook_exceptions, error_type, None)
                if error_class and not issubclass(error_class, default_error_class):
                    error_class = None

        # hack for missing parameters
        if 'Missing' in message and 'parameter' in message:
            error_class = facebook_exceptions.MissingParameter

        # hack for Unsupported delete request
        if 'Unsupported delete request' in message:
            error_class = facebook_exceptions.UnsupportedDeleteRequest

        # fallback to the default
        if not error_class:
            error_class = default_error_class

        logger.info('Matched error to class %s', error_class)

        error_message = message
        if error_code:
            # this is handy when adding new exceptions for facebook errors
            error_message = u'%s (error code %s)' % (message, error_code)

        raise error_class(error_message)

    @classmethod
    def get_code_from_message(cls, message):
        """Extract a numeric error code like '(#100)' from the start of the
        message, or None when absent."""
        # map error classes to facebook error codes
        # find the error code
        error_code = None
        # raw string so the regex escapes are not also string escapes
        error_code_re = re.compile(r'\(#(\d+)\)')
        matches = error_code_re.match(message)
        matching_groups = matches.groups() if matches else None
        if matching_groups:
            error_code = to_int(matching_groups[0]) or None
        return error_code

    @classmethod
    def get_sorted_exceptions(cls):
        """Return all OpenFacebook exception classes, sorted by code range."""
        from open_facebook.exceptions import get_exception_classes
        exception_classes = get_exception_classes()
        exception_classes.sort(key=lambda e: e.range())
        return exception_classes

    @classmethod
    def match_error_code(cls, error_code):
        '''
        Return the right exception class for the error code
        '''
        exception_classes = cls.get_sorted_exceptions()
        error_class = None
        for class_ in exception_classes:
            codes_list = class_.codes_list()
            # match the error class
            matching_error_class = None
            for code in codes_list:
                if isinstance(code, tuple):
                    start, stop = code
                    if error_code and start <= error_code <= stop:
                        matching_error_class = class_
                        logger.info('Matched error on code %s', code)
                elif isinstance(code, (int, six.integer_types)):
                    if int(code) == error_code:
                        matching_error_class = class_
                        logger.info('Matched error on code %s', code)
                else:
                    # BUG FIX: this previously read "raise(ValueError, '...')",
                    # which raises a tuple -- a TypeError under Python 3
                    # (exceptions must derive from BaseException).
                    raise ValueError(
                        'Dont know how to handle %s of '
                        'type %s' % (code, type(code)))
            # tell about the happy news if we found something
            if matching_error_class:
                error_class = matching_error_class
                break
        return error_class
class FacebookAuthorization(FacebookConnection):
    '''
    Methods for getting us an access token
    There are several flows we must support
    * js authentication flow (signed cookie)
    * facebook app authentication flow (signed cookie)
    * facebook oauth redirect (code param in url)
    These 3 options need to be converted to an access token
    Also handles several testing scenarios
    * get app access token
    * create test user
    * get_or_create_test_user
    '''
    @classmethod
    def convert_code(cls, code,
                     redirect_uri='http://local.mellowmorning.com:8000/facebook/connect/'):
        '''
        Turns a code into an access token
        **Example**::
            FacebookAuthorization.convert_code(code)
        :param code:
            The code to convert
        :param redirect_uri:
            The redirect uri with which the code was requested
        :returns: dict
        '''
        kwargs = cls._client_info()
        kwargs['code'] = code
        # facebook requires the exact redirect_uri the code was issued for
        kwargs['redirect_uri'] = redirect_uri
        response = cls.request('oauth/access_token', **kwargs)
        return response
    @classmethod
    def extend_access_token(cls, access_token):
        '''
        https://developers.facebook.com/roadmap/offline-access-removal/
        We can extend the token only once per day
        Normal short lived tokens last 1-2 hours
        Long lived tokens (given by extending) last 60 days
        **Example**::
            FacebookAuthorization.extend_access_token(access_token)
        :param access_token:
            The access_token to extend
        :returns: dict
        '''
        kwargs = cls._client_info()
        kwargs['grant_type'] = 'fb_exchange_token'
        kwargs['fb_exchange_token'] = access_token
        response = cls.request('oauth/access_token', **kwargs)
        return response
    @classmethod
    def _client_info(cls):
        # base parameters shared by every oauth call: app id + app secret
        kwargs = dict(client_id=facebook_settings.FACEBOOK_APP_ID)
        kwargs['client_secret'] = facebook_settings.FACEBOOK_APP_SECRET
        return kwargs
    @classmethod
    def parse_signed_data(cls, signed_request,
                          secret=facebook_settings.FACEBOOK_APP_SECRET):
        '''
        Verify and decode facebook's signed_request parameter; returns the
        decoded payload dict, or None when the algorithm is unsupported or
        the signature does not match.
        Thanks to
        http://stackoverflow.com/questions/3302946/how-to-base64-url-decode-in-python
        and
        http://sunilarora.org/parsing-signedrequest-parameter-in-python-bas
        '''
        from open_facebook.utils import base64_url_decode_php_style, smart_str
        # signed_request is "<base64url signature>.<base64url payload>"
        l = signed_request.split('.', 2)
        encoded_sig = l[0]
        payload = l[1]
        from open_facebook.utils import json
        sig = base64_url_decode_php_style(encoded_sig)
        import hmac
        import hashlib
        data = json.loads(base64_url_decode_php_style(payload).decode('utf-8'))
        algo = data.get('algorithm').upper()
        if algo != 'HMAC-SHA256':
            error_format = 'Unknown algorithm we only support HMAC-SHA256 user asked for %s'
            error_message = error_format % algo
            send_warning(error_message)
            logger.error('Unknown algorithm')
            return None
        else:
            # the signature covers the raw, still-encoded payload
            expected_sig = hmac.new(smart_str(secret), msg=smart_str(payload),
                                    digestmod=hashlib.sha256).digest()
            # NOTE(review): '==' on digests is not constant time; consider
            # hmac.compare_digest to close the timing side channel
            if not sig == expected_sig:
                error_format = 'Signature %s didnt match the expected signature %s'
                error_message = error_format % (sig, expected_sig)
                send_warning(error_message)
                return None
            else:
                logger.debug('valid signed request received..')
                return data
    @classmethod
    def get_app_access_token(cls):
        '''
        Get the access_token for the app that can be used for
        insights and creating test users
        application_id = retrieved from the developer page
        application_secret = retrieved from the developer page
        returns the application access_token
        '''
        kwargs = {
            'grant_type': 'client_credentials',
            'client_id': facebook_settings.FACEBOOK_APP_ID,
            'client_secret': facebook_settings.FACEBOOK_APP_SECRET,
        }
        response = cls.request('oauth/access_token', **kwargs)
        return response['access_token']
    # NOTE(review): @memoized is applied outside @classmethod, so it wraps
    # the classmethod object itself -- confirm the memoized decorator
    # supports that ordering; @classmethod is conventionally outermost
    @memoized
    @classmethod
    def get_cached_app_access_token(cls):
        '''
        Caches the access token in memory, good for speeding up testing
        '''
        app_access_token = cls.get_app_access_token()
        return app_access_token
    @classmethod
    def create_test_user(cls, app_access_token, permissions=None, name=None):
        '''
        Creates a test user with the given permissions and name
        :param app_access_token:
            The application's access token
        :param permissions:
            The list of permissions to request for the test user
        :param name:
            Optionally specify the name
        '''
        if not permissions:
            permissions = ['read_stream', 'publish_stream',
                           'user_photos,offline_access']
        if isinstance(permissions, list):
            permissions = ','.join(permissions)
        # the permissions are encoded into the default name so existing
        # users can be found again by name (see get_or_create_test_user)
        default_name = 'Permissions %s' % permissions.replace(
            ',', ' ').replace('_', '')
        name = name or default_name
        kwargs = {
            'access_token': app_access_token,
            'installed': True,
            'name': name,
            'method': 'post',
            'permissions': permissions,
        }
        path = '%s/accounts/test-users' % facebook_settings.FACEBOOK_APP_ID
        # add the test user data to the test user data class
        test_user_data = cls.request(path, **kwargs)
        test_user_data['name'] = name
        test_user = TestUser(test_user_data)
        return test_user
    @classmethod
    def get_or_create_test_user(cls, app_access_token, name=None, permissions=None, force_create=False):
        '''
        There is no supported way of get or creating a test user
        However
        - creating a test user takes around 5s
        - you an only create 500 test users
        So this slows your testing flow quite a bit.
        This method checks your test users
        Queries their names (stores the permissions in the name)
        '''
        if not permissions:
            permissions = ['read_stream', 'publish_stream', 'publish_actions',
                           'user_photos,offline_access']
        if isinstance(permissions, list):
            permissions = ','.join(permissions)
        # hacking the permissions into the name of the test user
        default_name = 'Permissions %s' % permissions.replace(
            ',', ' ').replace('_', '')
        name = name or default_name
        # retrieve all test users
        test_users = cls.get_test_users(app_access_token)
        user_id_dict = dict([(int(u['id']), u) for u in test_users])
        user_ids = map(str, user_id_dict.keys())
        # use fql to figure out their names
        facebook = OpenFacebook(app_access_token)
        users = facebook.fql('SELECT uid, name FROM user WHERE uid in (%s)' %
                             ','.join(user_ids))
        users_dict = dict([(u['name'], u['uid']) for u in users])
        user_id = users_dict.get(name)
        if force_create and user_id:
            # we need the users access_token, the app access token doesn't
            # always work, seems to be a bug in the Facebook api
            test_user_data = user_id_dict[user_id]
            cls.delete_test_user(test_user_data['access_token'], user_id)
            user_id = None
        if user_id:
            # we found our user, extend the data a bit
            test_user_data = user_id_dict[user_id]
            test_user_data['name'] = name
            test_user = TestUser(test_user_data)
        else:
            # create the user
            test_user = cls.create_test_user(
                app_access_token, permissions, name)
        return test_user
    @classmethod
    def get_test_users(cls, app_access_token):
        # list the app's test users: graph /<app id>/accounts/test-users
        kwargs = dict(access_token=app_access_token)
        path = '%s/accounts/test-users' % facebook_settings.FACEBOOK_APP_ID
        # retrieve all test users
        response = cls.request(path, **kwargs)
        test_users = response['data']
        return test_users
    @classmethod
    def delete_test_user(cls, app_access_token, test_user_id):
        # issue a delete directly on the test user's node
        kwargs = dict(access_token=app_access_token, method='delete')
        path = '%s/' % test_user_id
        response = cls.request(path, **kwargs)
        return response
    @classmethod
    def delete_test_users(cls, app_access_token):
        # remove every known test user, one delete call each
        test_users = cls.get_test_users(app_access_token)
        test_user_ids = [u['id'] for u in test_users]
        for test_user_id in test_user_ids:
            cls.delete_test_user(app_access_token, test_user_id)
class OpenFacebook(FacebookConnection):
    '''
    The main api class, initialize using
    **Example**::
        graph = OpenFacebook(access_token)
        print(graph.get('me'))
    '''
    def __init__(self, access_token=None, prefetched_data=None,
                 expires=None, current_user_id=None, version=None):
        '''
        :param access_token:
            The facebook Access token
        '''
        self.access_token = access_token
        # extra data coming from signed cookies
        self.prefetched_data = prefetched_data
        # store to enable detection for offline usage
        self.expires = expires
        # hook to store the current user id if representing the
        # facebook connection to a logged in user :)
        self.current_user_id = current_user_id
        if version is None:
            version = 'v1.0'
        self.version = version
    def __getstate__(self):
        '''
        Turns the object into something easy to serialize
        '''
        state = dict(
            access_token=self.access_token,
            prefetched_data=self.prefetched_data,
            expires=self.expires,
            # version and current_user_id were previously dropped here,
            # leaving unpickled instances without attributes __init__ sets
            version=self.version,
            current_user_id=self.current_user_id,
        )
        return state
    def __setstate__(self, state):
        '''
        Restores the object from the state dict
        '''
        self.access_token = state['access_token']
        self.prefetched_data = state['prefetched_data']
        self.expires = state['expires']
        # .get() keeps pickles written before these fields existed loadable
        self.version = state.get('version', 'v1.0')
        self.current_user_id = state.get('current_user_id')
    def is_authenticated(self):
        '''
        Ask facebook if we have access to the users data
        :returns: bool
        '''
        try:
            me = self.me()
        except facebook_exceptions.OpenFacebookException as e:
            # invalid/expired tokens should bubble up; any other facebook
            # error simply means we are not authenticated
            if isinstance(e, facebook_exceptions.OAuthException):
                raise
            me = None
        authenticated = bool(me)
        return authenticated
    def get(self, path, version=None, **kwargs):
        '''
        Make a Facebook API call
        **Example**::
            open_facebook.get('me')
            open_facebook.get('me', fields='id,name')
        :param path:
            The path to use for making the API call
        :returns: dict
        '''
        version = version or self.version
        kwargs['version'] = version
        response = self.request(path, **kwargs)
        return response
    def get_many(self, *ids, **kwargs):
        '''
        Make a batched Facebook API call
        For multiple ids
        **Example**::
            open_facebook.get_many('me', 'starbucks')
            open_facebook.get_many('me', 'starbucks', fields='id,name')
        :param ids:
            The object ids to fetch in one call
        :returns: dict
        '''
        kwargs['ids'] = ','.join(ids)
        return self.request(**kwargs)
    def set(self, path, params=None, version=None, **post_data):
        '''
        Write data to facebook
        **Example**::
            open_facebook.set('me/feed', message='testing open facebook')
        :param path:
            The path to use for making the API call
        :param params:
            A dictionary of get params
        :param post_data:
            The kwargs for posting to facebook
        :returns: dict
        '''
        version = version or self.version
        assert self.access_token, 'Write operations require an access token'
        if not params:
            params = {}
        params['method'] = 'post'
        params['version'] = version
        response = self.request(path, post_data=post_data, **params)
        return response
    def delete(self, path, *args, **kwargs):
        '''
        Delete the given bit of data
        **Example**::
            graph.delete(12345)
        :param path:
            the id of the element to remove
        '''
        kwargs['method'] = 'delete'
        self.request(path, *args, **kwargs)
    def fql(self, query, **kwargs):
        '''
        Runs the specified query against the Facebook FQL API.
        **Example**::
            open_facebook.fql('SELECT name FROM user WHERE uid = me()')
        :param query:
            The query to execute
        :param kwargs:
            Extra options to send to facebook
        :returns: dict
        '''
        kwargs['q'] = query
        path = 'fql'
        response = self.request(path, **kwargs)
        # return only the data for backward compatability
        return response['data']
    def batch_fql(self, queries_dict):
        '''
        queries_dict a dict with the required queries
        returns the query results in:
        **Example**::
            response = facebook.batch_fql({
                name: 'SELECT uid, name, pic_square FROM user WHERE uid = me()',
                rsvp: 'SELECT uid, rsvp_status FROM event_member WHERE eid=12345678',
            })
            # accessing the results
            response['fql_results']['name']
            response['fql_results']['rsvp']
        :param queries_dict:
            A dictiontary of queries to execute
        :returns: dict
        '''
        # facebook runs a json object of named queries as one batched call
        query = json.dumps(queries_dict)
        query_results = self.fql(query)
        named_results = dict(
            [(r['name'], r['fql_result_set']) for r in query_results])
        return named_results
    def me(self):
        '''
        Cached method of requesting information about me
        '''
        me = getattr(self, '_me', None)
        if me is None:
            # self._me = me = self.get('me')
            self._me = me = self.get('me', fields="id,name,email,verified")
        return me
    def permissions(self):
        '''
        Shortcut for self.get('me/permissions') with some extra parsing
        to turn it into a dictionary of booleans
        :returns: dict
        '''
        permissions_dict = {}
        try:
            permissions = {}
            permissions_response = self.get('me/permissions')
            # determine whether we're dealing with 1.0 or 2.0+
            for permission in permissions_response.get('data', []):
                # graph api 2.0+, returns multiple dicts with keys 'status' and
                # 'permission'
                if any(value in ['granted', 'declined'] for value in permission.values()):
                    for perm in permissions_response['data']:
                        grant = perm.get('status') == 'granted'
                        name = perm.get('permission')
                        # just in case something goes sideways
                        if grant and name:
                            permissions_dict[name] = grant
                # graph api 1.0, returns single dict as {permission: intval}
                elif any(value in [0, 1, '0', '1'] for value in permission.values()):
                    permissions = permissions_response['data'][0]
                    permissions_dict = dict([(k, bool(int(v)))
                                             for k, v in permissions.items()
                                             if v == '1' or v == 1])
                # inspecting the first element is enough to pick the format
                break
        except facebook_exceptions.OAuthException:
            pass
        return permissions_dict
    def has_permissions(self, required_permissions):
        '''
        Validate if all the required_permissions are currently given
        by the user
        **Example**::
            open_facebook.has_permissions(['publish_actions','read_stream'])
        :param required_permissions:
            A list of required permissions
        :returns: bool
        '''
        permissions_dict = self.permissions()
        # see if we have all permissions
        has_permissions = True
        for permission in required_permissions:
            if permission not in permissions_dict:
                has_permissions = False
        return has_permissions
    def my_image_url(self, size='large'):
        '''
        Returns the image url from your profile
        Shortcut for me/picture
        :param size:
            the type of the image to request, see facebook for available formats
        :returns: string
        '''
        query_dict = QueryDict('', True)
        query_dict['type'] = size
        query_dict['access_token'] = self.access_token
        url = '%sme/picture?%s' % (self.api_url, query_dict.urlencode())
        return url
    def request(self, path='', post_data=None, old_api=False, version=None, **params):
        # build the full url, then delegate the http work to the parent class
        url = self.get_request_url(path=path, old_api=old_api, version=version,
                                   **params)
        logger.info('requesting url %s', url)
        response = self._request(url, post_data)
        return response
    def get_request_url(self, path='', old_api=False, version=None, **params):
        '''
        Gets the url for the request.
        '''
        api_base_url = self.old_api_url if old_api else self.api_url
        version = version or self.version
        if getattr(self, 'access_token', None):
            params['access_token'] = self.access_token
        # normalize slashes so the join below cannot double them
        if api_base_url.endswith('/'):
            api_base_url = api_base_url[:-1]
        if path and path.startswith('/'):
            path = path[1:]
        url = '/'.join([api_base_url, version, path])
        return '%s?%s' % (url, urlencode(params))
class TestUser(object):
    '''
    Thin convenience wrapper around the data dict of a facebook test user.
    '''
    def __init__(self, data):
        # expose the common fields directly and keep the raw dict around
        self.name = data['name']
        self.id = data['id']
        self.access_token = data['access_token']
        self.data = data
    def graph(self):
        '''Return an OpenFacebook api client authorized as this test user.'''
        return OpenFacebook(self.access_token)
    def __repr__(self):
        return 'Test user %s' % self.name
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Tester
# ------------------------------------------------------------
import fnmatch
import os
import re
# start each run with a fresh log file
if os.path.isfile("result.log"): os.remove("result.log")
lastfile = ""
def print_error(line, patern, file):
    """Append one incompatibility finding for *file* to result.log.

    line is the 0-based index of the offending line (reported 1-based),
    patern is the matched source fragment. A header block is emitted the
    first time a given file produces a finding.
    """
    global lastfile
    line +=1
    # open the log once per call (instead of once per write) and let the
    # context manager close it deterministically
    with open("result.log", "a+") as log:
        if not lastfile == file:
            lastfile = file
            log.write('\n{:55s} {:s}\n'.format(file, "-"*80 ) )
            log.write('{:55s} {:^5s} {:^5s} {:^70s}\n'.format("", "Linea", "Tipo", "Contenido" ) )
            log.write('{:55s} {:s}\n'.format("", "-"*80 ) )
        log.write('{:55s} {:5d} {:5s} {:s}\n'.format("", line, "ERROR", patern.strip() ) )
def compatibility_check(file):
    """Scan *file* for Python constructs the target runtime cannot handle.

    Every match is reported through print_error(). The file is read once
    and closed deterministically instead of leaking the handle.
    """
    with open(file, "rb") as source:
        data = source.read()
    # one pattern per unsupported construct; all of them scan line-wise
    patterns = [
        # inline if/else on an assignment line
        "^[^\r\n#]*=[^\r\n]*if [^\r\n]* else[^\r\n]*",
        # use of .format{} -- NOTE(review): pattern says '{' where '(' seems
        # intended; kept as-is to preserve behaviour, verify with the author
        "^[^\r\n#]*=[^\r\n]*.format{[^\r\n]*",
        # dict/set comprehensions
        "^[^\r\n#]*\{[^\r\n]* for [^\r\n]*\}[^\r\n]*",
        # 'with open(...) as f:' blocks
        "^[^\r\n#]*with [^\r\n]+ as [^\:\r\n]+\:[^\r\n]*",
        # Thread.is_alive() must be spelled Thread.isAlive()
        "^[^\r\n#]*.is_alive()[^\r\n]*",
    ]
    for pattern in patterns:
        for m in re.compile(pattern, re.MULTILINE).finditer(data):
            # 0-based line index of the match; print_error reports it 1-based
            print_error(len(data[:m.start()].splitlines()), m.group(), file)
# collect every *.py file under the plugin tree 'main-classic'
files = []
for root, dirnames, filenames in os.walk('main-classic'):
    for filename in fnmatch.filter(filenames, '*.py'):
        files.append(os.path.join(root, filename))
# run the incompatibility scan over each collected file
# (python 2 print statement: this script itself targets python 2)
for file in files:
    print "Comprobando %s..." % file
    compatibility_check(file)
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script tests the installer with test cases specified in the config file.
For each test case, it checks that the machine states after the execution of
each command match the expected machine states. For more details, take a look at
the design documentation at http://goo.gl/Q0rGM6
"""
import json
import optparse
import os
import subprocess
import sys
import unittest
from variable_expander import VariableExpander
import verifier_runner
class Config:
  """Holds the machine states, actions and test cases from a config file.

  Attributes:
    states: Maps each state name to the property dictionary describing it.
    actions: Maps each action name to the command implementing it.
    tests: A list of test cases.
  """
  def __init__(self):
    # every instance gets its own fresh containers
    self.states, self.actions, self.tests = {}, {}, []
class InstallerTest(unittest.TestCase):
  """Tests a test case in the config file."""
  def __init__(self, test, config, variable_expander):
    """Constructor.
    Args:
      test: An array of alternating state names and action names, starting and
          ending with state names.
      config: The Config object.
      variable_expander: A VariableExpander object.
    """
    super(InstallerTest, self).__init__()
    self._test = test
    self._config = config
    self._variable_expander = variable_expander
    self._verifier_runner = verifier_runner.VerifierRunner()
    # assume failure until runTest completes; tearDown keys off this flag
    self._clean_on_teardown = True
  def __str__(self):
    """Returns a string representing the test case.
    Returns:
      A string created by joining state names and action names together with
      ' -> ', for example, 'Test: clean -> install chrome -> chrome_installed'.
    """
    return 'Test: %s\n' % (' -> '.join(self._test))
  def runTest(self):
    """Run the test case."""
    # |test| is an array of alternating state names and action names, starting
    # and ending with state names. Therefore, its length must be odd.
    self.assertEqual(1, len(self._test) % 2,
                     'The length of test array must be odd')
    state = self._test[0]
    self._VerifyState(state)
    # Starting at index 1, we loop through pairs of (action, state).
    for i in range(1, len(self._test), 2):
      action = self._test[i]
      RunCommand(self._config.actions[action], self._variable_expander)
      state = self._test[i + 1]
      self._VerifyState(state)
    # If the test makes it here, it means it was successful, because RunCommand
    # and _VerifyState throw an exception on failure.
    self._clean_on_teardown = False
  def tearDown(self):
    """Cleans up the machine if the test case fails."""
    if self._clean_on_teardown:
      RunCleanCommand(True, self._variable_expander)
  def shortDescription(self):
    """Overridden from unittest.TestCase.
    We return None as the short description to suppress its printing.
    The default implementation of this method returns the docstring of the
    runTest method, which is not useful since it's the same for every test case.
    The description from the __str__ method is informative enough.
    """
    return None
  def _VerifyState(self, state):
    """Verifies that the current machine state matches a given state.
    Args:
      state: A state name.
    """
    try:
      self._verifier_runner.VerifyAll(self._config.states[state],
                                      self._variable_expander)
    except AssertionError as e:
      # If an AssertionError occurs, we intercept it and add the state name
      # to the error message so that we know where the test fails.
      raise AssertionError("In state '%s', %s" % (state, e))
def RunCommand(command, variable_expander):
  """Expand *command* with the variable expander and run it via the shell
  from this script's directory.

  Raises an Exception when the command exits with a non-zero status.

  Args:
    command: A command to run. It is expanded using Expand.
    variable_expander: A VariableExpander object.
  """
  expanded = variable_expander.Expand(command)
  here = os.path.dirname(os.path.abspath(__file__))
  status = subprocess.call(expanded, shell=True, cwd=here)
  if status:
    raise Exception('Command %s returned non-zero exit status %s' % (
        expanded, status))
def RunCleanCommand(force_clean, variable_expander):
  """Puts the machine in the clean state (i.e. Chrome not installed).

  Args:
    force_clean: Whether to force cleaning existing installations.
    variable_expander: A VariableExpander object.
  """
  # TODO(sukolsak): Read the clean state from the config file and clean
  # the machine according to it.
  # TODO(sukolsak): Handle Chrome SxS installs.
  interactive_option = '' if force_clean else '--interactive'
  commands = [
      'python uninstall_chrome.py '
      '--chrome-long-name="$CHROME_LONG_NAME" '
      '--no-error-if-absent %s %s' % (level_option, interactive_option)
      for level_option in ('', '--system-level')]
  RunCommand(' && '.join(commands), variable_expander)
def MergePropertyDictionaries(current_property, new_property):
  """Merges the new property dictionary into the current property dictionary.

  This is different from general dictionary merging in that, in case there are
  keys with the same name, we merge values together in the first level, and we
  override earlier values in the second level. For more details, take a look at
  http://goo.gl/uE0RoR

  Args:
    current_property: The property dictionary to be modified.
    new_property: The new property dictionary.
  """
  # .items() instead of the Python-2-only .iteritems() so the helper also
  # runs on Python 3
  for key, value in new_property.items():
    if key not in current_property:
      current_property[key] = value
    else:
      assert(isinstance(current_property[key], dict) and
             isinstance(value, dict))
      # Merge the two second-level dictionaries; keys from |value| override.
      # (The old dict(a.items() + b.items()) relied on py2 list-returning
      # items() and fails on py3.)
      merged = dict(current_property[key])
      merged.update(value)
      current_property[key] = merged
def ParsePropertyFiles(directory, filenames):
  """Parses an array of .prop files.

  Args:
    directory: The directory where the Config file and all Property files
        reside in.
    filenames: An array of Property filenames.

  Returns:
    A property dictionary created by merging all property dictionaries
    specified in the array.
  """
  current_property = {}
  for filename in filenames:
    path = os.path.join(directory, filename)
    # Close each file deterministically instead of leaking the handle.
    with open(path) as f:
      new_property = json.load(f)
    MergePropertyDictionaries(current_property, new_property)
  return current_property
def ParseConfigFile(filename):
  """Parses a .config file.

  Args:
    filename: A Config filename.

  Returns:
    A Config object.
  """
  # Close the config file deterministically instead of leaking the handle.
  with open(filename, 'r') as f:
    config_data = json.load(f)
  directory = os.path.dirname(os.path.abspath(filename))
  config = Config()
  config.tests = config_data['tests']
  # 'states' and 'actions' are arrays of [name, value] pairs.
  for state_name, state_property_filenames in config_data['states']:
    config.states[state_name] = ParsePropertyFiles(directory,
                                                   state_property_filenames)
  for action_name, action_command in config_data['actions']:
    config.actions[action_name] = action_command
  return config
def RunTests(mini_installer_path, config, force_clean):
  """Tests the installer using the given Config object.

  Args:
    mini_installer_path: The path to mini_installer.exe.
    config: A Config object.
    force_clean: Whether to force cleaning existing installations.

  Returns:
    True if all the tests passed, or False otherwise.
  """
  variable_expander = VariableExpander(mini_installer_path)
  # Always start from a clean machine before running the suite.
  RunCleanCommand(force_clean, variable_expander)
  suite = unittest.TestSuite()
  for test in config.tests:
    suite.addTest(InstallerTest(test, config, variable_expander))
  runner = unittest.TextTestRunner(verbosity=2)
  return runner.run(suite).wasSuccessful()
def main():
  """Parse the command line, load the config and run the installer tests."""
  usage = 'usage: %prog [options] config_filename'
  parser = optparse.OptionParser(usage, description='Test the installer.')
  parser.add_option('--build-dir', default='out',
                    help='Path to main build directory (the parent of the '
                    'Release or Debug directory)')
  parser.add_option('--target', default='Release',
                    help='Build target (Release or Debug)')
  parser.add_option('--force-clean', action='store_true', dest='force_clean',
                    default=False, help='Force cleaning existing installations')
  options, args = parser.parse_args()
  if len(args) != 1:
    parser.error('Incorrect number of arguments.')
  mini_installer_path = os.path.join(options.build_dir, options.target,
                                     'mini_installer.exe')
  assert os.path.exists(mini_installer_path), ('Could not find file %s' %
                                               mini_installer_path)
  config = ParseConfigFile(args[0])
  # Non-zero exit status signals at least one failed test.
  return 0 if RunTests(mini_installer_path, config, options.force_clean) else 1
# Run as a script: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
  sys.exit(main())
import praw
import re
import time
from random import random
from ImageScript import build_image, delete_image
from config import MEMORABLE_QUOTE_REGEX, source, start, stop
from config import bot_username, delete_threshold, already_stopped, stopped
from config import started, stopauto, startauto, PMme, numfigs
from database import Comment, Bannedsub, Banneduser, Message, Historicalfigure
from database import db
from pony.orm import db_session, select, delete
# connect to the database
# (tables must already exist: create_tables=False only maps the entities)
db.bind(provider='sqlite', filename='database.db')
db.generate_mapping(create_tables=False)
# connect to the account
# praw reads the 'memorable_quote_bot' section of praw.ini for credentials
reddit = praw.Reddit('memorable_quote_bot')
# open a single database session
@db_session
def main():
    # one polling pass: handle the inbox, prune downvoted replies, then
    # answer new matching comments found in r/test
    number_comments = 0  # NOTE(review): incremented below but never read
    check_inbox()
    delete_if_downvoted()
    comments = reddit.subreddit('test').comments(limit=100)
    for comment in comments:
        regex = re.search(MEMORABLE_QUOTE_REGEX, comment.body)
        subname = comment.subreddit.display_name
        # skip subreddits where the bot has been banned
        allowed = not select(
            b for b in Bannedsub if b.name == subname)[:]
        # skip comments that were already answered
        fresh = not select(
            c for c in Comment if c.id == comment.id)[:]
        if regex and allowed and fresh:
            # select a random person from db and create image
            # NOTE(review): assumes numfigs equals the Historicalfigure row
            # count; an IndexError is possible otherwise -- confirm
            person = select(x.name for x in Historicalfigure)[:][
                int(random()*numfigs)]
            number_comments = number_comments + 1
            image = build_image(regex.group(0), comment.id, person)
            Comment(id=comment.id, url=image)
            comment.reply(make_comment(image))
            print(
                regex.group(0) + '\n' + '-_-_-' +
                comment.subreddit.display_name + '\n' + comment.permalink +
                '\n')
    print('NEXT')
def make_comment(image_link):
    """Build the full reply body: the image link followed by the bot footer."""
    footer = (
        "\n\n _I am a bot._ ^bleep ^^bloop - [stop](" + stopauto +
        ") | [start](" + startauto + ") | [source](" + source +
        ") | [PM me!](" + PMme + ")"
    )
    return image_link + footer
# check inbox to block and unblock on demand.
# message 'stop' to not reply to your comment, 'start' to begin again.
def check_inbox():
    # with db_session:
    # only the 20 most recent processed-message ids / bans are consulted;
    # NOTE(review): older entries fall outside this window -- confirm intent
    msg_list = select(m.id for m in Message)[:20]
    blacklist = select(b.name for b in Banneduser)[:20]
    msgs = reddit.inbox.messages(limit=100)
    for msg in msgs:
        if msg.id not in msg_list:
            print(msg.body + ' not in list')
            # remember this message so it is not processed twice
            Message(id=msg.id)
            if msg.author.name in blacklist:
                if msg.body == start:
                    # NOTE(review): this deletes the Message row but never
                    # removes the Banneduser row, so the sender stays
                    # blacklisted after asking to start -- verify intent
                    delete(m for m in Message if m.id == msg.id)
                    msg.reply(started)
                elif msg.body == stop:
                    msg.reply(already_stopped)
            else:
                if msg.body == start:
                    msg.reply(started)
                elif msg.body == stop:
                    # add the sender to the blacklist
                    Banneduser(name=msg.author.name)
                    msg.reply(stopped)
def delete_if_downvoted():
    """
    auto delete the comment and image if downvoted
    Walks the bot's controversial comments and removes any reply (plus its
    generated image) whose score dropped to delete_threshold or below.
    """
    for comment in reddit.redditor(bot_username).comments.controversial(
            'all', limit=None):
        if comment.score <= delete_threshold:
            # the original indexed a bare generator expression, which raises
            # TypeError; wrap it in a pony select() like the rest of the module
            links = select(
                c.url for c in Comment if c.id == comment.parent().id)[:]
            if links:
                delete_image(links[0])
            comment.delete()
def root():
    """Drive the bot: run main() repeatedly with a rate-limit backoff."""
    beginning = time.time()
    now = beginning
    for i in range(0, 10):  # while True:
        try:
            main()
        except Exception as e:
            # `e == praw.exceptions.APIException` compared the instance to
            # the class and never matched, so the 100s backoff never fired;
            # isinstance is the correct check
            if isinstance(e, praw.exceptions.APIException):
                print('oops, exceeded the limit. 100 sec. sleep')
                time.sleep(100)
            else:
                print(str(e))
                time.sleep(2)
        if (time.time() - now) >= 3600:
            now = time.time()
    end = time.time()
    print('\n\n')
    print(end - beginning)
    print('\n')
# start the polling loop when executed as a script
if __name__ == '__main__':
    root()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
# Telemetry page wrapper: every page in this set is measured with a
# desktop user agent.
class ToughImageCasesPage(page_module.Page):
  def __init__(self, url, page_set):
    super(ToughImageCasesPage, self).__init__(url=url, page_set=page_set)
    # force the desktop UA regardless of the platform running the benchmark
    self.user_agent_type = 'desktop'
class ToughImageCasesPageSet(page_set_module.PageSet):
  """ A collection of image-heavy sites. """
  def __init__(self):
    super(ToughImageCasesPageSet, self).__init__(
        user_agent_type='desktop')
    # one large photograph per user story
    for url in (
        'http://www.free-pictures-photos.com/aviation/airplane-306.jpg',
        ('http://upload.wikimedia.org/wikipedia/commons/c/cb/'
         'General_history%2C_Alaska_Yukon_Pacific_Exposition%'
         '2C_fully_illustrated_-_meet_me_in_Seattle_1909_-_Page_78.jpg')):
      self.AddUserStory(ToughImageCasesPage(url, self))
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing.  Patterns that contain backslash
# escapes are written as raw strings: '\s' in a plain string literal is an
# invalid escape sequence (DeprecationWarning since Python 3.6, an error in
# future versions); the compiled regexes are unchanged.

interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')

starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile(r'([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'[a-zA-Z][^\t\n\r\f />\x00]*')

attrfind = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')

locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:[\s/]*                          # optional whitespace before attribute name
    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
      (?:\s*=+\s*                    # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |"[^"]*"                   # LIT-enclosed value
          |(?!['"])[^>\s]*           # bare value
         )
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Assemble "msg, at line L, column C" from whichever parts are known.
        pieces = [self.msg]
        if self.lineno is not None:
            pieces.append(", at line %d" % self.lineno)
        if self.offset is not None:
            pieces.append(", column %d" % (self.offset + 1))
        return "".join(pieces)
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Elements whose content is raw CDATA (only the matching end tag is
    # special inside them).
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        # Raise an HTMLParseError annotated with the current position.
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # While in CDATA mode, only the matching end tag is "interesting".
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            # Everything up to the next markup character is plain data.
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # The construct is incomplete; wait for more data unless
                    # we are at EOF, in which case salvage what we can.
                    if not end:
                        break
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: #bail by consuming &#
                        # NOTE(review): these indices look suspicious -- they
                        # emit rawdata[0:2] and rewind to position 2 instead
                        # of using i; preserved as-is for compatibility.
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<!':
            self.error('unexpected call to parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+2] not in ('<!', '</'):
            self.error('unexpected call to parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()

        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # Attribute without a value, e.g. <input disabled>.
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk before the closing '>': report position and treat the
            # whole span as data.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1

        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # Some other end tag inside CDATA content is just data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem)
        self.clear_cdata_mode()
        return gtpos

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        pass

    # Internal -- helper to remove special character quoting
    # Lazily populated class-level entity table (see unescape below).
    entitydefs = None

    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return unichr(c)
            except ValueError:
                return '&#'+s+';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser supports apos,
                # which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = unichr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    return '&'+s+';'

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package math
// The original C code, the long comment, and the constants
// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
// available from http://www.netlib.org/cephes/cmath.tgz.
// The go code is a simplified version of the original C.
// tanh.c
//
// Hyperbolic tangent
//
// SYNOPSIS:
//
// double x, y, tanh();
//
// y = tanh( x );
//
// DESCRIPTION:
//
// Returns hyperbolic tangent of argument in the range MINLOG to MAXLOG.
// MAXLOG = 8.8029691931113054295988e+01 = log(2**127)
// MINLOG = -8.872283911167299960540e+01 = log(2**-128)
//
// A rational function is used for |x| < 0.625. The form
// x + x**3 P(x)/Q(x) of Cody & Waite is employed.
// Otherwise,
// tanh(x) = sinh(x)/cosh(x) = 1 - 2/(exp(2x) + 1).
//
// ACCURACY:
//
// Relative error:
// arithmetic domain # trials peak rms
// IEEE -2,2 30000 2.5e-16 5.8e-17
//
// Cephes Math Library Release 2.8: June, 2000
// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
//
// The readme file at http://netlib.sandia.gov/cephes/ says:
// Some software in this archive may be from the book _Methods and
// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
// International, 1989) or from the Cephes Mathematical Library, a
// commercial product. In either event, it is copyrighted by the author.
// What you see here may be used freely but it comes with no support or
// guarantee.
//
// The two known misprints in the book are repaired here in the
// source listings for the gamma function and the incomplete beta
// integral.
//
// Stephen L. Moshier
// moshier@na-net.ornl.gov
//
// tanhP and tanhQ hold the numerator P(x) and denominator Q(x)
// coefficients of the rational approximation x + x**3 P(x)/Q(x)
// used by tanh for |x| < 0.625 (see the Cephes notes above).
var tanhP = [...]float64{
	-9.64399179425052238628e-1,
	-9.92877231001918586564e1,
	-1.61468768441708447952e3,
}
var tanhQ = [...]float64{
	1.12811678491632931402e2,
	2.23548839060100448583e3,
	4.84406305325125486048e3,
}
// Tanh returns the hyperbolic tangent of x.
//
// Special cases are:
//
// Tanh(±0) = ±0
// Tanh(±Inf) = ±1
// Tanh(NaN) = NaN
func Tanh(x float64) float64 {
	// Delegate to the portable implementation unless an
	// architecture-specific one is available.
	if !haveArchTanh {
		return tanh(x)
	}
	return archTanh(x)
}
func tanh(x float64) float64 {
const MAXLOG = 8.8029691931113054295988e+01 // log(2**127)
z := Abs(x)
switch {
case z > 0.5*MAXLOG:
if x < 0 {
return -1
}
return 1
case z >= 0.625:
s := Exp(2 * z)
z = 1 - 2/(s+1)
if x < 0 {
z = -z
}
default:
if x == 0 {
return x
}
s := x * x
z = x + x*s*((tanhP[0]*s+tanhP[1])*s+tanhP[2])/(((s+tanhQ[0])*s+tanhQ[1])*s+tanhQ[2])
}
return z
} | go | github | https://github.com/golang/go | src/math/tanh.go |
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
namespace cv { namespace hal {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
// Forward declarations of the CPU-dispatched split kernels defined below
// (one per element size; split64s also serves double via bit reinterpretation).
void split8u(const uchar* src, uchar** dst, int len, int cn);
void split16u(const ushort* src, ushort** dst, int len, int cn);
void split32s(const int* src, int** dst, int len, int cn);
void split64s(const int64* src, int64** dst, int len, int cn);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#if (CV_SIMD || CV_SIMD_SCALABLE)
// see the comments for vecmerge_ in merge.cpp
// Deinterleave `len` pixels with `cn` (2..4) channels from `src` into the
// per-channel planes dst[0..cn-1], VECSZ elements per step.
template<typename T, typename VecT> static void
vecsplit_( const T* src, T** dst, int len, int cn )
{
    const int VECSZ = VTraits<VecT>::vlanes();
    int i, i0 = 0;
    T* dst0 = dst[0];
    T* dst1 = dst[1];

    // Byte misalignment of each destination plane w.r.t. the vector width.
    int r0 = (int)((size_t)(void*)dst0 % (VECSZ*sizeof(T)));
    int r1 = (int)((size_t)(void*)dst1 % (VECSZ*sizeof(T)));
    int r2 = cn > 2 ? (int)((size_t)(void*)dst[2] % (VECSZ*sizeof(T))) : r0;
    int r3 = cn > 3 ? (int)((size_t)(void*)dst[3] % (VECSZ*sizeof(T))) : r0;
    hal::StoreMode mode = hal::STORE_ALIGNED_NOCACHE;

    if( (r0|r1|r2|r3) != 0 )
    {
        mode = hal::STORE_UNALIGNED;
        // If all planes share the same element-sized misalignment, store the
        // first i0 elements unaligned, then switch to aligned stores from i0 on.
        if( r0 == r1 && r0 == r2 && r0 == r3 && r0 % sizeof(T) == 0 && len > VECSZ*2 )
            i0 = VECSZ - (r0 / sizeof(T));
    }

    if( cn == 2 )
    {
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                // Final partial step: back up so the last full vector fits
                // (overlapping stores); alignment can no longer be assumed.
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a, b;
            v_load_deinterleave(src + i*cn, a, b);
            v_store(dst0 + i, a, mode);
            v_store(dst1 + i, b, mode);
            if( i < i0 )
            {
                // Jump so the next iteration starts at the aligned index i0.
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else if( cn == 3 )
    {
        T* dst2 = dst[2];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a, b, c;
            v_load_deinterleave(src + i*cn, a, b, c);
            v_store(dst0 + i, a, mode);
            v_store(dst1 + i, b, mode);
            v_store(dst2 + i, c, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    else
    {
        CV_Assert( cn == 4 );
        T* dst2 = dst[2];
        T* dst3 = dst[3];
        for( i = 0; i < len; i += VECSZ )
        {
            if( i > len - VECSZ )
            {
                i = len - VECSZ;
                mode = hal::STORE_UNALIGNED;
            }
            VecT a, b, c, d;
            v_load_deinterleave(src + i*cn, a, b, c, d);
            v_store(dst0 + i, a, mode);
            v_store(dst1 + i, b, mode);
            v_store(dst2 + i, c, mode);
            v_store(dst3 + i, d, mode);
            if( i < i0 )
            {
                i = i0 - VECSZ;
                mode = hal::STORE_ALIGNED_NOCACHE;
            }
        }
    }
    vx_cleanup();
}
#endif
// Scalar fallback: deinterleave `len` pixels of `cn` channels from `src`
// into the per-channel planes dst[0..cn-1].
template<typename T> static void
split_( const T* src, T** dst, int len, int cn )
{
    // Peel the first (cn % 4, or 4 when cn is a multiple of 4) channels,
    // then process any remaining channels four at a time.
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;

    switch( k )
    {
    case 1:
        {
            T* d0 = dst[0];
            if( cn == 1 )
            {
                // Single-channel "split" degenerates to a plain copy.
                memcpy(d0, src, len * sizeof(T));
            }
            else
            {
                for( i = 0, j = 0; i < len; i++, j += cn )
                    d0[i] = src[j];
            }
        }
        break;
    case 2:
        {
            T *d0 = dst[0], *d1 = dst[1];
            for( i = 0, j = 0; i < len; i++, j += cn )
            {
                d0[i] = src[j];
                d1[i] = src[j+1];
            }
        }
        break;
    case 3:
        {
            T *d0 = dst[0], *d1 = dst[1], *d2 = dst[2];
            for( i = 0, j = 0; i < len; i++, j += cn )
            {
                d0[i] = src[j];
                d1[i] = src[j+1];
                d2[i] = src[j+2];
            }
        }
        break;
    default:
        {
            T *d0 = dst[0], *d1 = dst[1], *d2 = dst[2], *d3 = dst[3];
            for( i = 0, j = 0; i < len; i++, j += cn )
            {
                d0[i] = src[j]; d1[i] = src[j+1];
                d2[i] = src[j+2]; d3[i] = src[j+3];
            }
        }
        break;
    }

    // Remaining channels, four per pass.
    for( ; k < cn; k += 4 )
    {
        T *d0 = dst[k], *d1 = dst[k+1], *d2 = dst[k+2], *d3 = dst[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            d0[i] = src[j]; d1[i] = src[j+1];
            d2[i] = src[j+2]; d3[i] = src[j+3];
        }
    }
}
void split8u(const uchar* src, uchar** dst, int len, int cn )
{
    CV_INSTRUMENT_REGION();

#if (CV_SIMD || CV_SIMD_SCALABLE)
    // Take the vector path when the row holds at least one full vector and
    // the channel count is supported by vecsplit_.
    if( len >= VTraits<v_uint8>::vlanes() && 2 <= cn && cn <= 4 )
    {
        vecsplit_<uchar, v_uint8>(src, dst, len, cn);
        return;
    }
#endif
    split_(src, dst, len, cn);
}
void split16u(const ushort* src, ushort** dst, int len, int cn )
{
    CV_INSTRUMENT_REGION();

#if (CV_SIMD || CV_SIMD_SCALABLE)
    // Vector path for full-width rows with 2..4 channels.
    if( len >= VTraits<v_uint16>::vlanes() && 2 <= cn && cn <= 4 )
    {
        vecsplit_<ushort, v_uint16>(src, dst, len, cn);
        return;
    }
#endif
    split_(src, dst, len, cn);
}
void split32s(const int* src, int** dst, int len, int cn )
{
    CV_INSTRUMENT_REGION();

#if (CV_SIMD || CV_SIMD_SCALABLE)
    // Vector path for full-width rows with 2..4 channels.
    if( len >= VTraits<v_uint32>::vlanes() && 2 <= cn && cn <= 4 )
    {
        vecsplit_<int, v_int32>(src, dst, len, cn);
        return;
    }
#endif
    split_(src, dst, len, cn);
}
void split64s(const int64* src, int64** dst, int len, int cn )
{
    CV_INSTRUMENT_REGION();

#if (CV_SIMD || CV_SIMD_SCALABLE)
    // Vector path for full-width rows with 2..4 channels.
    if( len >= VTraits<v_int64>::vlanes() && 2 <= cn && cn <= 4 )
    {
        vecsplit_<int64, v_int64>(src, dst, len, cn);
        return;
    }
#endif
    split_(src, dst, len, cn);
}
#endif
CV_CPU_OPTIMIZATION_NAMESPACE_END
}} // namespace | unknown | github | https://github.com/opencv/opencv | modules/core/src/split.simd.hpp |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vlan
version_added: "2.4"
short_description: Manages VLAN resources and attributes on Huawei CloudEngine switches.
description:
- Manages VLAN configurations on Huawei CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
vlan_id:
description:
- Single VLAN ID, in the range from 1 to 4094.
vlan_range:
description:
- Range of VLANs such as C(2-10) or C(2,5,10-15), etc.
name:
description:
- Name of VLAN, minimum of 1 character, maximum of 31 characters.
description:
description:
- Specify VLAN description, minimum of 1 character, maximum of 80 characters.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: vlan module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure a range of VLANs are not present on the switch
ce_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
state: absent
provider: "{{ cli }}"
- name: Ensure VLAN 50 exists with the name WEB
  ce_vlan:
    vlan_id: 50
    name: WEB
    state: present
    provider: "{{ cli }}"
- name: Ensure VLAN is NOT on the device
ce_vlan:
vlan_id: 50
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: always
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: always
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: always
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" }
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "" }
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" }
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "name VLAN20"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, execute_nc_action, ce_argument_spec
CE_NC_CREATE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="create">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_DELETE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="delete">
<vlanId>%s</vlanId>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN_DES = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN_NAME = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_GET_VLAN = """
<filter type="subtree">
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan>
<vlanId>%s</vlanId>
<vlanDesc/>
<vlanName/>
</vlan>
</vlans>
</vlan>
</filter>
"""
CE_NC_GET_VLANS = """
<filter type="subtree">
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan>
<vlanId/>
<vlanName/>
</vlan>
</vlans>
</vlan>
</filter>
"""
CE_NC_CREATE_VLAN_BATCH = """
<action>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<shVlanBatchCrt>
<vlans>%s:%s</vlans>
</shVlanBatchCrt>
</vlan>
</action>
"""
CE_NC_DELETE_VLAN_BATCH = """
<action>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<shVlanBatchDel>
<vlans>%s:%s</vlans>
</shVlanBatchDel>
</vlan>
</action>
"""
class Vlan(object):
"""
Manages VLAN resources and attributes
"""
    def __init__(self, argument_spec):
        """Read module parameters and initialize run-state bookkeeping."""
        self.spec = argument_spec
        self.module = None
        # init_module() must run first so self.module.params is populated.
        self.init_module()

        # vlan config info
        self.vlan_id = self.module.params['vlan_id']
        self.vlan_range = self.module.params['vlan_range']
        self.name = self.module.params['name']
        self.description = self.module.params['description']
        self.state = self.module.params['state']

        # state
        self.changed = False
        self.vlan_exist = False
        self.vlan_attr_exist = None
        self.vlans_list_exist = list()
        self.vlans_list_change = list()
        self.updates_cmd = list()
        self.results = dict()
        self.vlan_attr_end = dict()
def init_module(self):
"""
init ansible NetworkModule.
"""
required_one_of = [["vlan_id", "vlan_range"]]
mutually_exclusive = [["vlan_id", "vlan_range"]]
self.module = AnsibleModule(
argument_spec=self.spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def config_vlan(self, vlan_id, name='', description=''):
"""Create vlan."""
if name is None:
name = ''
if description is None:
description = ''
conf_str = CE_NC_CREATE_VLAN % (vlan_id, name, description)
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "CREATE_VLAN")
self.changed = True
def merge_vlan(self, vlan_id, name, description):
"""Merge vlan."""
conf_str = None
if not name and description:
conf_str = CE_NC_MERGE_VLAN_DES % (vlan_id, description)
if not description and name:
conf_str = CE_NC_MERGE_VLAN_NAME % (vlan_id, name)
if description and name:
conf_str = CE_NC_MERGE_VLAN % (vlan_id, name, description)
if not conf_str:
return
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "MERGE_VLAN")
self.changed = True
def create_vlan_batch(self, vlan_list):
"""Create vlan batch."""
if not vlan_list:
return
vlan_bitmap = self.vlan_list_to_bitmap(vlan_list)
xmlstr = CE_NC_CREATE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap)
recv_xml = execute_nc_action(self.module, xmlstr)
self.check_response(recv_xml, "CREATE_VLAN_BATCH")
self.updates_cmd.append('vlan batch %s' % (
self.vlan_range.replace(',', ' ').replace('-', ' to ')))
self.changed = True
def delete_vlan_batch(self, vlan_list):
"""Delete vlan batch."""
if not vlan_list:
return
vlan_bitmap = self.vlan_list_to_bitmap(vlan_list)
xmlstr = CE_NC_DELETE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap)
recv_xml = execute_nc_action(self.module, xmlstr)
self.check_response(recv_xml, "DELETE_VLAN_BATCH")
self.updates_cmd.append('undo vlan batch %s' % (
self.vlan_range.replace(',', ' ').replace('-', ' to ')))
self.changed = True
def undo_config_vlan(self, vlanid):
"""Delete vlan."""
conf_str = CE_NC_DELETE_VLAN % vlanid
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "DELETE_VLAN")
self.changed = True
self.updates_cmd.append('undo vlan %s' % self.vlan_id)
def get_vlan_attr(self, vlan_id):
""" get vlan attributes."""
conf_str = CE_NC_GET_VLAN % vlan_id
xml_str = get_nc_config(self.module, conf_str)
attr = dict()
if "<data/>" in xml_str:
return attr
else:
re_find_id = re.findall(r'.*<vlanId>(.*)</vlanId>.*\s*', xml_str)
re_find_name = re.findall(r'.*<vlanName>(.*)</vlanName>.*\s*', xml_str)
re_find_desc = re.findall(r'.*<vlanDesc>(.*)</vlanDesc>.*\s*', xml_str)
if re_find_id:
if re_find_name:
attr = dict(vlan_id=re_find_id[0], name=re_find_name[0],
description=re_find_desc[0])
else:
attr = dict(vlan_id=re_find_id[0], name=None,
description=re_find_desc[0])
return attr
def get_vlans_name(self):
""" get all vlan vid and its name list,
sample: [ ("20", "VLAN_NAME_20"), ("30", "VLAN_NAME_30") ]"""
conf_str = CE_NC_GET_VLANS
xml_str = get_nc_config(self.module, conf_str)
vlan_list = list()
if "<data/>" in xml_str:
return vlan_list
else:
vlan_list = re.findall(
r'.*<vlanId>(.*)</vlanId>.*\s*<vlanName>(.*)</vlanName>.*', xml_str)
return vlan_list
def get_vlans_list(self):
""" get all vlan vid list, sample: [ "20", "30", "31" ]"""
conf_str = CE_NC_GET_VLANS
xml_str = get_nc_config(self.module, conf_str)
vlan_list = list()
if "<data/>" in xml_str:
return vlan_list
else:
vlan_list = re.findall(
r'.*<vlanId>(.*)</vlanId>.*', xml_str)
return vlan_list
def vlan_series(self, vlanid_s):
""" convert vlan range to list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
    def vlan_list_to_bitmap(self, vlanlist):
        """ convert vlan list to vlan bitmap """
        # The device expects a 4096-bit bitmap encoded as 1024 hex digits;
        # the bit for vlan id v lives in hex digit v // 4, at bit position
        # v % 4 counted from the most-significant bit of that digit.
        vlan_bit = ['0'] * 1024
        bit_int = [0] * 1024

        vlan_list_len = len(vlanlist)
        for num in range(vlan_list_len):
            tagged_vlans = int(vlanlist[num])
            if tagged_vlans <= 0 or tagged_vlans > 4094:
                self.module.fail_json(
                    msg='Error: Vlan id is not in the range from 1 to 4094.')
            j = tagged_vlans // 4
            bit_int[j] |= 0x8 >> (tagged_vlans % 4)
            # Re-render this nibble as its single hex digit.
            vlan_bit[j] = hex(bit_int[j])[2]

        vlan_xml = ''.join(vlan_bit)

        return vlan_xml
def check_params(self):
"""Check all input params"""
if not self.vlan_id and self.description:
self.module.fail_json(
msg='Error: Vlan description could be set only at one vlan.')
if not self.vlan_id and self.name:
self.module.fail_json(
msg='Error: Vlan name could be set only at one vlan.')
# check vlan id
if self.vlan_id:
if not self.vlan_id.isdigit():
self.module.fail_json(
msg='Error: Vlan id is not digit.')
if int(self.vlan_id) <= 0 or int(self.vlan_id) > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
# check vlan description
if self.description:
if len(self.description) > 81 or len(self.description.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: vlan description is not in the range from 1 to 80.')
# check vlan name
if self.name:
if len(self.name) > 31 or len(self.name.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: Vlan name is not in the range from 1 to 31.')
def get_proposed(self):
"""
get proposed config.
"""
if self.vlans_list_change:
if self.state == 'present':
proposed_vlans_tmp = list(self.vlans_list_change)
proposed_vlans_tmp.extend(self.vlans_list_exist)
self.results['proposed_vlans_list'] = list(
set(proposed_vlans_tmp))
else:
self.results['proposed_vlans_list'] = list(
set(self.vlans_list_exist) - set(self.vlans_list_change))
self.results['proposed_vlans_list'].sort()
else:
self.results['proposed_vlans_list'] = self.vlans_list_exist
if self.vlan_id:
if self.state == "present":
self.results['proposed'] = dict(
vlan_id=self.vlan_id,
name=self.name,
description=self.description
)
else:
self.results['proposed'] = None
else:
self.results['proposed'] = None
def get_existing(self):
"""
get existing config.
"""
self.results['existing_vlans_list'] = self.vlans_list_exist
if self.vlan_id:
if self.vlan_attr_exist:
self.results['existing'] = dict(
vlan_id=self.vlan_attr_exist['vlan_id'],
name=self.vlan_attr_exist['name'],
description=self.vlan_attr_exist['description']
)
else:
self.results['existing'] = None
else:
self.results['existing'] = None
def get_end_state(self):
"""
get end state config.
"""
self.results['end_state_vlans_list'] = self.get_vlans_list()
if self.vlan_id:
if self.vlan_attr_end:
self.results['end_state'] = dict(
vlan_id=self.vlan_attr_end['vlan_id'],
name=self.vlan_attr_end['name'],
description=self.vlan_attr_end['description']
)
else:
self.results['end_state'] = None
else:
self.results['end_state'] = None
    def work(self):
        """Main worker: validate params, compute the change set, apply it on
        the device, and populate self.results for exit_json.
        """
        # check param
        self.check_params()
        # get all vlan info
        self.vlans_list_exist = self.get_vlans_list()
        # get vlan attributes
        if self.vlan_id:
            self.vlans_list_change.append(self.vlan_id)
            self.vlan_attr_exist = self.get_vlan_attr(self.vlan_id)
            if self.vlan_attr_exist:
                self.vlan_exist = True
        if self.vlan_range:
            new_vlans_tmp = self.vlan_range_to_list(self.vlan_range)
            if self.state == 'present':
                # only create vlans that are not already on the device
                self.vlans_list_change = list(
                    set(new_vlans_tmp) - set(self.vlans_list_exist))
            else:
                # only delete vlans that actually exist on the device
                self.vlans_list_change = [
                    val for val in new_vlans_tmp if val in self.vlans_list_exist]
        if self.state == 'present':
            if self.vlan_id:
                if not self.vlan_exist:
                    # create a new vlan
                    self.config_vlan(self.vlan_id, self.name, self.description)
                elif self.description and self.description != self.vlan_attr_exist['description']:
                    # merge vlan description
                    self.merge_vlan(self.vlan_id, self.name, self.description)
                elif self.name and self.name != self.vlan_attr_exist['name']:
                    # merge vlan name
                    self.merge_vlan(self.vlan_id, self.name, self.description)
                # update command for results
                if self.changed:
                    self.updates_cmd.append('vlan %s' % self.vlan_id)
                    if self.name:
                        self.updates_cmd.append('name %s' % self.name)
                    if self.description:
                        self.updates_cmd.append(
                            'description %s' % self.description)
            elif self.vlan_range and self.vlans_list_change:
                self.create_vlan_batch(self.vlans_list_change)
        else:  # absent
            if self.vlan_id:
                if self.vlan_exist:
                    # delete the vlan
                    self.undo_config_vlan(self.vlan_id)
            elif self.vlan_range and self.vlans_list_change:
                self.delete_vlan_batch(self.vlans_list_change)
        # result: re-read the vlan so end_state reflects the device's view
        if self.vlan_id:
            self.vlan_attr_end = self.get_vlan_attr(self.vlan_id)
        self.get_existing()
        self.get_proposed()
        self.get_end_state()
        self.results['changed'] = self.changed
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        # exit_json never returns
        self.module.exit_json(**self.results)
def main():
    """Ansible module entry point: build the argument spec and run the worker."""
    argument_spec = {
        'vlan_id': dict(required=False),
        'vlan_range': dict(required=False, type='str'),
        'name': dict(required=False, type='str'),
        'description': dict(required=False, type='str'),
        'state': dict(choices=['absent', 'present'], default='present',
                      required=False),
    }
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    Vlan(argument_spec).work()
if __name__ == '__main__':
    main()
//===--- Bridging/LangOptsBridging.cpp ------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2022-2025 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "swift/AST/ASTBridging.h"
#include "swift/Basic/LangOptions.h"
using namespace swift;
/// Whether the given language feature is enabled in these options.
bool BridgedLangOptions_hasFeature(BridgedLangOptions cLangOpts,
                                   BridgedFeature feature) {
  const LangOptions &langOpts = cLangOpts.unbridged();
  return langOpts.hasFeature(static_cast<Feature>(feature));
}
/// Pointer width (in bits) of the target triple; 0 when the architecture's
/// width is not one of 16/32/64.
unsigned BridgedLangOptions::getTargetPointerBitWidth() const {
  const auto &target = unbridged().Target;
  if (target.isArch64Bit())
    return 64;
  if (target.isArch32Bit())
    return 32;
  if (target.isArch16Bit())
    return 16;
  return 0;
}
/// Byte order of the target triple as a bridged enum value.
BridgedEndianness BridgedLangOptions::getTargetEndianness() const {
  if (unbridged().Target.isLittleEndian())
    return EndianLittle;
  return EndianBig;
}
/// Whether documentation comments should be attached to declarations.
bool BridgedLangOptions::getAttachCommentsToDecls() const {
  return unbridged().AttachCommentsToDecls;
}
/// Convert an array of numbers into a form we can use in Swift.
namespace {
/// Copy \p array into a freshly malloc'ed SwiftInt buffer stored through
/// \p cElements and return the element count.  The caller owns the buffer
/// and must release it with deallocateIntBuffer().
template <typename Arr>
SwiftInt convertArray(const Arr &array, SwiftInt **cElements) {
  SwiftInt numElements = array.size();
  *cElements = (SwiftInt *)malloc(sizeof(SwiftInt) * numElements);
  for (SwiftInt i = 0; i != numElements; ++i)
    (*cElements)[i] = array[i];
  return numElements;
}
} // namespace
/// Release a buffer produced by convertArray(); safe to call with null.
void deallocateIntBuffer(SwiftInt *_Nullable cComponents) { free(cComponents); }
/// Copy the effective language version components (e.g. {6, 0}) into a
/// malloc'ed buffer owned by the caller (release with deallocateIntBuffer).
SwiftInt
BridgedLangOptions_getLanguageVersion(BridgedLangOptions cLangOpts,
                                      SwiftInt **cComponents) {
  return convertArray(cLangOpts.unbridged().EffectiveLanguageVersion,
                      cComponents);
}
/// Copy the compiler version components into a malloc'ed buffer owned by
/// the caller (release with deallocateIntBuffer).
// NOTE(review): this reports getCurrentLanguageVersion(), which is what the
// original code did as well -- presumably matching `#if compiler(...)`
// semantics; confirm before changing.
SwiftInt
BridgedLangOptions_getCompilerVersion(BridgedLangOptions cLangOpts,
                                      SwiftInt **cComponents) {
  return convertArray(version::Version::getCurrentLanguageVersion(),
                      cComponents);
}
/// Hand back the atomic bit widths supported by the target; the caller owns
/// the returned buffer (release with deallocateIntBuffer).
SwiftInt BridgedLangOptions_getTargetAtomicBitWidths(
    BridgedLangOptions cLangOpts, SwiftInt *_Nullable *_Nonnull cElements) {
  auto widths = cLangOpts.unbridged().getAtomicBitWidthValues();
  return convertArray(widths, cElements);
}
namespace {
/// Describe behaviors that should prevent an attribute from being shown.
///
/// This is DeclAttrBehaviors, but with irrelevent values set to zero.
/// Enumerators kept at their real bit value are the ones that suppress an
/// attribute; the enumeration is used so that a DeclAttr.def BEHAVIORS mask
/// evaluates to 0 exactly when the attribute should be reported (see the
/// `(BEHAVIORS) == 0` check in the enumeration function below).
enum DeclAttrBehaviorsNotShown : uint64_t {
  /// Whether this attribute is only valid when concurrency is enabled.
  ConcurrencyOnly = 0,
  /// True if multiple instances of this attribute are allowed on a single
  /// declaration.
  AllowMultipleAttributes = 0,
  /// True if this is a decl modifier - i.e., that it should not be spelled
  /// with an @.
  DeclModifier = 1ull << 2,
  /// True if this is a long attribute that should be printed on its own line.
  ///
  /// Currently has no effect on DeclModifier attributes.
  LongAttribute = 0,
  /// True if this shouldn't be serialized.
  NotSerialized = 0,
  /// True if this attribute is only valid when parsing a .sil file.
  SILOnly = 1ull << 5,
  /// The attribute should be reported by parser as unknown.
  RejectByParser = 1ull << 6,
  /// Whether client code cannot use the attribute. Hides it in code completion.
  UserInaccessible = 1ull << 7,
  /// Whether adding this attribute can break API
  APIBreakingToAdd = 0,
  /// Whether removing this attribute can break API
  APIBreakingToRemove = 0,
  /// Whether adding this attribute can break ABI
  ABIBreakingToAdd = 0,
  /// Whether removing this attribute can break ABI
  ABIBreakingToRemove = 0,
  /// The opposite of APIBreakingToAdd
  APIStableToAdd = 0,
  /// The opposite of APIBreakingToRemove
  APIStableToRemove = 0,
  /// The opposite of ABIBreakingToAdd
  ABIStableToAdd = 0,
  /// The opposite of ABIBreakingToRemove
  ABIStableToRemove = 0,
  /// Attribute should not be used in an \c \@abi attribute. Use for
  /// attributes which cannot affect mangled names, even indirectly, and
  /// which either don't affect ABI or where ABI-only declarations get their
  /// behavior from their API counterpart.
  ForbiddenInABIAttr = 0,
  /// Attribute can be used without restrictions in an \c \@abi attribute.
  /// Use for attributes which affect mangled names but otherwise don't alter
  /// the ABI, or ones where the \c ABIDeclChecker manually implements
  /// special checking logic (e.g. because several different attributes
  /// contribute to the same aspect of ABI in some complicated way).
  UnconstrainedInABIAttr = 0,
  /// Attribute can be used in an \c \@abi attribute, but must match
  /// equivalent on API decl. Use for attributes which affect both mangled
  /// names and other parts of the ABI such that the declaration can only be
  /// valid if they match.
  EquivalentInABIAttr = 0,
  /// Use for attributes which are \em only valid on declarations that cannot
  /// have an \c @abi attribute, such as \c ImportDecl .
  UnreachableInABIAttr = 0,
};
}
/// Invoke \p callback once per build-configuration entry implied by the
/// given language options: custom `-D` conditions, enabled language
/// features, available attributes, and target platform facts.
void BridgedLangOptions_enumerateBuildConfigurationEntries(
    BridgedLangOptions cLangOpts,
    void * _Nonnull callbackContext,
    void (* _Nonnull callback)(
        BridgedLangOptions cLangOpts, void * _Nonnull callbackContext,
        BuildConfigurationKey key, BridgedStringRef value)) {
  const LangOptions &langOpts = cLangOpts.unbridged();
  // Enumerate custom conditions.
  for (const auto &customCondition: langOpts.getCustomConditionalCompilationFlags()) {
    callback(cLangOpts, callbackContext, BCKCustomCondition,
             StringRef(customCondition));
  }
  // Enumerate features that are enabled.
#define LANGUAGE_FEATURE(FeatureName, SENumber, Description) \
  if (langOpts.hasFeature(Feature::FeatureName)) \
    callback(cLangOpts, callbackContext, BCKFeature, StringRef(#FeatureName));
#include "swift/Basic/Features.def"
  // Enumerate attributes that are available.  Only attributes whose
  // behavior mask is entirely "irrelevant" bits (see
  // DeclAttrBehaviorsNotShown) evaluate to 0 and get reported.
#define DECL_ATTR(SPELLING, CLASS, REQUIREMENTS, BEHAVIORS, CODE) \
  if ((BEHAVIORS) == 0) \
    callback(cLangOpts, callbackContext, BCKAttribute, StringRef(#SPELLING));
#include "swift/AST/DeclAttr.def"
  // SIL_TYPE_ATTR expands to nothing so SIL-only type attributes are skipped.
#define SIL_TYPE_ATTR(X, C)
#define TYPE_ATTR(SPELLING, CLASS) \
  callback(cLangOpts, callbackContext, BCKAttribute, StringRef(#SPELLING));
#include "swift/AST/TypeAttr.def"
  // Deal with all of the target platform/architecture information.
  for (const auto &[kind, value] : langOpts.getPlatformConditionValues()) {
    switch (kind) {
    case PlatformConditionKind::OS:
      callback(cLangOpts, callbackContext, BCKTargetOSName, StringRef(value));
      // Special case that macOS is an alias of OSX.
      if (value == "OSX") {
        callback(cLangOpts, callbackContext, BCKTargetOSName,
                 StringRef("macOS"));
      }
      break;
    case PlatformConditionKind::Arch:
      callback(cLangOpts, callbackContext, BCKTargetArchitecture, StringRef(value));
      break;
    case PlatformConditionKind::Runtime:
      callback(cLangOpts, callbackContext, BCKTargetRuntime, StringRef(value));
      break;
    case PlatformConditionKind::TargetEnvironment:
      callback(cLangOpts, callbackContext, BCKTargetEnvironment,
               StringRef(value));
      // When compiling for iOS we consider "macCatalyst" to be a
      // synonym of "macabi". This enables the use of
      // #if targetEnvironment(macCatalyst) as a compilation
      // condition for macCatalyst.
      if (value == "macabi" && langOpts.Target.isiOS()) {
        callback(cLangOpts, callbackContext, BCKTargetEnvironment,
                 StringRef("macCatalyst"));
      }
      break;
    case PlatformConditionKind::PtrAuth:
      callback(cLangOpts, callbackContext, BCKTargetPointerAuthenticationScheme,
               StringRef(value));
      break;
    case PlatformConditionKind::Endianness:
    case PlatformConditionKind::PointerBitWidth:
    case PlatformConditionKind::ObjectFileFormat:
    case PlatformConditionKind::CanImport:
    case PlatformConditionKind::HasAtomicBitWidth:
      // Handled separately.
      break;
    }
  }
  // Object file format.
  llvm::Triple triple(langOpts.Target.getTriple());
  switch (triple.getObjectFormat()) {
  case llvm::Triple::ObjectFormatType::COFF:
    callback(cLangOpts, callbackContext, BCKTargetObjectFileFormat,
             StringRef("COFF"));
    break;
  case llvm::Triple::ObjectFormatType::ELF:
    callback(cLangOpts, callbackContext, BCKTargetObjectFileFormat,
             StringRef("ELF"));
    break;
  case llvm::Triple::ObjectFormatType::MachO:
    callback(cLangOpts, callbackContext, BCKTargetObjectFileFormat,
             StringRef("MachO"));
    break;
  case llvm::Triple::ObjectFormatType::SPIRV:
    callback(cLangOpts, callbackContext, BCKTargetObjectFileFormat,
             StringRef("SPIRV"));
    break;
  case llvm::Triple::ObjectFormatType::Wasm:
    callback(cLangOpts, callbackContext, BCKTargetObjectFileFormat,
             StringRef("Wasm"));
    break;
  default:
    // Ignore others.
    break;
  }
}
import os
import include.download_dict_sound as download_dict_sound
import sys
import glob
units_root = os.path.relpath("Units")
######## REMOVE HIDDEN (.) FILES ##########
for root, dirs, files in os.walk("./"):
for f in files:
if f.startswith(".") and "Learning-English.app" not in root and f != ".gitignore":
#os.remove(os.join(root,f))
print os.path.join(os.path.abspath(root),f)
os.remove(os.path.join(os.path.abspath(root),f))
if len(os.path.split(sys.argv[0])[0]) > 0:
os.chdir(os.path.split(sys.argv[0])[0])
########## DOWNLOAD GOOGLE SPEECH AND CONVERT TO WAVE#########
picfiles = [os.path.abspath(file) for file in glob.glob('Units/*/*/pics/*.*')]
soundfiles = [os.path.abspath(file) for file in glob.glob('Units/*/*/sounds/*.*')]
comparepicfiles = [file[:file.rindex(".")] for file in picfiles]
comparesoundfiles =[file.replace("speech_google.ogg","").replace("speech_google.wav","").replace("/sounds/","/pics/") for file in soundfiles]
compared = [os.path.split(file) for file in comparepicfiles if file not in comparesoundfiles]
print compared
for item in compared:
path, raw_word = item[0], item[1]
download_dict_sound.convert_mp3_to_wav(download_dict_sound.download_google(raw_word, path), True)
############DOWNLOAD DICT SOUNDS#################
dictsoundfiles = [x for x in os.listdir("sounds") if x.startswith(".") == False and os.path.isfile(x)]
print dictsoundfiles
all_words = []
for pic in picfiles:
f = os.path.basename(pic)
f = f[:f.rindex(".")]
f = download_dict_sound.replace_symbols(f)
f = f.lower()
f = f.replace("?","").replace("!","").replace(".", "").replace(",","")
f = f.split(" ")
all_words += f
all_words = list(set(all_words))
could_not_convert = []
for word in all_words:
if download_dict_sound.check_downloaded_word(word, "sounds") == False:
try:
downloaded_file = download_dict_sound.download(word, "sounds")
download_dict_sound.convert_mp3_to_wav(downloaded_file, True)
except:
pass
print could_not_convert | unknown | codeparrot/codeparrot-clean | ||
// Stub hook -- presumably stands in for a hook whose return value the
// compiler must treat as frozen (fixture; behavior intentionally empty).
function useFreeze() {}
// Opaque callee used to observe both values without revealing anything
// about mutation to the compiler.
function foo() {}
// Fixture component: `x` is created locally, passed through useFreeze(),
// and then both the raw and the hook-returned value are used as call
// arguments and as JSX children.
function Component(props) {
  const x = [];
  const y = useFreeze(x);
  foo(y, x);
  return (
    <Component>
      {x}
      {y}
    </Component>
  );
}
# -*- coding: utf-8 -*-
import os
import sys
import time
import logging
import threading
from itertools import chain
from .base import Cli
__all__ = ['App']
logger = logging.getLogger(__name__)
# Upper bound for file-descriptor numbers used by flush_fds()/closerange().
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:  # pragma: no cover
    # os.sysconf may be missing (e.g. Windows) or the name unsupported; the
    # original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    MAXFD = 256
def flush_fds(max_fd=None):
    """Best-effort fsync of every file descriptor above stderr.

    :param max_fd: highest descriptor number to sync (inclusive);
        defaults to the system ``MAXFD`` limit.  Descriptors that are
        closed or do not support fsync are silently skipped.
    """
    if max_fd is None:
        max_fd = MAXFD
    for fd in range(3, max_fd + 1):
        try:
            os.fsync(fd)
        except OSError:
            pass
class App(Cli):
    '''
    Development application CLI: provides ``app:serve`` (auto-reloading dev
    server) and ``app:shell`` (interactive shell) commands.

    :param app: iktomi WSGI app to serve
    :param shell_namespace: dict with initial namespace for shell command
    :param extra_files: extra files to watch and reload if they are changed
    :param bootstrap: function called once before the dev server is run
    '''
    # Log record format used by command_serve's basicConfig call.
    format = '%(levelname)s [%(name)s] %(message)s'
    def __init__(self, app, shell_namespace=None, extra_files=None, bootstrap=None):
        self.app = app
        self.shell_namespace = shell_namespace or {}
        self.extra_files = extra_files
        self.bootstrap = bootstrap
    def command_serve(self, host='', port='8000', level='debug'):
        '''
        Run development server with automated reload on code change::

            ./manage.py app:serve [host] [port] [level]

        :param host: interface to bind (default: all interfaces)
        :param port: TCP port as a string (default "8000")
        :param level: logging level name (default "debug")
        '''
        logging.basicConfig(level=getattr(logging, level.upper()), format=self.format)
        if self.bootstrap:
            logger.info('Bootstraping...')
            self.bootstrap()
        try:
            server_thread = DevServerThread(host, port, self.app)
            server_thread.start()
            # Blocks until any watched module/extra file changes on disk.
            wait_for_code_change(extra_files=self.extra_files)
            server_thread.running = False
            server_thread.join()
            logger.info('Reloading...')
            # Push pending writes to disk before fork/exec.
            flush_fds()
            pid = os.fork()
            # We need to fork before `execvp` to perform code reload
            # correctly, because we need to complete python destructors and
            # `atexit`.
            # This will save us from problems of incorrect exit, such as:
            # - unsaved data in data storage, which does not write data
            # on hard drive immediately
            # - code, that can't be measured with coverage tool, because it uses
            # `atexit` handler to save coverage data
            # NOTE: we are using an untypical fork-exec scheme replacing
            # the parent process (not the child) to preserve the process PID.
            # We use `pragma: no cover` here, because the parent process cannot
            # be measured with coverage since it ends with `execvp`.
            if pid: # pragma: no cover
                os.closerange(3, MAXFD)
                os.waitpid(pid, 0)
                # reloading the code in parent process
                os.execvp(sys.executable, [sys.executable] + sys.argv)
            else:
                # we are closing our resources, including file descriptors,
                # and performing `atexit` handlers.
                sys.exit()
        except KeyboardInterrupt:
            logger.info('Stoping dev-server...')
            server_thread.running = False
            server_thread.join()
            sys.exit()
    def command_shell(self):
        '''
        Shell command::

            ./manage.py app:shell

        Executed with `self.shell_namespace` as local variables namespace.
        '''
        from code import interact
        interact('Namespace {!r}'.format(self.shell_namespace),
                 local=self.shell_namespace)
class DevServerThread(threading.Thread):
    """Thread running a wsgiref dev server until ``self.running`` is cleared."""
    def __init__(self, host, port, app):
        from wsgiref.simple_server import make_server, WSGIServer, \
            WSGIRequestHandler
        self.host = host
        self.port = port
        class DevServer(WSGIServer):
            # Short timeout so handle_request() returns periodically and the
            # run() loop can notice self.running going False.
            timeout = 0.2
        class RequestHandler(WSGIRequestHandler):
            def address_string(slf):
                # getfqdn sometimes is very slow
                return '{}:{}'.format(host, port)
            def log_message(self, format, *args):
                # Route request logs through the module logger instead of
                # writing directly to stderr.
                logger.info("%s - - [%s] %s",
                            self.client_address[0],
                            self.log_date_time_string(),
                            format % args)
        try:
            self.port = int(port)
        except ValueError:
            raise ValueError(
                'Please provide valid port value insted of "{}"'.format(port))
        self.running = True
        self.server = make_server(self.host, self.port, app, server_class=DevServer,
                                  handler_class=RequestHandler)
        super(DevServerThread, self).__init__()
    def run(self):
        logger.info('Devserver is running on port %s\n', self.port)
        while self.running:
            # Returns after `timeout` even without a request, so the flag is
            # re-checked regularly.
            self.server.handle_request()
# All reloader utils are taken from werkzeug
def iter_module_files():
    """Yield the source file name of every currently imported module."""
    for module in sys.modules.values():
        filename = getattr(module, '__file__', None)
        if not filename:
            continue
        while not os.path.isfile(filename):  # pragma: no cover
            # Modules loaded from archives or custom importers may report a
            # __file__ like "foo.zip/zipped.py"; walk up until we reach an
            # actual file on disk (taken from werkzeug, assumed tested).
            filename = os.path.dirname(filename)
            if not filename:
                break
        else:
            # Map compiled artifacts back to their source file.
            if filename.endswith(('.pyc', '.pyo')):
                filename = filename[:-1]
            yield filename
def wait_for_code_change(extra_files=None, interval=1):
    """Block until any imported module file (or extra file) changes on disk.

    Polls file mtimes every *interval* seconds and returns on the first file
    whose mtime increased since it was first seen.
    """
    mtimes = {}
    while True:
        for filename in chain(iter_module_files(), extra_files or ()):
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:  # pragma: no cover
                # File vanished between listing and stat; interpreter
                # optimization makes this branch invisible to coverage.
                continue
            # First sighting records the baseline and never triggers.
            baseline = mtimes.setdefault(filename, mtime)
            if mtime > baseline:
                logger.info('Changes in file "%s"', filename)
                return
        time.sleep(interval)
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
"""
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.envelope import Envelope
from django.contrib.gis.gdal.error import GDALException, SRSException, check_err
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.libgdal import (
GDAL_VERSION,
gdal_full_version,
gdal_version,
)
from django.contrib.gis.gdal.raster.source import GDALRaster
from django.contrib.gis.gdal.srs import AxisOrder, CoordTransform, SpatialReference
# Names re-exported as the public GDAL API of django.contrib.gis.gdal.
__all__ = (
    "AxisOrder",
    "Driver",
    "DataSource",
    "CoordTransform",
    "Envelope",
    "GDALException",
    "GDALRaster",
    "GDAL_VERSION",
    "OGRGeometry",
    "OGRGeomType",
    "SpatialReference",
    "SRSException",
    "check_err",
    "gdal_version",
    "gdal_full_version",
)
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Vispy configuration functions
"""
import os
from os import path as op
import json
import sys
import platform
import getopt
import traceback
import tempfile
import atexit
from shutil import rmtree
from .event import EmitterGroup, EventEmitter, Event
from .logs import logger, set_log_level, use_log_level
from ..ext.six import string_types, file_types
# Build the tuple of "file-like" types accepted for the glir_file option.
file_types = list(file_types)
try:
    MAXFD = None  # placeholder comment line removed; see original below
except Exception:
    pass
file_types = tuple(file_types)
config = None
_data_path = None
_allowed_config_keys = None
def _init():
    """Create the global Config object and parse vispy command-line flags.

    Populates the module globals ``config``, ``_data_path`` and
    ``_allowed_config_keys``, loads overrides from the user's config file,
    then applies any ``--vispy-*`` command-line options.
    """
    global config, _data_path, _allowed_config_keys
    app_dir = _get_vispy_app_dir()
    if app_dir is not None:
        _data_path = op.join(app_dir, 'data')
        _test_data_path = op.join(app_dir, 'test_data')
    else:
        _data_path = _test_data_path = None
    # All allowed config keys and the types they may have
    _allowed_config_keys = {
        'data_path': string_types,
        'default_backend': string_types,
        'gl_backend': string_types,
        'gl_debug': (bool,),
        'glir_file': string_types+file_types,
        'include_path': list,
        'logging_level': string_types,
        'qt_lib': string_types,
        'dpi': (int, type(None)),
        'profile': string_types + (type(None),),
        'audit_tests': (bool,),
        'test_data_path': string_types + (type(None),),
    }
    # Default values for all config options
    default_config_options = {
        'data_path': _data_path,
        'default_backend': '',
        'gl_backend': 'gl2',
        'gl_debug': False,
        'glir_file': '',
        'include_path': [],
        'logging_level': 'info',
        'qt_lib': 'any',
        'dpi': None,
        'profile': None,
        'audit_tests': False,
        'test_data_path': _test_data_path,
    }
    config = Config(**default_config_options)
    try:
        config.update(**_load_config())
    except Exception as err:
        # Fix: `err.message` only exists on Python 2 exceptions and raises
        # AttributeError on Python 3; str(err) works on both.
        raise Exception('Error while reading vispy config file "%s":\n  %s' %
                        (_get_config_fname(), str(err)))
    set_log_level(config['logging_level'])
    _parse_command_line_arguments()
###############################################################################
# Command line flag parsing
VISPY_HELP = """
VisPy command line arguments:
--vispy-backend=(qt|pyqt4|pyqt5|pyside|glfw|pyglet|sdl2|wx)
Selects the backend system for VisPy to use. This will override the default
backend selection in your configuration file.
--vispy-log=(debug|info|warning|error|critical)[,search string]
Sets the verbosity of logging output. The default is 'warning'. If a search
string is given, messages will only be displayed if they match the string,
or if their call location (module.class:method(line) or
module:function(line)) matches the string.
--vispy-dpi=resolution
Force the screen resolution to a certain value (in pixels per inch). By
default, the OS is queried to determine the screen DPI.
--vispy-fps
Print the framerate (in Frames Per Second) in the console.
--vispy-gl-debug
Enables error checking for all OpenGL calls.
--vispy-glir-file
Export glir commands to specified file.
--vispy-profile=locations
Measure performance at specific code locations and display results.
*locations* may be "all" or a comma-separated list of method names like
"SceneCanvas.draw_visual".
--vispy-cprofile
Enable profiling using the built-in cProfile module and display results
when the program exits.
--vispy-audit-tests
Enable user auditing of image test results.
--vispy-help
Display this help message.
"""
def _parse_command_line_arguments():
    """Transform vispy-specific command line args into vispy config values.

    Scans ``sys.argv`` for ``--vispy-*`` options and applies them to the
    global ``config``.  Put into a function so temporaries don't leak into
    the vispy namespace.
    """
    global config
    # Get command line args for vispy
    argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
                'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
                'vispy-dpi=', 'vispy-audit-tests']
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', argnames)
    except getopt.GetoptError:
        opts = []
    # Use them to set the config values
    for o, a in opts:
        if o.startswith('--vispy'):
            if o == '--vispy-backend':
                config['default_backend'] = a
                logger.info('vispy backend: %s', a)
            elif o == '--vispy-gl-debug':
                config['gl_debug'] = True
            elif o == '--vispy-glir-file':
                config['glir_file'] = a
            elif o == '--vispy-log':
                if ',' in a:
                    # Split only once so the match string may contain commas.
                    verbose, match = a.split(',', 1)
                else:
                    verbose = a
                    match = None
                # Fix: store only the level name; the old code stored the
                # whole "level,match" string in config['logging_level'].
                config['logging_level'] = verbose
                set_log_level(verbose, match)
            elif o == '--vispy-profile':
                config['profile'] = a
            elif o == '--vispy-cprofile':
                _enable_profiling()
            elif o == '--vispy-help':
                print(VISPY_HELP)
            elif o == '--vispy-dpi':
                config['dpi'] = int(a)
            elif o == '--vispy-audit-tests':
                config['audit_tests'] = True
        else:
            logger.warning("Unsupported vispy flag: %s" % o)
###############################################################################
# CONFIG
# Adapted from pyzolib/paths.py:
# https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
class ConfigEvent(Event):
    """ Event indicating a configuration change.

    This class has a 'changes' attribute which is a dict of all name:value
    pairs that have changed in the configuration.
    """
    def __init__(self, changes):
        # Fixed event type so listeners can filter on 'config_change'.
        Event.__init__(self, type='config_change')
        self.changes = changes
class Config(object):
    """ Container for global settings used application-wide in vispy.

    Keys and value types are restricted to the module-level
    ``_allowed_config_keys`` registry; invalid assignments raise.

    Events:
    -------
    Config.events.changed - Emits ConfigEvent whenever the configuration
    changes.
    """
    def __init__(self, **kwargs):
        self.events = EmitterGroup(source=self)
        self.events['changed'] = EventEmitter(
            event_class=ConfigEvent,
            source=self)
        self._config = {}
        self.update(**kwargs)
        # Snapshot of the allowed-key registry at construction time.
        self._known_keys = get_config_keys()
    def __getitem__(self, item):
        return self._config[item]
    def __setitem__(self, item, val):
        """Set one option, validating key and value type, and emit changed."""
        self._check_key_val(item, val)
        self._config[item] = val
        # inform any listeners that a configuration option has changed
        self.events.changed(changes={item: val})
    def _check_key_val(self, key, val):
        """Raise KeyError/TypeError if *key* or *val* is not acceptable."""
        global _allowed_config_keys
        # check values against acceptable ones
        known_keys = _allowed_config_keys
        if key not in known_keys:
            raise KeyError('key "%s" not in known keys: "%s"'
                           % (key, known_keys))
        if not isinstance(val, known_keys[key]):
            raise TypeError('Value for key "%s" must be one of %s, not %s.'
                            % (key, known_keys[key], type(val)))
    def update(self, **kwargs):
        """Set several options at once, emitting a single changed event."""
        for key, val in kwargs.items():
            self._check_key_val(key, val)
        self._config.update(kwargs)
        self.events.changed(changes=kwargs)
    def __repr__(self):
        return repr(self._config)
def get_config_keys():
    """The config keys known by vispy and their allowed data types.

    Returns
    -------
    keys : dict
        Dict of {key: (types,)} pairs.
    """
    # Hand out a copy so callers cannot mutate the module-level registry.
    return dict(_allowed_config_keys)
def _get_config_fname():
    """Return the path of the vispy config file (or None if undeterminable)."""
    directory = _get_vispy_app_dir()
    if directory is None:
        return None
    # The test suite redirects the config into a throwaway temp dir.
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        return op.join(_TempDir(), 'vispy.json')
    return op.join(directory, 'vispy.json')
def _load_config():
    """Load preferences from ~/.vispy/vispy.json; empty dict if absent."""
    fname = _get_config_fname()
    if fname is None or not op.isfile(fname):
        return {}
    with open(fname, 'r') as fid:
        return json.load(fid)
def save_config(**kwargs):
    """Save configuration keys to vispy config file

    Parameters
    ----------
    **kwargs : keyword arguments
        Key/value pairs to save to the config file.  With no arguments the
        entire current configuration is persisted.
    """
    if kwargs == {}:
        kwargs = config._config
    # Merge on top of whatever is already on disk.
    current_config = _load_config()
    current_config.update(**kwargs)
    # write to disk
    fname = _get_config_fname()
    if fname is None:
        raise RuntimeError('config filename could not be determined')
    if not op.isdir(op.dirname(fname)):
        # makedirs (not mkdir) so missing intermediate directories don't fail
        os.makedirs(op.dirname(fname))
    with open(fname, 'w') as fid:
        json.dump(current_config, fid, sort_keys=True, indent=0)
def set_data_dir(directory=None, create=False, save=False):
    """Set vispy data download directory

    Parameters
    ----------
    directory : str | None
        The directory to use.
    create : bool
        If True, create directory if it doesn't exist.
    save : bool
        If True, save the configuration to the vispy config.
    """
    if directory is None:
        directory = _data_path
        if _data_path is None:
            raise IOError('default path cannot be determined, please '
                          'set it manually (directory != None)')
    if not op.isdir(directory):
        if not create:
            raise IOError('directory "%s" does not exist, perhaps try '
                          'create=True to create it?' % directory)
        # makedirs (not mkdir) so missing intermediate directories don't fail
        os.makedirs(directory)
    config.update(data_path=directory)
    if save:
        save_config(data_path=directory)
def _enable_profiling():
    """ Start profiling and register callback to print stats when the program
    exits.
    """
    import cProfile
    import atexit
    global _profiler
    # Keep a module-level handle so _profile_atexit can reach the profiler.
    _profiler = cProfile.Profile()
    _profiler.enable()
    atexit.register(_profile_atexit)
# Module-level profiler instance; created lazily by _enable_profiling().
_profiler = None
def _profile_atexit():
    """Print cumulative profiling stats; registered via atexit."""
    global _profiler
    _profiler.print_stats(sort='cumulative')
def sys_info(fname=None, overwrite=False):
    """Get relevant system and debugging information

    Opens (and closes) a small test Canvas to query OpenGL details, so a
    working backend and GL context are required for full output.

    Parameters
    ----------
    fname : str | None
        Filename to dump info to. Use None to simply print.
    overwrite : bool
        If True, overwrite file (if it exists).

    Returns
    -------
    out : str
        The system information as a string.
    """
    if fname is not None and op.isfile(fname) and not overwrite:
        raise IOError('file exists, use overwrite=True to overwrite')
    out = ''
    try:
        # Nest all imports here to avoid any circular imports
        from ..app import use_app, Canvas
        from ..app.backends import BACKEND_NAMES
        from ..gloo import gl
        from ..testing import has_backend
        # get default app
        with use_log_level('warning'):
            app = use_app(call_reuse=False)  # suppress messages
        out += 'Platform: %s\n' % platform.platform()
        out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
        out += 'Backend: %s\n' % app.backend_name
        for backend in BACKEND_NAMES:
            if backend.startswith('ipynb_'):
                continue
            with use_log_level('warning', print_msg=False):
                which = has_backend(backend, out=['which'])[1]
            out += '{0:<9} {1}\n'.format(backend + ':', which)
        out += '\n'
        # We need an OpenGL context to get GL info
        canvas = Canvas('Test', (10, 10), show=False, app=app)
        canvas._backend._vispy_set_current()
        out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
        x_ = gl.GL_MAX_TEXTURE_SIZE
        out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
        out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
        canvas.close()
    except Exception:  # don't stop printing info
        # Any failure (missing backend, no GL) is appended to the report.
        out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
        pass
    if fname is not None:
        with open(fname, 'w') as fid:
            fid.write(out)
    return out
class _TempDir(str):
    """Class for creating and auto-destroying temp dir

    This is designed to be used with testing modules.  We cannot simply use
    a __del__() method for cleanup here because the rmtree function may be
    cleaned up before this object, so we use the atexit module instead.
    """

    def __new__(cls):
        # The str value of the instance *is* the directory path.
        # (Fixed: the class argument of __new__ was previously named
        # `self`, although it receives the class, not an instance.)
        return str.__new__(cls, tempfile.mkdtemp())

    def __init__(self):
        self._path = self.__str__()
        # atexit (rather than __del__) guarantees rmtree is still importable
        # at cleanup time.
        atexit.register(self.cleanup)

    def cleanup(self):
        """Remove the temporary directory, ignoring errors."""
        rmtree(self._path, ignore_errors=True)
# Initialize config options as a side effect of importing this module.
_init()
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersTerrestrial, eDVBFrontendParametersCable, eDVBFrontendParameters, eDVBResourceManager, eTimer
class Tuner:
    """Thin helper around an allocated DVB frontend.

    Offers convenience methods to tune DVB-S, DVB-T and DVB-C transponders
    either from plain tuples or from prepared parameter objects, remembers
    the last used parameters for retune(), and can report the current
    transponder data.
    """

    def __init__(self, frontend, ignore_rotor=False):
        self.frontend = frontend
        # Passed through to eDVBFrontendParameters.setDVBS(); presumably
        # suppresses rotor handling while tuning DVB-S -- TODO confirm.
        self.ignore_rotor = ignore_rotor

    # transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, system, modulation, rolloff, pilot, tsid, onid)
    #                    0          1            2         3       4        5       6        7          8       9     10    11
    def tune(self, transponder):
        # Build DVB-S parameters from the tuple layout documented above.
        if self.frontend:
            print "[TuneTest] tuning to transponder with data", transponder
            parm = eDVBFrontendParametersSatellite()
            # frequency / symbol rate are scaled by 1000 for the driver
            parm.frequency = transponder[0] * 1000
            parm.symbol_rate = transponder[1] * 1000
            parm.polarisation = transponder[2]
            parm.fec = transponder[3]
            parm.inversion = transponder[4]
            parm.orbital_position = transponder[5]
            parm.system = transponder[6]
            parm.modulation = transponder[7]
            parm.rolloff = transponder[8]
            parm.pilot = transponder[9]
            self.tuneSatObj(parm)

    def tuneSatObj(self, transponderObj):
        # Tune a prepared eDVBFrontendParametersSatellite object.
        if self.frontend:
            feparm = eDVBFrontendParameters()
            feparm.setDVBS(transponderObj, self.ignore_rotor)
            self.lastparm = feparm  # remembered for retune()
            self.frontend.tune(feparm)

    def tuneTerr(self, frequency,
                 inversion=2, bandwidth=7000000, fechigh=6, feclow=6,
                 modulation=2, transmission=2, guard=4,
                 hierarchy=4, system=0, plpid=0):
        # Tune DVB-T from discrete parameters.  The numeric defaults are
        # driver enum codes -- NOTE(review): confirm their meanings against
        # eDVBFrontendParametersTerrestrial.
        if self.frontend:
            print "[TuneTest] tuning to transponder with data", [frequency, inversion, bandwidth, fechigh, feclow, modulation, transmission, guard, hierarchy, system, plpid]
            parm = eDVBFrontendParametersTerrestrial()
            parm.frequency = frequency
            parm.inversion = inversion
            parm.bandwidth = bandwidth
            parm.code_rate_HP = fechigh
            parm.code_rate_LP = feclow
            parm.modulation = modulation
            parm.transmission_mode = transmission
            parm.guard_interval = guard
            parm.hierarchy = hierarchy
            parm.system = system
            parm.plpid = plpid
            self.tuneTerrObj(parm)

    def tuneTerrObj(self, transponderObj):
        # Tune a prepared eDVBFrontendParametersTerrestrial object.
        if self.frontend:
            feparm = eDVBFrontendParameters()
            feparm.setDVBT(transponderObj)
            self.lastparm = feparm
            self.frontend.tune(feparm)

    def tuneCab(self, transponder):
        # Tune a DVB-C transponder tuple:
        # (frequency, symbolrate, modulation, fec_inner, inversion)
        if self.frontend:
            print "[TuneTest] tuning to transponder with data", transponder
            parm = eDVBFrontendParametersCable()
            parm.frequency = transponder[0]
            parm.symbol_rate = transponder[1]
            parm.modulation = transponder[2]
            parm.fec_inner = transponder[3]
            parm.inversion = transponder[4]
            #parm.system = transponder[5]
            self.tuneCabObj(parm)

    def tuneCabObj(self, transponderObj):
        # Tune a prepared eDVBFrontendParametersCable object.
        if self.frontend:
            feparm = eDVBFrontendParameters()
            feparm.setDVBC(transponderObj)
            self.lastparm = feparm
            self.frontend.tune(feparm)

    def retune(self):
        # Re-apply the most recently used tuning parameters.
        if self.frontend:
            self.frontend.tune(self.lastparm)

    def getTransponderData(self):
        # Return the frontend's current transponder data as a dict
        # (empty when no frontend is attached).
        ret = { }
        if self.frontend:
            self.frontend.getTransponderData(ret, True)
        return ret
# Tunes a list of transponders and checks whether they lock; optionally also
# verifies the received TSID/ONID combination.
# 1) add transponders with addTransponder()
# 2) call run(checkPIDs=True/False)
# 3) finishedChecking() is called when the run is finished
class TuneTest:
def __init__(self, feid, stopOnSuccess = -1, stopOnError = -1):
self.stopOnSuccess = stopOnSuccess
self.stopOnError = stopOnError
self.feid = feid
self.transponderlist = []
self.currTuned = None
print "TuneTest for feid %d" % self.feid
if not self.openFrontend():
self.oldref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService() # try to disable foreground service
if not self.openFrontend():
if self.session.pipshown: # try to disable pip
if hasattr(self.session, 'infobar'):
if self.session.infobar.servicelist.dopipzap:
self.session.infobar.servicelist.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
if not self.openFrontend():
self.frontend = None # in normal case this should not happen
self.tuner = Tuner(self.frontend)
self.timer = eTimer()
self.timer.callback.append(self.updateStatus)
def gotTsidOnid(self, tsid, onid):
print "******** got tsid, onid:", tsid, onid
if tsid is not -1 and onid is not -1:
self.pidStatus = self.INTERNAL_PID_STATUS_SUCCESSFUL
self.tsid = tsid
self.onid = onid
else:
self.pidStatus = self.INTERNAL_PID_STATUS_FAILED
self.tsid = -1
self.onid = -1
self.timer.start(100, True)
def updateStatus(self):
dict = {}
self.frontend.getFrontendStatus(dict)
stop = False
print "status:", dict
if dict["tuner_state"] == "TUNING":
print "TUNING"
self.timer.start(100, True)
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_TUNING, self.currTuned))
elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_NOOP:
print "2nd choice"
if dict["tuner_state"] == "LOCKED":
print "acquiring TSID/ONID"
self.raw_channel.receivedTsidOnid.get().append(self.gotTsidOnid)
self.raw_channel.requestTsidOnid()
self.pidStatus = self.INTERNAL_PID_STATUS_WAITING
else:
self.pidStatus = self.INTERNAL_PID_STATUS_FAILED
elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_WAITING:
print "waiting for pids"
else:
if dict["tuner_state"] == "LOSTLOCK" or dict["tuner_state"] == "FAILED":
self.tuningtransponder = self.nextTransponder()
self.failedTune.append([self.currTuned, self.oldTuned, "tune_failed", dict]) # last parameter is the frontend status)
if self.stopOnError != -1 and self.stopOnError <= len(self.failedTune):
stop = True
elif dict["tuner_state"] == "LOCKED":
pidsFailed = False
if self.checkPIDs:
if self.currTuned is not None:
if self.tsid != self.currTuned[10] or self.onid != self.currTuned[11]:
self.failedTune.append([self.currTuned, self.oldTuned, "pids_failed", {"real": (self.tsid, self.onid), "expected": (self.currTuned[10], self.currTuned[11])}, dict]) # last parameter is the frontend status
pidsFailed = True
else:
self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status
if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune):
stop = True
elif not self.checkPIDs or (self.checkPids and not pidsFailed):
self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status
if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune):
stop = True
self.tuningtransponder = self.nextTransponder()
else:
print "************* tuner_state:", dict["tuner_state"]
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_NOOP, self.currTuned))
if not stop:
self.tune()
if self.tuningtransponder < len(self.transponderlist) and not stop:
if self.pidStatus != self.INTERNAL_PID_STATUS_WAITING:
self.timer.start(100, True)
print "restart timer"
else:
print "not restarting timers (waiting for pids)"
else:
self.progressCallback((self.getProgressLength(), len(self.transponderlist), self.STATUS_DONE, self.currTuned))
print "finishedChecking"
self.finishedChecking()
def firstTransponder(self):
print "firstTransponder:"
index = 0
if self.checkPIDs:
print "checkPIDs-loop"
# check for tsid != -1 and onid != -1
print "index:", index
print "len(self.transponderlist):", len(self.transponderlist)
while index < len(self.transponderlist) and (self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1):
index += 1
print "FirstTransponder final index:", index
return index
def nextTransponder(self):
print "getting next transponder", self.tuningtransponder
index = self.tuningtransponder + 1
if self.checkPIDs:
print "checkPIDs-loop"
# check for tsid != -1 and onid != -1
print "index:", index
print "len(self.transponderlist):", len(self.transponderlist)
while index < len(self.transponderlist) and (self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1):
index += 1
print "next transponder index:", index
return index
def finishedChecking(self):
print "finished testing"
print "successfull:", self.successfullyTune
print "failed:", self.failedTune
def openFrontend(self):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
self.raw_channel = res_mgr.allocateRawChannel(self.feid)
if self.raw_channel:
self.frontend = self.raw_channel.getFrontend()
if self.frontend:
return True
else:
print "getFrontend failed"
else:
print "getRawChannel failed"
else:
print "getResourceManager instance failed"
return False
def tune(self):
print "tuning to", self.tuningtransponder
if self.tuningtransponder < len(self.transponderlist):
self.pidStatus = self.INTERNAL_PID_STATUS_NOOP
self.oldTuned = self.currTuned
self.currTuned = self.transponderlist[self.tuningtransponder]
self.tuner.tune(self.transponderlist[self.tuningtransponder])
INTERNAL_PID_STATUS_NOOP = 0
INTERNAL_PID_STATUS_WAITING = 1
INTERNAL_PID_STATUS_SUCCESSFUL = 2
INTERNAL_PID_STATUS_FAILED = 3
def run(self, checkPIDs = False):
self.checkPIDs = checkPIDs
self.pidStatus = self.INTERNAL_PID_STATUS_NOOP
self.failedTune = []
self.successfullyTune = []
self.tuningtransponder = self.firstTransponder()
self.tune()
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_START, self.currTuned))
self.timer.start(100, True)
# transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, <system>, <modulation>, <rolloff>, <pilot>, <tsid>, <onid>)
# 0 1 2 3 4 5 6 7 8 9 10 11
def addTransponder(self, transponder):
self.transponderlist.append(transponder)
def clearTransponder(self):
self.transponderlist = []
def getProgressLength(self):
count = 0
if self.stopOnError == -1:
count = len(self.transponderlist)
else:
if count < self.stopOnError:
count = self.stopOnError
if self.stopOnSuccess == -1:
count = len(self.transponderlist)
else:
if count < self.stopOnSuccess:
count = self.stopOnSuccess
return count
STATUS_START = 0
STATUS_TUNING = 1
STATUS_DONE = 2
STATUS_NOOP = 3
# can be overwritten
# progress = (range, value, status, transponder)
def progressCallback(self, progress):
pass | unknown | codeparrot/codeparrot-clean | ||
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {EditorState, Extension} from '@codemirror/state';
import {
lineNumbers,
highlightActiveLineGutter,
highlightSpecialChars,
drawSelection,
dropCursor,
rectangularSelection,
crosshairCursor,
highlightActiveLine,
keymap,
EditorView,
} from '@codemirror/view';
export {EditorView} from '@codemirror/view';
import {
foldGutter,
indentOnInput,
syntaxHighlighting,
defaultHighlightStyle,
bracketMatching,
foldKeymap,
HighlightStyle,
} from '@codemirror/language';
import {history, defaultKeymap, historyKeymap, indentWithTab} from '@codemirror/commands';
import {highlightSelectionMatches, searchKeymap} from '@codemirror/search';
import {
closeBrackets,
autocompletion,
closeBracketsKeymap,
completionKeymap,
startCompletion,
} from '@codemirror/autocomplete';
import {lintKeymap} from '@codemirror/lint';
import {SYNTAX_STYLES} from './syntax-styles';
import {CODE_EDITOR_THEME_STYLES} from './theme-styles';
/**
 * Baseline CodeMirror extension set used by the embedded code editor:
 * gutters, selection/editing behaviors, syntax highlighting, the dark
 * theme, and the combined keymap.
 */
export const CODE_EDITOR_EXTENSIONS: Extension[] = [
  // Gutters and visual aids.
  lineNumbers(),
  highlightActiveLineGutter(),
  highlightSpecialChars(),
  history(),
  foldGutter(),
  drawSelection(),
  dropCursor(),
  EditorState.allowMultipleSelections.of(true),
  // Editing behaviors.
  indentOnInput(),
  bracketMatching(),
  closeBrackets(),
  autocompletion(),
  rectangularSelection(),
  crosshairCursor(),
  highlightActiveLine(),
  highlightSelectionMatches(),
  // Default highlight style as fallback, plus project-specific styles.
  syntaxHighlighting(defaultHighlightStyle, {fallback: true}),
  syntaxHighlighting(HighlightStyle.define(SYNTAX_STYLES)),
  EditorView.lineWrapping,
  EditorView.theme(
    CODE_EDITOR_THEME_STYLES,
    // TODO: get from global theme, reconfigure on change: https://discuss.codemirror.net/t/dynamic-light-mode-dark-mode-how/4709
    {dark: true},
  ),
  // Order matters: earlier bindings take precedence for the same key.
  keymap.of([
    ...closeBracketsKeymap,
    ...defaultKeymap,
    ...searchKeymap,
    ...historyKeymap,
    ...foldKeymap,
    ...completionKeymap,
    ...lintKeymap,
    indentWithTab,
    {
      // Manually trigger autocompletion.
      key: 'Ctrl-.',
      run: startCompletion,
      mac: 'Mod-.',
    },
    {
      key: 'Mod-s',
      run: () => true, // Prevent default browser save dialog
    },
  ]),
];
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import errno
import glob
import json
import os
import re
import sys
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
# import this as a module to ensure we get the same module isntance
from ansible.module_utils.facts import timeout
def get_partition_uuid(partname):
    """Return the filesystem UUID for partition ``partname`` (e.g. 'sda1').

    Resolves the symlinks under /dev/disk/by-uuid and returns the UUID whose
    link points at /dev/<partname>; returns None when the directory is
    unreadable or no link matches.
    """
    target = "/dev/" + partname
    try:
        candidates = os.listdir("/dev/disk/by-uuid")
    except OSError:
        return None
    for uuid in candidates:
        if os.path.realpath("/dev/disk/by-uuid/" + uuid) == target:
            return uuid
    return None
class LinuxHardware(Hardware):
    """
    Linux-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count

    In addition, it also defines number of DMI facts and device facts.
    """

    platform = 'Linux'

    # Originally only had these four as toplevelfacts
    ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
    # Now we have all of these in a dict structure
    MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))

    # regex used against findmnt output to detect bind mounts
    BIND_MOUNT_RE = re.compile(r'.*\]')

    # regex used against mtab content to find entries that are bind mounts
    MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except timeout.TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
hardware_facts.update(mount_facts)
return hardware_facts
    def get_memory_facts(self):
        # Parse /proc/meminfo into legacy flat *_mb facts plus the nested
        # 'memory_mb' structure.  Returns {} when /proc/meminfo is unreadable.
        memory_facts = {}
        if not os.access("/proc/meminfo", os.R_OK):
            return memory_facts

        memstats = {}
        for line in get_file_lines("/proc/meminfo"):
            data = line.split(":", 1)
            key = data[0]
            # Values are reported in kB; convert to MB with integer division.
            if key in self.ORIGINAL_MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                memory_facts["%s_mb" % key.lower()] = int(val) // 1024
            if key in self.MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                memstats[key.lower()] = int(val) // 1024

        # Derived values are only computed when all of their inputs were seen.
        if None not in (memstats.get('memtotal'), memstats.get('memfree')):
            memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
        if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
            memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
        if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
            memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
        if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
            memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']

        memory_facts['memory_mb'] = {
            'real': {
                'total': memstats.get('memtotal'),
                'used': memstats.get('real:used'),
                'free': memstats.get('memfree'),
            },
            'nocache': {
                'free': memstats.get('nocache:free'),
                'used': memstats.get('nocache:used'),
            },
            'swap': {
                'total': memstats.get('swaptotal'),
                'free': memstats.get('swapfree'),
                'used': memstats.get('swap:used'),
                'cached': memstats.get('swapcached'),
            },
        }
        return memory_facts
def get_cpu_facts(self, collected_facts=None):
cpu_facts = {}
collected_facts = collected_facts or {}
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return cpu_facts
cpu_facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in data:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
if 'processor' not in cpu_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
cpu_facts['processor_cores'] = int(data[1].strip())
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
# FIXME
if collected_facts.get('ansible_architecture') != 's390x':
if xen_paravirt:
cpu_facts['processor_count'] = i
cpu_facts['processor_cores'] = i
cpu_facts['processor_threads_per_core'] = 1
cpu_facts['processor_vcpus'] = i
else:
if sockets:
cpu_facts['processor_count'] = len(sockets)
else:
cpu_facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values and socket_values[0]:
cpu_facts['processor_cores'] = socket_values[0]
else:
cpu_facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
else:
cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores']
cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
cpu_facts['processor_count'] * cpu_facts['processor_cores'])
return cpu_facts
    def get_dmi_facts(self):
        ''' learn dmi facts from system

        Try /sys first for dmi related facts.
        If that is not available, fall back to dmidecode executable '''

        dmi_facts = {}

        if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
            # Use kernel DMI info, if available
            # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
            FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
                           "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
                           "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
                           "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
                           "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
                           "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
                           "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
                           "CompactPCI", "AdvancedTCA", "Blade"]

            # fact name -> sysfs file carrying its value
            DMI_DICT = {
                'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
                'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
                'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
                'product_name': '/sys/devices/virtual/dmi/id/product_name',
                'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
                'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
                'product_version': '/sys/devices/virtual/dmi/id/product_version',
                'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
            }

            for (key, path) in DMI_DICT.items():
                data = get_file_content(path)
                if data is not None:
                    if key == 'form_factor':
                        try:
                            # chassis_type is a numeric index into FORM_FACTOR
                            dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
                        except IndexError:
                            dmi_facts['form_factor'] = 'unknown (%s)' % data
                    else:
                        dmi_facts[key] = data
                else:
                    dmi_facts[key] = 'NA'
        else:
            # Fall back to using dmidecode, if available
            dmi_bin = self.module.get_bin_path('dmidecode')
            # fact name -> dmidecode -s keyword
            DMI_DICT = {
                'bios_date': 'bios-release-date',
                'bios_version': 'bios-version',
                'form_factor': 'chassis-type',
                'product_name': 'system-product-name',
                'product_serial': 'system-serial-number',
                'product_uuid': 'system-uuid',
                'product_version': 'system-version',
                'system_vendor': 'system-manufacturer'
            }
            for (k, v) in DMI_DICT.items():
                if dmi_bin is not None:
                    (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                    if rc == 0:
                        # Strip out commented lines (specific dmidecode output)
                        thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
                        try:
                            # probe for undecodable output; fall back to "NA"
                            json.dumps(thisvalue)
                        except UnicodeDecodeError:
                            thisvalue = "NA"
                        dmi_facts[k] = thisvalue
                    else:
                        dmi_facts[k] = 'NA'
                else:
                    dmi_facts[k] = 'NA'
        return dmi_facts
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
    def _lsblk_uuid(self):
        # Map device path -> filesystem UUID using lsblk output.
        # Returns {} when lsblk is missing or fails.
        uuids = {}
        lsblk_path = self.module.get_bin_path("lsblk")
        if not lsblk_path:
            return uuids
        rc, out, err = self._run_lsblk(lsblk_path)
        if rc != 0:
            return uuids

        # each line will be in format:
        # <devicename><some whitespace><uuid>
        # /dev/sda1  32caaec3-ef40-4691-a3b6-438c3f9bc1c0
        for lsblk_line in out.splitlines():
            if not lsblk_line:
                continue
            line = lsblk_line.strip()
            # rsplit: device names may contain spaces, the UUID will not
            fields = line.rsplit(None, 1)
            if len(fields) < 2:
                continue
            device_name, uuid = fields[0].strip(), fields[1].strip()
            # first occurrence wins
            if device_name in uuids:
                continue
            uuids[device_name] = uuid
        return uuids
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
return rc, out, err
    def _find_bind_mounts(self):
        # Return the set of mount points that are bind mounts, detected via
        # findmnt.  Empty set when findmnt is missing or fails.
        bind_mounts = set()
        findmnt_path = self.module.get_bin_path("findmnt")
        if not findmnt_path:
            return bind_mounts
        rc, out, err = self._run_findmnt(findmnt_path)
        if rc != 0:
            return bind_mounts

        # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
        for line in out.splitlines():
            fields = line.split()
            # fields[0] is the TARGET, fields[1] is the SOURCE
            if len(fields) < 2:
                continue

            # bind mounts will have a [/directory_name] in the SOURCE column
            if self.BIND_MOUNT_RE.match(fields[1]):
                bind_mounts.add(fields[0])
        return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
    @timeout.timeout()
    def get_mount_facts(self):
        # Enumerate mounted filesystems.  Wrapped in a timeout because
        # statvfs on a dead network mount can block indefinitely.
        mount_facts = {}

        mount_facts['mounts'] = []

        bind_mounts = self._find_bind_mounts()
        uuids = self._lsblk_uuid()
        mtab_entries = self._mtab_entries()

        mounts = []
        for fields in mtab_entries:
            device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]

            # keep only real devices (/dev/...) and network mounts (host:/path)
            if not device.startswith('/') and ':/' not in device:
                continue

            if fstype == 'none':
                continue

            mount_statvfs_info = get_mount_size(mount)

            if mount in bind_mounts:
                # only add if not already there, we might have a plain /etc/mtab
                if not self.MTAB_BIND_MOUNT_RE.match(options):
                    options += ",bind"

            mount_info = {'mount': mount,
                          'device': device,
                          'fstype': fstype,
                          'options': options,
                          'uuid': uuids.get(device, 'N/A')}

            mount_info.update(mount_statvfs_info)

            mounts.append(mount_info)

        mount_facts['mounts'] = mounts

        return mount_facts
    def get_device_links(self, link_dir):
        # Map device basename -> sorted list of symlink names found in
        # link_dir (e.g. /dev/disk/by-uuid).  Returns {} when link_dir is
        # missing or unreadable; individual broken links are skipped.
        if not os.path.exists(link_dir):
            return {}
        try:
            retval = collections.defaultdict(set)
            for entry in os.listdir(link_dir):
                try:
                    target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
                    retval[target].add(entry)
                except OSError:
                    continue
            return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
        except OSError:
            return {}
    def get_all_device_owners(self):
        # Map slave device name -> sorted list of devices stacked on top of
        # it, derived from /sys/block/<device>/slaves/<target> entries.
        try:
            retval = collections.defaultdict(set)
            for path in glob.glob('/sys/block/*/slaves/*'):
                elements = path.split('/')
                # path components: ['', 'sys', 'block', <device>, 'slaves', <target>]
                device = elements[3]
                target = elements[5]
                retval[target].add(device)

            return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
        except OSError:
            return {}
    def get_all_device_links(self):
        # Aggregate the /dev/disk/by-* symlink maps plus master/slave info
        # into the structure stored as facts['device_links'].
        return {
            'ids': self.get_device_links('/dev/disk/by-id'),
            'uuids': self.get_device_links('/dev/disk/by-uuid'),
            'labels': self.get_device_links('/dev/disk/by-label'),
            'masters': self.get_all_device_owners(),
        }
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
    def get_device_facts(self):
        # Walk /sys/block and build per-disk facts (vendor/model, links,
        # partitions, sizes, scheduler, PCI host, holders).
        device_facts = {}

        device_facts['devices'] = {}
        lspci = self.module.get_bin_path('lspci')
        if lspci:
            rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
        else:
            pcidata = None

        try:
            block_devs = os.listdir("/sys/block")
        except OSError:
            return device_facts

        # Collect WWN identifiers keyed by device basename from the
        # /dev/disk/by-id/wwn-* symlinks.
        devs_wwn = {}
        try:
            devs_by_id = os.listdir("/dev/disk/by-id")
        except OSError:
            pass
        else:
            for link_name in devs_by_id:
                if link_name.startswith("wwn-"):
                    try:
                        wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
                    except OSError:
                        continue
                    devs_wwn[os.path.basename(wwn_link)] = link_name[4:]

        links = self.get_all_device_links()
        device_facts['device_links'] = links

        for block in block_devs:
            virtual = 1
            sysfs_no_links = 0
            try:
                path = os.readlink(os.path.join("/sys/block/", block))
            except OSError:
                e = sys.exc_info()[1]
                # EINVAL: not a symlink -- treat the entry as the device dir
                if e.errno == errno.EINVAL:
                    path = block
                    sysfs_no_links = 1
                else:
                    continue
            sysdir = os.path.join("/sys/block", path)
            if sysfs_no_links == 1:
                # A 'device' subdir indicates real (non-virtual) hardware.
                for folder in os.listdir(sysdir):
                    if "device" in folder:
                        virtual = 0
                        break
            d = {}
            d['virtual'] = virtual
            d['links'] = {}
            for (link_type, link_values) in iteritems(links):
                d['links'][link_type] = link_values.get(block, [])
            diskname = os.path.basename(sysdir)
            for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
                d[key] = get_file_content(sysdir + "/device/" + key)

            # If sg_inq is available, ask the device for its serial number.
            sg_inq = self.module.get_bin_path('sg_inq')
            if sg_inq:
                device = "/dev/%s" % (block)
                rc, drivedata, err = self.module.run_command([sg_inq, device])
                if rc == 0:
                    serial = re.search("Unit serial number:\s+(\w+)", drivedata)
                    if serial:
                        d['serial'] = serial.group(1)

            for key in ['vendor', 'model']:
                d[key] = get_file_content(sysdir + "/device/" + key)

            for key, test in [('removable', '/removable'),
                              ('support_discard', '/queue/discard_granularity'),
                              ]:
                d[key] = get_file_content(sysdir + test)

            if diskname in devs_wwn:
                d['wwn'] = devs_wwn[diskname]

            # Partition sub-entries: sysfs folders named <disk><number>.
            d['partitions'] = {}
            for folder in os.listdir(sysdir):
                m = re.search("(" + diskname + "\d+)", folder)
                if m:
                    part = {}
                    partname = m.group(1)
                    part_sysdir = sysdir + "/" + partname

                    part['links'] = {}
                    for (link_type, link_values) in iteritems(links):
                        part['links'][link_type] = link_values.get(partname, [])

                    part['start'] = get_file_content(part_sysdir + "/start", 0)
                    part['sectors'] = get_file_content(part_sysdir + "/size", 0)

                    part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
                    if not part['sectorsize']:
                        part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
                    part['size'] = bytes_to_human((float(part['sectors']) * float(part['sectorsize'])))
                    part['uuid'] = get_partition_uuid(partname)
                    self.get_holders(part, part_sysdir)

                    d['partitions'][partname] = part

            d['rotational'] = get_file_content(sysdir + "/queue/rotational")
            d['scheduler_mode'] = ""
            scheduler = get_file_content(sysdir + "/queue/scheduler")
            if scheduler is not None:
                # the active scheduler is surrounded by [brackets]
                m = re.match(".*?(\[(.*)\])", scheduler)
                if m:
                    d['scheduler_mode'] = m.group(2)

            d['sectors'] = get_file_content(sysdir + "/size")
            if not d['sectors']:
                d['sectors'] = 0
            d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
            if not d['sectorsize']:
                d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
            d['size'] = bytes_to_human(float(d['sectors']) * float(d['sectorsize']))

            d['host'] = ""

            # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
            m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
            if m and pcidata:
                pciid = m.group(1)
                did = re.escape(pciid)
                m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
                if m:
                    d['host'] = m.group(1)

            self.get_holders(d, sysdir)

            device_facts['devices'][diskname] = d

        return device_facts
def get_uptime_facts(self):
uptime_facts = {}
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
return uptime_facts
def _find_mapper_device_name(self, dm_device):
dm_prefix = '/dev/dm-'
mapper_device = dm_device
if dm_device.startswith(dm_prefix):
dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc == 0:
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
    def get_lvm_facts(self):
        """ Get LVM Facts if running as root and lvm utils are available """
        lvm_facts = {}
        # LVM tools require root; checking for 'vgs' also guards against
        # systems without the lvm2 userland installed.
        if os.getuid() == 0 and self.module.get_bin_path('vgs'):
            # Machine-readable output: no headers, gigabyte units without
            # suffix, comma-separated columns.
            lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
            vgs_path = self.module.get_bin_path('vgs')
            # vgs fields: VG #PV #LV #SN Attr VSize VFree
            vgs = {}
            if vgs_path:
                rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
                for vg_line in vg_lines.splitlines():
                    items = vg_line.strip().split(',')
                    # VSize/VFree are addressed from the end of the row
                    # (items[-2]/items[-1], per the field list above).
                    vgs[items[0]] = {'size_g': items[-2],
                                     'free_g': items[-1],
                                     'num_lvs': items[2],
                                     'num_pvs': items[1]}
            lvs_path = self.module.get_bin_path('lvs')
            # lvs fields:
            # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
            lvs = {}
            if lvs_path:
                rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
                for lv_line in lv_lines.splitlines():
                    items = lv_line.strip().split(',')
                    lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
            pvs_path = self.module.get_bin_path('pvs')
            # pvs fields: PV VG #Fmt #Attr PSize PFree
            pvs = {}
            if pvs_path:
                rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
                for pv_line in pv_lines.splitlines():
                    items = pv_line.strip().split(',')
                    # Normalize /dev/dm-N nodes to their /dev/mapper names so
                    # the PV keys are stable, human-readable paths.
                    pvs[self._find_mapper_device_name(items[0])] = {
                        'size_g': items[4],
                        'free_g': items[5],
                        'vg': items[1]}
            lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
        return lvm_facts
class LinuxHardwareCollector(HardwareCollector):
    # Collector registration: presumably _platform is matched against the
    # detected OS and _fact_class instantiated for it — confirm in
    # HardwareCollector.
    _platform = 'Linux'
    _fact_class = LinuxHardware | unknown | codeparrot/codeparrot-clean | |
/** @import { AST } from '#compiler' */
/** @import { Context } from '../types' */
import * as e from '../../../errors.js';
import { is_event_attribute } from '../../../utils/ast.js';
import { disallow_children } from './shared/special-element.js';
/**
* @param {AST.SvelteBody} node
* @param {Context} context
*/
export function SvelteBody(node, context) {
disallow_children(node);
for (const attribute of node.attributes) {
if (
attribute.type === 'SpreadAttribute' ||
(attribute.type === 'Attribute' && !is_event_attribute(attribute))
) {
e.svelte_body_illegal_attribute(attribute);
}
}
context.next();
} | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/src/compiler/phases/2-analyze/visitors/SvelteBody.js |
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
strip_or_none,
float_or_none,
int_or_none,
merge_dicts,
parse_iso8601,
str_or_none,
url_or_none,
)
class CanvasIE(InfoExtractor):
    # Low-level extractor for mediazone.vrt.be asset URLs; CanvasEenIE and
    # VrtNUIE below delegate here via url_transparent results.
    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
        'md5': '68993eda72ef62386a15ea2cf3c93107',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'mp4',
            'title': 'Nachtwacht: De Greystook',
            'description': 'Nachtwacht: De Greystook',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.04,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
        'only_matching': True,
    }]
    # Plain HLS uses the native downloader; HLS_AES goes through the generic
    # 'm3u8' protocol — presumably because the native path lacks AES support.
    _HLS_ENTRY_PROTOCOLS_MAP = {
        'HLS': 'm3u8_native',
        'HLS_AES': 'm3u8',
    }
    _REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site_id'), mobj.group('id')
        # Old API endpoint, serves more formats but may fail for some videos
        data = self._download_json(
            'https://mediazone.vrt.be/api/v1/%s/assets/%s'
            % (site_id, video_id), video_id, 'Downloading asset JSON',
            'Unable to download asset JSON', fatal=False)
        # New API endpoint
        if not data:
            # A player token is required; data=b'' forces a POST request.
            token = self._download_json(
                '%s/tokens' % self._REST_API_BASE, video_id,
                'Downloading token', data=b'',
                headers={'Content-Type': 'application/json'})['vrtPlayerToken']
            data = self._download_json(
                '%s/videos/%s' % (self._REST_API_BASE, video_id),
                video_id, 'Downloading video JSON', fatal=False, query={
                    'vrtPlayerToken': token,
                    'client': '%s@PROD' % site_id,
                }, expected_status=400)
            # Errors come back in-band as a JSON body (HTTP 400 is accepted
            # above so that body is still parsed).
            message = data.get('message')
            if message and not data.get('title'):
                if data.get('code') == 'AUTHENTICATION_REQUIRED':
                    self.raise_login_required(message)
                raise ExtractorError(message, expected=True)
        title = data['title']
        description = data.get('description')
        formats = []
        # Each target is one delivery flavor (HLS/HDS/DASH/Smooth Streaming).
        for target in data['targetUrls']:
            format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
            if not format_url or not format_type:
                continue
            format_type = format_type.upper()
            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                    m3u8_id=format_type, fatal=False))
            elif format_type == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_type, fatal=False))
            elif format_type == 'MPEG_DASH':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id=format_type, fatal=False))
            elif format_type == 'HSS':
                formats.extend(self._extract_ism_formats(
                    format_url, video_id, ism_id='mss', fatal=False))
            else:
                # Unknown delivery type: expose the raw URL as a last resort.
                formats.append({
                    'format_id': format_type,
                    'url': format_url,
                })
        self._sort_formats(formats)
        subtitles = {}
        subtitle_urls = data.get('subtitleUrls')
        if isinstance(subtitle_urls, list):
            for subtitle in subtitle_urls:
                subtitle_url = subtitle.get('url')
                # Only type 'CLOSED' is taken; entries are registered under
                # 'nl' (presumably all-Dutch content — confirm upstream).
                if subtitle_url and subtitle.get('type') == 'CLOSED':
                    subtitles.setdefault('nl', []).append({'url': subtitle_url})
        return {
            'id': video_id,
            'display_id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            # duration arrives in milliseconds; scale converts to seconds.
            'duration': float_or_none(data.get('duration'), 1000),
            'thumbnail': data.get('posterImageUrl'),
            'subtitles': subtitles,
        }
class CanvasEenIE(InfoExtractor):
    IE_DESC = 'canvas.be and een.be'
    _VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
        'md5': 'ed66976748d12350b118455979cca293',
        'info_dict': {
            'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
            'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
            'ext': 'flv',
            'title': 'De afspraak veilt voor de Warmste Week',
            'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 49.02,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # with subtitles
        'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
        'info_dict': {
            'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
            'display_id': 'pieter-0167',
            'ext': 'mp4',
            'title': 'Pieter 0167',
            'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2553.08,
            'subtitles': {
                'nl': [{
                    'ext': 'vtt',
                }],
            },
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Pagina niet gevonden',
    }, {
        'url': 'https://www.een.be/thuis/emma-pakt-thilly-aan',
        'info_dict': {
            'id': 'md-ast-3a24ced2-64d7-44fb-b4ed-ed1aafbf90b8',
            'display_id': 'emma-pakt-thilly-aan',
            'ext': 'mp4',
            'title': 'Emma pakt Thilly aan',
            'description': 'md5:c5c9b572388a99b2690030afa3f3bad7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 118.24,
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site_id, display_id = mobj.group('site_id'), mobj.group('id')
        webpage = self._download_webpage(url, display_id)
        # Prefer the page's own header element; fall back to og:title.
        title = strip_or_none(self._search_regex(
            r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None))
        # The embedded player carries the mediazone asset id in data-video.
        video_id = self._html_search_regex(
            r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
            group='id')
        # Hand the actual extraction off to CanvasIE; url_transparent keeps
        # the title/description gathered here.
        return {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
        }
class VrtNUIE(GigyaBaseIE):
    IE_DESC = 'VrtNU.be'
    _VALID_URL = r'https?://(?:www\.)?vrt\.be/(?P<site_id>vrtnu)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        # Available via old API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1/postbus-x-s1a1/',
        'info_dict': {
            'id': 'pbs-pub-2e2d8c27-df26-45c9-9dc6-90c78153044d$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
            'ext': 'mp4',
            'title': 'De zwarte weduwe',
            'description': 'md5:db1227b0f318c849ba5eab1fef895ee4',
            'duration': 1457.04,
            'thumbnail': r're:^https?://.*\.jpg$',
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 1,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['is not a supported codec'],
    }, {
        # Only available via new API endpoint
        'url': 'https://www.vrt.be/vrtnu/a-z/kamp-waes/1/kamp-waes-s1a5/',
        'info_dict': {
            'id': 'pbs-pub-0763b56c-64fb-4d38-b95b-af60bf433c71$vid-ad36a73c-4735-4f1f-b2c0-a38e6e6aa7e1',
            'ext': 'mp4',
            'title': 'Aflevering 5',
            'description': 'Wie valt door de mand tijdens een missie?',
            'duration': 2967.06,
            'season': 'Season 1',
            'season_number': 1,
            'episode_number': 5,
        },
        'skip': 'This video is only available for registered users',
        'params': {
            'username': '<snip>',
            'password': '<snip>',
        },
        'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
    }]
    _NETRC_MACHINE = 'vrtnu'
    # Gigya SSO credentials for the VRT NU site.
    _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
    _CONTEXT_ID = 'R3595707040'
    def _real_initialize(self):
        self._login()
    def _login(self):
        # Log in through Gigya, then trade the Gigya session for VRT cookies.
        # A no-op when no credentials are configured (anonymous access).
        username, password = self._get_login_info()
        if username is None:
            return
        auth_data = {
            'APIKey': self._APIKEY,
            'targetEnv': 'jssdk',
            'loginID': username,
            'password': password,
            'authMode': 'cookie',
        }
        auth_info = self._gigya_login(auth_data)
        # Sometimes authentication fails for no good reason, retry
        login_attempt = 1
        while login_attempt <= 3:
            try:
                # When requesting a token, no actual token is returned, but the
                # necessary cookies are set.
                self._request_webpage(
                    'https://token.vrt.be',
                    None, note='Requesting a token', errnote='Could not get a token',
                    headers={
                        'Content-Type': 'application/json',
                        'Referer': 'https://www.vrt.be/vrtnu/',
                    },
                    data=json.dumps({
                        'uid': auth_info['UID'],
                        'uidsig': auth_info['UIDSignature'],
                        'ts': auth_info['signatureTimestamp'],
                        'email': auth_info['profile']['email'],
                    }).encode('utf-8'))
            except ExtractorError as e:
                # Retry only on 401; anything else is a real failure.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    login_attempt += 1
                    self.report_warning('Authentication failed')
                    self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
                else:
                    raise e
            else:
                break
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, display_id)
        info = self._search_json_ld(webpage, display_id, default={})
        # title is optional here since it may be extracted by extractor
        # that is delegated from here
        title = strip_or_none(self._html_search_regex(
            r'(?ms)<h1 class="content__heading">(.+?)</h1>',
            webpage, 'title', default=None))
        description = self._html_search_regex(
            r'(?ms)<div class="content__description">(.+?)</div>',
            webpage, 'description', default=None)
        season = self._html_search_regex(
            [r'''(?xms)<div\ class="tabs__tab\ tabs__tab--active">\s*
                    <span>seizoen\ (.+?)</span>\s*
                </div>''',
             r'<option value="seizoen (\d{1,3})" data-href="[^"]+?" selected>'],
            webpage, 'season', default=None)
        season_number = int_or_none(season)
        episode_number = int_or_none(self._html_search_regex(
            r'''(?xms)<div\ class="content__episode">\s*
                    <abbr\ title="aflevering">afl</abbr>\s*<span>(\d+)</span>
                </div>''',
            webpage, 'episode_number', default=None))
        release_date = parse_iso8601(self._html_search_regex(
            r'(?ms)<div class="content__broadcastdate">\s*<time\ datetime="(.+?)"',
            webpage, 'release_date', default=None))
        # If there's a ? or a # in the URL, remove them and everything after
        clean_url = urlh.geturl().split('?')[0].split('#')[0].strip('/')
        securevideo_url = clean_url + '.mssecurevideo.json'
        try:
            video = self._download_json(securevideo_url, display_id)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                self.raise_login_required()
            raise
        # We are dealing with a '../<show>.relevant' URL
        redirect_url = video.get('url')
        if redirect_url:
            return self.url_result(self._proto_relative_url(redirect_url, 'https:'))
        # There is only one entry, but with an unknown key, so just get
        # the first one
        video_id = list(video.values())[0].get('videoid')
        # Delegate the actual media extraction to CanvasIE; metadata scraped
        # above is merged into its result via url_transparent.
        return merge_dicts(info, {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'season': season,
            'season_number': season_number,
            'episode_number': episode_number,
            'release_date': release_date,
        }) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/env python
'''
example to show optical flow
USAGE: opt_flow.py [<video_source>]
Keys:
1 - toggle HSV flow visualization
2 - toggle glitch
Keys:
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import video
def draw_flow(img, flow, step=16):
    """Overlay a sparse grid of flow vectors (green segments + endpoints)
    on a grayscale image and return the BGR visualization."""
    h, w = img.shape[:2]
    # Sample the dense flow field on a regular grid, offset by step/2.
    ys, xs = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[ys, xs].T
    segments = np.vstack([xs, ys, xs + fx, ys + fy]).T.reshape(-1, 2, 2)
    segments = np.int32(segments + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, segments, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in segments:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
def draw_hsv(flow):
    """Render dense flow as a BGR image: hue encodes direction, value
    encodes magnitude (saturation fixed at maximum)."""
    h, w = flow.shape[:2]
    fx = flow[:, :, 0]
    fy = flow[:, :, 1]
    angle = np.arctan2(fy, fx) + np.pi
    magnitude = np.sqrt(fx * fx + fy * fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    # OpenCV hue range is 0..180, hence the /2.
    hsv[..., 0] = angle * (180 / np.pi / 2)
    hsv[..., 1] = 255
    hsv[..., 2] = np.minimum(magnitude * 4, 255)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def warp_flow(img, flow):
    """Warp img backwards along flow using cv2.remap; the input flow array
    is not modified (the negation makes a copy first)."""
    h, w = flow.shape[:2]
    remap_coords = -flow
    remap_coords[:, :, 0] += np.arange(w)
    remap_coords[:, :, 1] += np.arange(h)[:, np.newaxis]
    return cv2.remap(img, remap_coords, None, cv2.INTER_LINEAR)
if __name__ == '__main__':
    import sys
    print(__doc__)
    # Video source: first CLI argument, or device 0 (default camera).
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    cam = video.create_capture(fn)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Dense optical flow between the previous and current frame.
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray
        cv2.imshow('flow', draw_flow(gray, flow))
        if show_hsv:
            cv2.imshow('flow HSV', draw_hsv(flow))
        if show_glitch:
            # Repeatedly warping the same frame accumulates the "glitch".
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)
        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv
            print('HSV flow visualization is', ['off', 'on'][show_hsv])
        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
            print('glitch is', ['off', 'on'][show_glitch])
    cv2.destroyAllWindows() | unknown | codeparrot/codeparrot-clean | |
import { test } from '../../test';
export default test({
	props: {
		foo: true,
		bar: true
	},
	// Keep whitespace intact so hydrated text nodes match SSR output exactly.
	trim_whitespace: false,
	// Capture rendered nodes so the test can compare them after hydration.
	snapshot(target) {
		const div = target.querySelector('div');
		const ps = target.querySelectorAll('p');
		return {
			div,
			p0: ps[0],
			p1: ps[1]
		};
	}
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/hydration/samples/if-block-anchor/_config.js
class ApiException(Exception):
    """Base class for errors reported by the API.
    Carries the error category (*type*), an optional machine-readable *code*
    and the raw response *body* alongside the human-readable message.
    """
    def __init__(self, type, message=None, code=None, body=None):
        super(ApiException, self).__init__(message)
        self.type = type
        self.code = code
        self.body = body
    @staticmethod
    def create(type, message=None, code=None, body=None):
        """Build the concrete exception subclass matching *type*.
        Unrecognized types yield None.
        """
        if type == 'request-error':
            return RequestException(message, code, body)
        if type == 'authentication-error':
            return AuthenticationException(message, code, body)
        if type == 'server-error':
            return ServerException(message, code, body)
        if type == 'validation-error':
            return ValidationException(message, code, body)
        if type == 'resource-error':
            return ResourceException(message, code, body)
        if type == 'card-error':
            return CardException(message, code, body)
        return None
class InvalidSignatureException(Exception):
    # Standalone error, deliberately outside the ApiException hierarchy and
    # never produced by ApiException.create. Name suggests signature/webhook
    # verification failures — confirm at call sites.
    pass
class RequestException(ApiException):
    """Concrete ApiException with type 'request-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(RequestException, self).__init__('request-error', message, code, body)
class AuthenticationException(ApiException):
    """Concrete ApiException with type 'authentication-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(AuthenticationException, self).__init__('authentication-error', message, code, body)
class ServerException(ApiException):
    """Concrete ApiException with type 'server-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(ServerException, self).__init__('server-error', message, code, body)
class ValidationException(ApiException):
    """Concrete ApiException with type 'validation-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(ValidationException, self).__init__('validation-error', message, code, body)
class ResourceException(ApiException):
    """Concrete ApiException with type 'resource-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(ResourceException, self).__init__('resource-error', message, code, body)
class CardException(ApiException):
    """Concrete ApiException with type 'card-error'."""
    def __init__(self, message=None, code=None, body=None):
        super(CardException, self).__init__('card-error', message, code, body) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Example use of feedcache.Cache.
"""
__module_id__ = "$Id$"
#
# Import system modules
#
import sys
import shelve
#
# Import local modules
#
import cache
#
# Module
#
def main(urls=[]):
    # NOTE(review): mutable default argument; harmless here since urls is
    # never mutated, but a tuple default would be safer.
    # Fetch each feed through a shelve-backed cache and print its entries.
    print 'Saving feed data to ./.feedcache'
    storage = shelve.open('.feedcache')
    try:
        fc = cache.Cache(storage)
        for url in urls:
            parsed_data = fc.fetch(url)
            print parsed_data.feed.title
            for entry in parsed_data.entries:
                print '\t', entry.title
    finally:
        # Always close the shelve so cached entries are flushed to disk.
        storage.close()
    return
if __name__ == '__main__':
    # Every command-line argument is treated as a feed URL.
    main(sys.argv[1:]) | unknown | codeparrot/codeparrot-clean | |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from functools import wraps
import logging
import os
from sqlalchemy import event, exc
from sqlalchemy.pool import Pool
from airflow import settings
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function, if not this wrapper
    will create one and close it for you.

    A caller-supplied session (positional or keyword) is left entirely under
    the caller's control: no commit, no close. A wrapper-created session is
    committed on success and ALWAYS closed, even when *func* raises (the
    original leaked the session on the exception path).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        arg_session = 'session'
        func_params = func.__code__.co_varnames
        # 'session' counts as supplied when it arrives positionally...
        session_in_args = arg_session in func_params and \
            func_params.index(arg_session) < len(args)
        # ...or as a keyword argument.
        if session_in_args or arg_session in kwargs:
            return func(*args, **kwargs)
        session = settings.Session()
        try:
            kwargs[arg_session] = session
            result = func(*args, **kwargs)
            # Detach results from the session before committing so callers
            # can use them after the session is closed.
            session.expunge_all()
            session.commit()
            return result
        finally:
            # Close unconditionally; uncommitted work (e.g. after an
            # exception in func) is discarded with the session.
            session.close()
    return wrapper
def pessimistic_connection_handling():
    """Install a pool 'checkout' listener that pings connections before use.

    Each connection handed out by the SQLAlchemy pool is tested with a
    trivial SELECT 1; a broken connection raises DisconnectionError so the
    pool discards it and retries with a fresh one.
    """
    @event.listens_for(Pool, "checkout")
    def ping_connection(dbapi_connection, connection_record, connection_proxy):
        '''
        Disconnect Handling - Pessimistic, taken from:
        http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html
        '''
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute("SELECT 1")
        except Exception:
            # 'except Exception' instead of a bare except so that
            # KeyboardInterrupt/SystemExit still propagate unchanged.
            raise exc.DisconnectionError()
        finally:
            # Close in a finally: the original only closed the cursor on the
            # success path, leaking it whenever DisconnectionError was raised.
            cursor.close()
@provide_session
def merge_conn(conn, session=None):
    """Insert *conn* unless a connection with the same conn_id already exists.

    Idempotent: rerunning never overwrites an existing (possibly user-edited)
    connection row.
    """
    from airflow import models
    existing = session.query(models.Connection).filter(
        models.Connection.conn_id == conn.conn_id).first()
    if existing is None:
        session.add(conn)
        session.commit()
@event.listens_for(settings.engine, "connect")
def connect(dbapi_connection, connection_record):
    # Record the PID that created this connection; checkout() below compares
    # it to detect connections shared across process forks.
    connection_record.info['pid'] = os.getpid()
@event.listens_for(settings.engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
    # Fork safety: a DB connection created in a parent process must not be
    # reused from a child. Invalidate it and let the pool reconnect.
    pid = os.getpid()
    if connection_record.info['pid'] != pid:
        connection_record.connection = connection_proxy.connection = None
        raise exc.DisconnectionError(
            "Connection record belongs to pid {}, "
            "attempting to check out in pid {}".format(connection_record.info['pid'], pid)
        )
def initdb():
    """Upgrade the schema and seed defaults: example connections, known
    event types, DAG records and a sample chart."""
    session = settings.Session()
    from airflow import models
    upgradedb()
    # One default connection per supported hook/operator. merge_conn() only
    # inserts when the conn_id is missing, so rerunning initdb never
    # clobbers user-edited connections.
    merge_conn(
        models.Connection(
            conn_id='airflow_db', conn_type='mysql',
            host='localhost', login='root', password='',
            schema='airflow'))
    merge_conn(
        models.Connection(
            conn_id='airflow_ci', conn_type='mysql',
            host='localhost', login='root', extra="{\"local_infile\": true}",
            schema='airflow_ci'))
    merge_conn(
        models.Connection(
            conn_id='beeline_default', conn_type='beeline', port="10000",
            host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
            schema='default'))
    merge_conn(
        models.Connection(
            conn_id='bigquery_default', conn_type='bigquery'))
    merge_conn(
        models.Connection(
            conn_id='local_mysql', conn_type='mysql',
            host='localhost', login='airflow', password='airflow',
            schema='airflow'))
    merge_conn(
        models.Connection(
            conn_id='presto_default', conn_type='presto',
            host='localhost',
            schema='hive', port=3400))
    merge_conn(
        models.Connection(
            conn_id='google_cloud_default', conn_type='google_cloud_platform',
            schema='default',))
    merge_conn(
        models.Connection(
            conn_id='hive_cli_default', conn_type='hive_cli',
            schema='default',))
    merge_conn(
        models.Connection(
            conn_id='hiveserver2_default', conn_type='hiveserver2',
            host='localhost',
            schema='default', port=10000))
    merge_conn(
        models.Connection(
            conn_id='metastore_default', conn_type='hive_metastore',
            host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
            port=9083))
    merge_conn(
        models.Connection(
            conn_id='mysql_default', conn_type='mysql',
            login='root',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='postgres_default', conn_type='postgres',
            login='postgres',
            schema='airflow',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='sqlite_default', conn_type='sqlite',
            host='/tmp/sqlite_default.db'))
    merge_conn(
        models.Connection(
            conn_id='http_default', conn_type='http',
            host='https://www.google.com/'))
    merge_conn(
        models.Connection(
            conn_id='mssql_default', conn_type='mssql',
            host='localhost', port=1433))
    merge_conn(
        models.Connection(
            conn_id='vertica_default', conn_type='vertica',
            host='localhost', port=5433))
    merge_conn(
        models.Connection(
            conn_id='wasb_default', conn_type='wasb',
            extra='{"sas_token": null}'))
    merge_conn(
        models.Connection(
            conn_id='webhdfs_default', conn_type='hdfs',
            host='localhost', port=50070))
    merge_conn(
        models.Connection(
            conn_id='ssh_default', conn_type='ssh',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='fs_default', conn_type='fs',
            extra='{"path": "/"}'))
    merge_conn(
        models.Connection(
            conn_id='aws_default', conn_type='aws',
            extra='{"region_name": "us-east-1"}'))
    merge_conn(
        models.Connection(
            conn_id='spark_default', conn_type='spark',
            host='yarn', extra='{"queue": "root.default"}'))
    merge_conn(
        models.Connection(
            conn_id='redis_default', conn_type='redis',
            host='localhost', port=6379,
            extra='{"db": 0}'))
    merge_conn(
        models.Connection(
            conn_id='sqoop_default', conn_type='sqoop',
            host='rmdbs', extra=''))
    # The EMR connection's 'extra' holds a complete default job-flow spec.
    merge_conn(
        models.Connection(
            conn_id='emr_default', conn_type='emr',
            extra='''
                {   "Name": "default_job_flow_name",
                    "LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
                    "ReleaseLabel": "emr-4.6.0",
                    "Instances": {
                        "InstanceGroups": [
                            {
                                "Name": "Master nodes",
                                "Market": "ON_DEMAND",
                                "InstanceRole": "MASTER",
                                "InstanceType": "r3.2xlarge",
                                "InstanceCount": 1
                            },
                            {
                                "Name": "Slave nodes",
                                "Market": "ON_DEMAND",
                                "InstanceRole": "CORE",
                                "InstanceType": "r3.2xlarge",
                                "InstanceCount": 1
                            }
                        ]
                    },
                    "Ec2KeyName": "mykey",
                    "KeepJobFlowAliveWhenNoSteps": false,
                    "TerminationProtected": false,
                    "Ec2SubnetId": "somesubnet",
                    "Applications":[
                        { "Name": "Spark" }
                    ],
                    "VisibleToAllUsers": true,
                    "JobFlowRole": "EMR_EC2_DefaultRole",
                    "ServiceRole": "EMR_DefaultRole",
                    "Tags": [
                        {
                            "Key": "app",
                            "Value": "analytics"
                        },
                        {
                            "Key": "environment",
                            "Value": "development"
                        }
                    ]
                }
            '''))
    merge_conn(
        models.Connection(
            conn_id='databricks_default', conn_type='databricks',
            host='localhost'))
    # Known event types
    KET = models.KnownEventType
    if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
        session.add(KET(know_event_type='Holiday'))
    if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
        session.add(KET(know_event_type='Outage'))
    if not session.query(KET).filter(
            KET.know_event_type == 'Natural Disaster').first():
        session.add(KET(know_event_type='Natural Disaster'))
    if not session.query(KET).filter(
            KET.know_event_type == 'Marketing Campaign').first():
        session.add(KET(know_event_type='Marketing Campaign'))
    session.commit()
    dagbag = models.DagBag()
    # Save individual DAGs in the ORM
    now = datetime.utcnow()
    for dag in dagbag.dags.values():
        models.DAG.sync_to_db(dag, dag.owner, now)
    # Deactivate the unknown ones
    models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
    # Seed an example chart over the bundled example DAGs (create-once).
    Chart = models.Chart
    chart_label = "Airflow task instance by type"
    chart = session.query(Chart).filter(Chart.label == chart_label).first()
    if not chart:
        chart = Chart(
            label=chart_label,
            conn_id='airflow_db',
            chart_type='bar',
            x_is_date=False,
            sql=(
                "SELECT state, COUNT(1) as number "
                "FROM task_instance "
                "WHERE dag_id LIKE 'example%' "
                "GROUP BY state"),
        )
        session.add(chart)
        session.commit()
def upgradedb():
    """Run the Alembic migrations up to the latest revision ('heads')."""
    # alembic adds significant import time, so we import it lazily
    from alembic import command
    from alembic.config import Config
    logging.info("Creating tables")
    package_dir = os.path.normpath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
    config = Config(os.path.join(package_dir, 'alembic.ini'))
    config.set_main_option(
        'script_location', os.path.join(package_dir, 'migrations'))
    config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN)
    command.upgrade(config, 'heads')
def resetdb():
    '''
    Clear out the database
    '''
    from airflow import models
    # alembic adds significant import time, so we import it lazily
    from alembic.migration import MigrationContext
    logging.info("Dropping tables that exist")
    models.Base.metadata.drop_all(settings.engine)
    mc = MigrationContext.configure(settings.engine)
    # Also drop Alembic's version table so migrations replay from scratch.
    if mc._version.exists(settings.engine):
        mc._version.drop(settings.engine)
    # Rebuild the schema and reseed the defaults.
    initdb() | unknown | codeparrot/codeparrot-clean | |
/*
* Copyright (c) 2009-Present, Redis Ltd.
* All rights reserved.
*
* Copyright (c) 2024-present, Valkey contributors.
* All rights reserved.
*
* Licensed under your choice of (a) the Redis Source Available License 2.0
* (RSALv2); or (b) the Server Side Public License v1 (SSPLv1); or (c) the
* GNU Affero General Public License v3 (AGPLv3).
*
* Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
*/
#include "server.h"
#include "intset.h" /* Compact integer set structure */
/*-----------------------------------------------------------------------------
* Set Commands
*----------------------------------------------------------------------------*/
void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
robj *dstkey, int op);
/* Factory method to return a set that *can* hold "value". When the object has
 * an integer-encodable value, an intset will be returned. Otherwise a listpack
 * or a regular hash table.
 *
 * The size hint indicates approximately how many items will be added which is
 * used to determine the initial representation. */
robj *setTypeCreate(sds value, size_t size_hint) {
    int int_encodable = isSdsRepresentableAsLongLong(value,NULL) == C_OK;
    if (int_encodable && size_hint <= server.set_max_intset_entries)
        return createIntsetObject();
    if (size_hint <= server.set_max_listpack_entries)
        return createSetListpackObject();
    /* We may oversize the set by using the hint if the hint is not accurate,
     * but we will assume this is acceptable to maximize performance. */
    robj *set = createSetObject();
    dictExpand(set->ptr, size_hint);
    return set;
}
/* Check whether the existing set should be converted to another encoding
 * based on the size hint, and convert to a hash table if so. */
void setTypeMaybeConvert(robj *set, size_t size_hint) {
    int listpack_too_small = set->encoding == OBJ_ENCODING_LISTPACK &&
                             size_hint > server.set_max_listpack_entries;
    int intset_too_small = set->encoding == OBJ_ENCODING_INTSET &&
                           size_hint > server.set_max_intset_entries;
    if (listpack_too_small || intset_too_small)
        setTypeConvertAndExpand(set, OBJ_ENCODING_HT, size_hint, 1);
}
/* Return the maximum number of entries to store in an intset. */
static size_t intsetMaxEntries(void) {
    /* limit to 1G entries due to intset internals. */
    const size_t hard_limit = 1 << 30;
    size_t max_entries = server.set_max_intset_entries;
    return max_entries < hard_limit ? max_entries : hard_limit;
}
/* Converts intset to HT if it contains too many entries. */
static void maybeConvertIntset(robj *subject) {
    serverAssert(subject->encoding == OBJ_ENCODING_INTSET);
    if (intsetLen(subject->ptr) <= intsetMaxEntries()) return;
    setTypeConvert(subject,OBJ_ENCODING_HT);
}
/* When you know all set elements are integers, call this to convert the set to
 * an intset. No conversion happens if the set contains too many entries for an
 * intset. */
static void maybeConvertToIntset(robj *set) {
    if (set->encoding == OBJ_ENCODING_INTSET) return; /* already intset */
    if (setTypeSize(set) > intsetMaxEntries()) return; /* can't use intset */
    /* Build the replacement intset first, then swap it into the robj below. */
    intset *is = intsetNew();
    char *str;
    size_t len = 0;
    int64_t llval = 0;
    setTypeIterator si;
    setTypeInitIterator(&si, set);
    while (setTypeNext(&si, &str, &len, &llval) != -1) {
        if (str) {
            /* If the element is returned as a string, we may be able to convert
             * it to integer. This happens for OBJ_ENCODING_HT. */
            serverAssert(string2ll(str, len, (long long *)&llval));
        }
        uint8_t success = 0;
        is = intsetAdd(is, llval, &success);
        /* Set members are unique, so every add must succeed. */
        serverAssert(success);
    }
    setTypeResetIterator(&si);
    freeSetObject(set); /* frees the internals but not robj itself */
    set->ptr = is;
    set->encoding = OBJ_ENCODING_INTSET;
}
/* Add the specified sds value into a set.
 *
 * If the value was already member of the set, nothing is done and 0 is
 * returned, otherwise the new element is added and 1 is returned. */
int setTypeAdd(robj *subject, sds value) {
    /* Convenience wrapper around setTypeAddAux: value is an sds string
     * (str_is_sds=1) and no pre-parsed integer is supplied (llval=0). */
    return setTypeAddAux(subject, value, sdslen(value), 0, 1);
}
/* Add member. This function is optimized for the different encodings. The
 * value can be provided as an sds string (indicated by passing str_is_sds =
 * 1), as string and length (str_is_sds = 0) or as an integer in which case str
 * is set to NULL and llval is provided instead.
 *
 * May convert the set to a larger encoding (listpack -> HT, intset ->
 * listpack/HT) as a side effect when size/length thresholds are crossed.
 *
 * Returns 1 if the value was added and 0 if it was already a member. */
int setTypeAddAux(robj *set, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        /* Caller passed an integer. An intset can take it directly; any
         * other encoding needs the decimal string form. */
        if (set->encoding == OBJ_ENCODING_INTSET) {
            uint8_t success = 0;
            set->ptr = intsetAdd(set->ptr, llval, &success);
            if (success) maybeConvertIntset(set);
            return success;
        }
        /* Convert int to string. */
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }
    serverAssert(str);
    if (set->encoding == OBJ_ENCODING_HT) {
        /* Avoid duping the string if it is an sds string. */
        sds sdsval = str_is_sds ? (sds)str : sdsnewlen(str, len);
        dict *ht = set->ptr;
        dictEntryLink bucket, link = dictFindLink(ht, sdsval, &bucket);
        if (link == NULL) {
            /* Key doesn't already exist in the set. Add it but dup the key. */
            if (sdsval == str) sdsval = sdsdup(sdsval);
            dictSetKeyAtLink(ht, sdsval, &bucket, 1);
            /* Account for the sds allocation in the dict's metadata size. */
            *htGetMetadataSize(ht) += sdsAllocSize(sdsval);
            return 1;
        } else if (sdsval != str) {
            /* String is already a member. Free our temporary sds copy. */
            sdsfree(sdsval);
            return 0;
        }
        /* sdsval == str and already a member: falls through to return 0. */
    } else if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        if (p != NULL)
            p = lpFind(lp, p, (unsigned char*)str, len, 0);
        if (p == NULL) {
            /* Not found. */
            if (lpLength(lp) < server.set_max_listpack_entries &&
                len <= server.set_max_listpack_value &&
                lpSafeToAdd(lp, len))
            {
                if (str == tmpbuf) {
                    /* This came in as integer so we can avoid parsing it again.
                     * TODO: Create and use lpFindInteger; don't go via string. */
                    lp = lpAppendInteger(lp, llval);
                } else {
                    lp = lpAppend(lp, (unsigned char*)str, len);
                }
                set->ptr = lp;
            } else {
                /* Size limit is reached. Convert to hashtable and add. */
                setTypeConvertAndExpand(set, OBJ_ENCODING_HT, lpLength(lp) + 1, 1);
                sds newval = sdsnewlen(str,len);
                /* Element was absent from the listpack, so dictAdd must succeed. */
                serverAssert(dictAdd(set->ptr,newval,NULL) == DICT_OK);
                *htGetMetadataSize(set->ptr) += sdsAllocSize(newval);
            }
            return 1;
        }
        /* Found in the listpack: already a member, fall through to return 0. */
    } else if (set->encoding == OBJ_ENCODING_INTSET) {
        long long value;
        if (string2ll(str, len, &value)) {
            /* The string is integer-encodable: stay on intset. */
            uint8_t success = 0;
            set->ptr = intsetAdd(set->ptr,value,&success);
            if (success) {
                maybeConvertIntset(set);
                return 1;
            }
        } else {
            /* Check if listpack encoding is safe not to cross any threshold. */
            size_t maxelelen = 0, totsize = 0;
            unsigned long n = intsetLen(set->ptr);
            if (n != 0) {
                /* Estimate listpack size using the widest existing integer
                 * (min/max) repeated for every entry — an upper bound. */
                size_t elelen1 = sdigits10(intsetMax(set->ptr));
                size_t elelen2 = sdigits10(intsetMin(set->ptr));
                maxelelen = max(elelen1, elelen2);
                size_t s1 = lpEstimateBytesRepeatedInteger(intsetMax(set->ptr), n);
                size_t s2 = lpEstimateBytesRepeatedInteger(intsetMin(set->ptr), n);
                totsize = max(s1, s2);
            }
            if (intsetLen((const intset*)set->ptr) < server.set_max_listpack_entries &&
                len <= server.set_max_listpack_value &&
                maxelelen <= server.set_max_listpack_value &&
                lpSafeToAdd(NULL, totsize + len))
            {
                /* In the "safe to add" check above we assumed all elements in
                 * the intset are of size maxelelen. This is an upper bound. */
                setTypeConvertAndExpand(set, OBJ_ENCODING_LISTPACK,
                                        intsetLen(set->ptr) + 1, 1);
                unsigned char *lp = set->ptr;
                lp = lpAppend(lp, (unsigned char *)str, len);
                lp = lpShrinkToFit(lp);
                set->ptr = lp;
                return 1;
            } else {
                setTypeConvertAndExpand(set, OBJ_ENCODING_HT,
                                        intsetLen(set->ptr) + 1, 1);
                /* The set *was* an intset and this value is not integer
                 * encodable, so dictAdd should always work. */
                sds newval = sdsnewlen(str,len);
                serverAssert(dictAdd(set->ptr,newval,NULL) == DICT_OK);
                *htGetMetadataSize(set->ptr) += sdsAllocSize(newval);
                return 1;
            }
        }
    } else {
        serverPanic("Unknown set encoding");
    }
    return 0; /* value was already a member */
}
/* Deletes a value provided as an sds string from the set. Returns 1 if the
 * value was deleted and 0 if it was not a member of the set. */
int setTypeRemove(robj *setobj, sds value) {
    size_t vlen = sdslen(value);
    return setTypeRemoveAux(setobj, value, vlen, 0, 1);
}
/* Remove a member. This function is optimized for the different encodings. The
 * value can be provided as an sds string (indicated by passing str_is_sds =
 * 1), as string and length (str_is_sds = 0) or as an integer in which case str
 * is set to NULL and llval is provided instead.
 *
 * Note: removal never triggers an encoding conversion; the set keeps its
 * current encoding even when it shrinks.
 *
 * Returns 1 if the value was deleted and 0 if it was not a member of the set. */
int setTypeRemoveAux(robj *setobj, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        /* Integer input: the intset takes it directly; other encodings
         * need the decimal string representation. */
        if (setobj->encoding == OBJ_ENCODING_INTSET) {
            int success;
            setobj->ptr = intsetRemove(setobj->ptr,llval,&success);
            return success;
        }
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }
    if (setobj->encoding == OBJ_ENCODING_HT) {
        /* Only allocate a temporary sds when the input isn't one already. */
        sds sdsval = str_is_sds ? (sds)str : sdsnewlen(str, len);
        int deleted = (dictDelete(setobj->ptr, sdsval) == DICT_OK);
        if (sdsval != str) sdsfree(sdsval); /* free temp copy */
        return deleted;
    } else if (setobj->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = setobj->ptr;
        unsigned char *p = lpFirst(lp);
        if (p == NULL) return 0; /* empty listpack: nothing to remove */
        p = lpFind(lp, p, (unsigned char*)str, len, 0);
        if (p != NULL) {
            lp = lpDelete(lp, p, NULL);
            setobj->ptr = lp; /* lpDelete may reallocate */
            return 1;
        }
    } else if (setobj->encoding == OBJ_ENCODING_INTSET) {
        /* A non-integer string can never be a member of an intset. */
        long long llval;
        if (string2ll(str, len, &llval)) {
            int success;
            setobj->ptr = intsetRemove(setobj->ptr,llval,&success);
            if (success) return 1;
        }
    } else {
        serverPanic("Unknown set encoding");
    }
    return 0;
}
/* Check if an sds string is a member of the set. Returns 1 if the value is a
 * member of the set and 0 if it isn't. */
int setTypeIsMember(robj *subject, sds value) {
    size_t vlen = sdslen(value);
    return setTypeIsMemberAux(subject, value, vlen, 0, 1);
}
/* Membership checking optimized for the different encodings. The value can be
 * provided as an sds string (indicated by passing str_is_sds = 1), as string
 * and length (str_is_sds = 0) or as an integer in which case str is set to NULL
 * and llval is provided instead.
 *
 * Returns 1 if the value is a member of the set and 0 if it isn't. */
int setTypeIsMemberAux(robj *set, char *str, size_t len, int64_t llval, int str_is_sds) {
    char tmpbuf[LONG_STR_SIZE];
    if (!str) {
        /* Integer input: intset can check it directly, other encodings
         * need the decimal string form. */
        if (set->encoding == OBJ_ENCODING_INTSET)
            return intsetFind(set->ptr, llval);
        len = ll2string(tmpbuf, sizeof tmpbuf, llval);
        str = tmpbuf;
        str_is_sds = 0;
    }
    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        return p && lpFind(lp, p, (unsigned char*)str, len, 0);
    } else if (set->encoding == OBJ_ENCODING_INTSET) {
        /* A non-integer string can't be in an intset. */
        long long llval;
        return string2ll(str, len, &llval) && intsetFind(set->ptr, llval);
    } else if (set->encoding == OBJ_ENCODING_HT && str_is_sds) {
        /* Already an sds: look it up without copying. */
        return dictFind(set->ptr, (sds)str) != NULL;
    } else if (set->encoding == OBJ_ENCODING_HT) {
        /* Plain buffer: make a temporary sds for the dict lookup. */
        sds sdsval = sdsnewlen(str, len);
        int result = dictFind(set->ptr, sdsval) != NULL;
        sdsfree(sdsval);
        return result;
    } else {
        serverPanic("Unknown set encoding");
    }
}
/* Initialize 'si' to iterate over 'subject', dispatching on its encoding.
 * Pair with setTypeResetIterator() when iteration is done. */
void setTypeInitIterator(setTypeIterator *si, robj *subject) {
    si->subject = subject;
    si->encoding = subject->encoding;
    switch (si->encoding) {
    case OBJ_ENCODING_HT:
        dictInitIterator(&si->di, subject->ptr);
        break;
    case OBJ_ENCODING_INTSET:
        si->ii = 0; /* index of the next intset element */
        break;
    case OBJ_ENCODING_LISTPACK:
        si->lpi = NULL; /* NULL means "before the first element" */
        break;
    default:
        serverPanic("Unknown set encoding");
    }
}
/* Release iterator resources. Only the dict iterator needs explicit
 * cleanup; intset/listpack iteration state is plain data. */
void setTypeResetIterator(setTypeIterator *si) {
    if (si->encoding != OBJ_ENCODING_HT) return;
    dictResetIterator(&si->di);
}
/* Move to the next entry in the set. Returns the object at the current
 * position, as a string or as an integer.
 *
 * Since set elements can internally be stored as SDS strings, char buffers or
 * simple arrays of integers, setTypeNext returns the encoding of the
 * set object you are iterating, and will populate the appropriate pointers
 * (str and len) or (llele) depending on whether the value is stored as a string
 * or as an integer internally.
 *
 * If OBJ_ENCODING_HT is returned, then str points to an sds string and can be
 * used as such. If OBJ_ENCODING_INTSET, then llele is populated and str is
 * pointed to NULL. If OBJ_ENCODING_LISTPACK is returned, the value can be
 * either a string or an integer. If *str is not NULL, then str and len are
 * populated with the string content and length. Otherwise, llele populated with
 * an integer value.
 *
 * Note that str, len and llele pointers should all be passed and cannot
 * be NULL since the function will try to defensively populate the non
 * used field with values which are easy to trap if misused.
 *
 * When there are no more elements -1 is returned. */
int setTypeNext(setTypeIterator *si, char **str, size_t *len, int64_t *llele) {
    if (si->encoding == OBJ_ENCODING_HT) {
        dictEntry *de = dictNext(&si->di);
        if (de == NULL) return -1;
        *str = dictGetKey(de); /* dict keys are sds strings */
        *len = sdslen(*str);
        *llele = -123456789; /* Not needed. Defensive. */
    } else if (si->encoding == OBJ_ENCODING_INTSET) {
        /* si->ii is the next element index; post-increment advances it. */
        if (!intsetGet(si->subject->ptr,si->ii++,llele))
            return -1;
        *str = NULL;
    } else if (si->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = si->subject->ptr;
        unsigned char *lpi = si->lpi;
        if (lpi == NULL) {
            /* First call after init: position on the first entry. */
            lpi = lpFirst(lp);
        } else {
            lpi = lpNext(lp, lpi);
        }
        if (lpi == NULL) return -1;
        si->lpi = lpi;
        unsigned int l = 0;
        /* lpGetValue returns a pointer for string entries or NULL with
         * llele set for integer entries. */
        *str = (char *)lpGetValue(lpi, &l, (long long *)llele);
        *len = (size_t)l;
    } else {
        serverPanic("Wrong set encoding in setTypeNext");
    }
    return si->encoding;
}
/* Convenience wrapper around setTypeNext() that always materializes the
 * current element as a freshly allocated SDS string. Not copy-on-write
 * friendly, but simple: the caller owns the returned string and must
 * sdsfree() it unless it is retained elsewhere. Returns NULL when the
 * iterator is exhausted. */
sds setTypeNextObject(setTypeIterator *si) {
    char *str;
    size_t len = 0;
    int64_t intele = 0;
    if (setTypeNext(si, &str, &len, &intele) == -1) return NULL;
    return str ? sdsnewlen(str, len) : sdsfromlonglong(intele);
}
/* Return random element from a non empty set.
 * The returned element can be an int64_t value if the set is encoded
 * as an "intset" blob of integers, or an string.
 *
 * The caller provides three pointers to be populated with the right
 * object. The return value of the function is the object->encoding
 * field of the object and can be used by the caller to check if the
 * int64_t pointer or the str and len pointers were populated, as for
 * setTypeNext. If OBJ_ENCODING_HT is returned, str is pointed to a
 * string which is actually an sds string and it can be used as such.
 *
 * Note that both the str, len and llele pointers should be passed and cannot
 * be NULL. If str is set to NULL, the value is an integer stored in llele. */
int setTypeRandomElement(robj *setobj, char **str, size_t *len, int64_t *llele) {
    if (setobj->encoding == OBJ_ENCODING_HT) {
        dictEntry *de = dictGetFairRandomKey(setobj->ptr);
        *str = dictGetKey(de); /* dict key is an sds string */
        *len = sdslen(*str);
        *llele = -123456789; /* Not needed. Defensive. */
    } else if (setobj->encoding == OBJ_ENCODING_INTSET) {
        *llele = intsetRandom(setobj->ptr);
        *str = NULL; /* Not needed. Defensive. */
    } else if (setobj->encoding == OBJ_ENCODING_LISTPACK) {
        /* Pick a uniform random index and seek to it. The set must be
         * non-empty (see function contract), so lpLength() > 0. */
        unsigned char *lp = setobj->ptr;
        int r = rand() % lpLength(lp);
        unsigned char *p = lpSeek(lp, r);
        unsigned int l;
        *str = (char *)lpGetValue(p, &l, (long long *)llele);
        *len = (size_t)l;
    } else {
        serverPanic("Unknown set encoding");
    }
    return setobj->encoding;
}
/* Pops a random element and returns it as an object.
 *
 * The set must be non-empty. The returned string object is owned by the
 * caller (refcount 1). The element is removed from the set; the caller is
 * responsible for deleting the set if it becomes empty. */
robj *setTypePopRandom(robj *set) {
    robj *obj;
    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        /* Find random and delete it without re-seeking the listpack. */
        unsigned int i = 0;
        unsigned char *p = lpNextRandom(set->ptr, lpFirst(set->ptr), &i, 1, 1);
        unsigned int len = 0; /* initialize to silence warning */
        long long llele = 0; /* initialize to silence warning */
        char *str = (char *)lpGetValue(p, &len, &llele);
        if (str)
            obj = createStringObject(str, len);
        else
            obj = createStringObjectFromLongLong(llele);
        set->ptr = lpDelete(set->ptr, p, NULL);
    } else {
        /* Generic path: pick a random element, copy it into a new object,
         * then remove it from the set. */
        char *str;
        size_t len = 0;
        int64_t llele = 0;
        int encoding = setTypeRandomElement(set, &str, &len, &llele);
        if (str)
            obj = createStringObject(str, len);
        else
            obj = createStringObjectFromLongLong(llele);
        /* str is an sds only when the set is HT encoded. */
        setTypeRemoveAux(set, str, len, llele, encoding == OBJ_ENCODING_HT);
    }
    return obj;
}
/* Return the number of elements in the set, whatever its encoding. */
unsigned long setTypeSize(const robj *subject) {
    switch (subject->encoding) {
    case OBJ_ENCODING_HT:
        return dictSize((const dict*)subject->ptr);
    case OBJ_ENCODING_INTSET:
        return intsetLen((const intset*)subject->ptr);
    case OBJ_ENCODING_LISTPACK:
        return lpLength((unsigned char *)subject->ptr);
    default:
        serverPanic("Unknown set encoding");
    }
}
/* Return the allocated memory footprint of the set's internal
 * representation (not including the robj header itself). */
size_t setTypeAllocSize(const robj *o) {
    serverAssertWithInfo(NULL,o,o->type == OBJ_SET);
    switch (o->encoding) {
    case OBJ_ENCODING_HT: {
        /* Dict structure + table memory + sds keys tracked in metadata. */
        dict *d = o->ptr;
        return sizeof(dict) + dictMemUsage(d) + *htGetMetadataSize(d);
    }
    case OBJ_ENCODING_INTSET:
        return intsetAllocSize(o->ptr);
    case OBJ_ENCODING_LISTPACK:
        return lpBytes(o->ptr);
    default:
        serverPanic("Unknown set encoding");
    }
    return 0; /* not reached */
}
/* Convert the set to the specified encoding, pre-sizing the target
 * representation (when converting to a hash table) for the current
 * number of elements. Panics on OOM. */
void setTypeConvert(robj *setobj, int enc) {
    unsigned long cap = setTypeSize(setobj);
    setTypeConvertAndExpand(setobj, enc, cap, 1);
}
/* Converts a set to the specified encoding, pre-sizing it for 'cap' elements.
 * The 'panic' argument controls whether to panic on OOM (panic=1) or return
 * C_ERR on OOM (panic=0). If panic=1 is given, this function always returns
 * C_OK.
 *
 * Supported targets are OBJ_ENCODING_HT and OBJ_ENCODING_LISTPACK; the
 * source encoding must differ from 'enc'. */
int setTypeConvertAndExpand(robj *setobj, int enc, unsigned long cap, int panic) {
    setTypeIterator si;
    serverAssertWithInfo(NULL,setobj,setobj->type == OBJ_SET &&
                             setobj->encoding != enc);
    if (enc == OBJ_ENCODING_HT) {
        dict *d = dictCreate(&setDictType);
        sds element;
        /* Presize the dict to avoid rehashing */
        if (panic) {
            dictExpand(d, cap);
        } else if (dictTryExpand(d, cap) != DICT_OK) {
            /* Best-effort mode: release the partial dict and report OOM. */
            dictRelease(d);
            return C_ERR;
        }
        /* To add the elements we extract integers and create redis objects */
        size_t *alloc_size = htGetMetadataSize(d);
        setTypeInitIterator(&si, setobj);
        while ((element = setTypeNextObject(&si)) != NULL) {
            /* Source elements are unique, so dictAdd must succeed. */
            serverAssert(dictAdd(d,element,NULL) == DICT_OK);
            *alloc_size += sdsAllocSize(element);
        }
        setTypeResetIterator(&si);
        freeSetObject(setobj); /* frees the internals but not setobj itself */
        setobj->encoding = OBJ_ENCODING_HT;
        setobj->ptr = d;
    } else if (enc == OBJ_ENCODING_LISTPACK) {
        /* Preallocate the minimum two bytes per element (enc/value + backlen) */
        size_t estcap = cap * 2;
        if (setobj->encoding == OBJ_ENCODING_INTSET && setTypeSize(setobj) > 0) {
            /* If we're converting from intset, we have a better estimate. */
            size_t s1 = lpEstimateBytesRepeatedInteger(intsetMin(setobj->ptr), cap);
            size_t s2 = lpEstimateBytesRepeatedInteger(intsetMax(setobj->ptr), cap);
            estcap = max(s1, s2);
        }
        unsigned char *lp = lpNew(estcap);
        char *str;
        size_t len = 0;
        int64_t llele = 0;
        setTypeInitIterator(&si, setobj);
        while (setTypeNext(&si, &str, &len, &llele) != -1) {
            if (str != NULL)
                lp = lpAppend(lp, (unsigned char *)str, len);
            else
                lp = lpAppendInteger(lp, llele);
        }
        setTypeResetIterator(&si);
        freeSetObject(setobj); /* frees the internals but not setobj itself */
        setobj->encoding = OBJ_ENCODING_LISTPACK;
        setobj->ptr = lp;
    } else {
        serverPanic("Unsupported set conversion");
    }
    return C_OK;
}
/* This is a helper function for the COPY command.
 * Duplicate a set object, with the guarantee that the returned object
 * has the same encoding as the original one.
 *
 * The resulting object always has refcount set to 1 */
robj *setTypeDup(robj *o) {
    robj *set;
    serverAssert(o->type == OBJ_SET);
    /* Create a new set object that have the same encoding as the original object's encoding */
    if (o->encoding == OBJ_ENCODING_INTSET) {
        /* Intsets are a flat blob: a memcpy clones them entirely. */
        intset *is = o->ptr;
        size_t size = intsetBlobLen(is);
        intset *newis = zmalloc(size);
        memcpy(newis,is,size);
        set = createObject(OBJ_SET, newis);
        set->encoding = OBJ_ENCODING_INTSET;
    } else if (o->encoding == OBJ_ENCODING_LISTPACK) {
        /* Listpacks are also a single contiguous allocation. */
        unsigned char *lp = o->ptr;
        size_t sz = lpBytes(lp);
        unsigned char *new_lp = zmalloc(sz);
        memcpy(new_lp, lp, sz);
        set = createObject(OBJ_SET, new_lp);
        set->encoding = OBJ_ENCODING_LISTPACK;
    } else if (o->encoding == OBJ_ENCODING_HT) {
        /* Hash table: re-insert every element into a presized new dict.
         * createSetObject() yields an HT-encoded set, preserving encoding. */
        set = createSetObject();
        dict *d = o->ptr;
        dictExpand(set->ptr, dictSize(d));
        setTypeIterator si;
        setTypeInitIterator(&si, o);
        char *str;
        size_t len = 0;
        int64_t intobj = 0;
        while (setTypeNext(&si, &str, &len, &intobj) != -1) {
            /* For HT encoding, str is always an sds (see setTypeNext). */
            setTypeAdd(set, (sds)str);
        }
        setTypeResetIterator(&si);
    } else {
        serverPanic("Unknown set encoding");
    }
    return set;
}
/* SADD key member [member ...]
 * Adds the given members, creating the key when it does not exist.
 * Replies with the number of members that were newly added. */
void saddCommand(client *c) {
    kvobj *set;
    int j, added = 0;
    dictEntryLink link;
    size_t oldsize = 0;
    set = lookupKeyWriteWithLink(c->db,c->argv[1], &link);
    if (checkType(c,set,OBJ_SET)) return;
    if (set == NULL) {
        /* Key is missing: create a set presized for all the new members,
         * picking the best initial encoding from the first value. */
        robj *o = setTypeCreate(c->argv[2]->ptr, c->argc - 2);
        set = dbAddByLink(c->db, c->argv[1], &o, &link);
    } else {
        /* Possibly convert the encoding up front; track the allocation
         * delta the conversion causes. */
        if (server.memory_tracking_enabled)
            oldsize = kvobjAllocSize(set);
        setTypeMaybeConvert(set, c->argc - 2);
        if (server.memory_tracking_enabled)
            updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    }
    /* Snapshot size again so the additions below are tracked separately
     * from the conversion above. */
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(set);
    for (j = 2; j < c->argc; j++) {
        if (setTypeAdd(set,c->argv[j]->ptr)) added++;
    }
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    if (added) {
        /* Only signal modification/notify when something actually changed. */
        unsigned long size = setTypeSize(set);
        updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, size - added, size);
        keyModified(c,c->db,c->argv[1],set,1);
        notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[1],c->db->id);
    }
    server.dirty += added;
    addReplyLongLong(c,added);
}
/* SREM key member [member ...]
 * Removes the given members; deletes the key when the set becomes empty.
 * Replies with the number of members actually removed. */
void sremCommand(client *c) {
    int j, deleted = 0, keyremoved = 0;
    size_t oldsize = 0;
    kvobj *set = lookupKeyWriteOrReply(c, c->argv[1], shared.czero);
    if (set == NULL || checkType(c, set, OBJ_SET))
        return;
    unsigned long oldSize = setTypeSize(set);
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(set);
    for (j = 2; j < c->argc; j++) {
        if (setTypeRemove(set,c->argv[j]->ptr)) {
            deleted++;
            if (setTypeSize(set) == 0) {
                /* Flush the allocation delta before the object is freed
                 * by the key deletion below. */
                if (server.memory_tracking_enabled)
                    updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
                dbDeleteSkipKeysizesUpdate(c->db, c->argv[1]);
                keyremoved = 1;
                break;
            }
        }
    }
    /* If the key was removed, tracking was already updated above and 'set'
     * now dangles — don't touch it again. */
    if (server.memory_tracking_enabled && !keyremoved)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    if (deleted) {
        int64_t newSize = oldSize - deleted;
        keyModified(c, c->db, c->argv[1], keyremoved ? NULL : set, 1);
        notifyKeyspaceEvent(NOTIFY_SET,"srem",c->argv[1],c->db->id);
        if (keyremoved) {
            notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],
                                c->db->id);
            newSize = -1; /* removed */
        }
        updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, oldSize, newSize);
        server.dirty += deleted;
    }
    addReplyLongLong(c,deleted);
}
/* SMOVE source destination member
 * Atomically moves 'member' from the source set to the destination set.
 * Replies 1 when the move happened, 0 when 'member' was not in source. */
void smoveCommand(client *c) {
    robj *srcset, *dstset, *ele;
    size_t oldSrcAllocSize = 0, oldDstAllocSize = 0;
    srcset = lookupKeyWrite(c->db,c->argv[1]);
    dstset = lookupKeyWrite(c->db,c->argv[2]);
    ele = c->argv[3];
    /* If the source key does not exist return 0 */
    if (srcset == NULL) {
        addReply(c,shared.czero);
        return;
    }
    /* If the source key has the wrong type, or the destination key
     * is set and has the wrong type, return with an error. */
    if (checkType(c,srcset,OBJ_SET) ||
        checkType(c,dstset,OBJ_SET)) return;
    /* If srcset and dstset are equal, SMOVE is a no-op */
    if (srcset == dstset) {
        addReply(c,setTypeIsMember(srcset,ele->ptr) ?
                 shared.cone : shared.czero);
        return;
    }
    if (server.memory_tracking_enabled)
        oldSrcAllocSize = kvobjAllocSize(srcset);
    int deleted = setTypeRemove(srcset,ele->ptr);
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), srcset, oldSrcAllocSize, kvobjAllocSize(srcset));
    /* If the element cannot be removed from the src set, return 0. */
    if (!deleted) {
        addReply(c,shared.czero);
        return;
    }
    notifyKeyspaceEvent(NOTIFY_SET,"srem",c->argv[1],c->db->id);
    /* Update keysizes histogram */
    int64_t srcNewLen = setTypeSize(srcset), srcOldLen = srcNewLen + 1;
    /* Remove the src set from the database when empty */
    if (srcNewLen == 0) {
        dbDeleteSkipKeysizesUpdate(c->db,c->argv[1]);
        srcNewLen = -1; /* removed */
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
    }
    updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, srcOldLen, srcNewLen);
    /* Create the destination set when it doesn't exist */
    if (!dstset) {
        dstset = setTypeCreate(ele->ptr, 1);
        dbAdd(c->db, c->argv[2], &dstset);
    }
    keyModified(c, c->db, c->argv[1], (srcNewLen > 0) ? srcset : NULL, 1);
    server.dirty++;
    if (server.memory_tracking_enabled)
        oldDstAllocSize = kvobjAllocSize(dstset);
    /* An extra key has changed when ele was successfully added to dstset */
    if (setTypeAdd(dstset,ele->ptr)) {
        unsigned long dstLen = setTypeSize(dstset);
        updateKeysizesHist(c->db, getKeySlot(c->argv[2]->ptr), OBJ_SET, dstLen - 1, dstLen);
        server.dirty++;
        keyModified(c,c->db,c->argv[2],dstset,1);
        notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[2],c->db->id);
    }
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[2]->ptr), dstset, oldDstAllocSize, kvobjAllocSize(dstset));
    addReply(c,shared.cone);
}
/* SISMEMBER key member
 * Replies 1 when 'member' belongs to the set at 'key', 0 otherwise
 * (including when the key does not exist). */
void sismemberCommand(client *c) {
    size_t prev_alloc = 0;
    kvobj *set = lookupKeyReadOrReply(c, c->argv[1], shared.czero);
    if (set == NULL || checkType(c, set, OBJ_SET)) return;
    /* Even a read may touch allocation accounting (e.g. lazy structures),
     * so snapshot and re-sync the tracked size around the lookup. */
    int track = server.memory_tracking_enabled;
    if (track) prev_alloc = kvobjAllocSize(set);
    int member = setTypeIsMember(set, c->argv[2]->ptr);
    addReply(c, member ? shared.cone : shared.czero);
    if (track)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, prev_alloc, kvobjAllocSize(set));
}
/* SMISMEMBER key member [member ...]
 * Replies with an array of 0/1 flags, one per queried member. A missing
 * key behaves as an empty set, so every flag is 0 rather than an error. */
void smismemberCommand(client *c) {
    size_t prev_alloc = 0;
    kvobj *set = lookupKeyRead(c->db, c->argv[1]);
    if (set && checkType(c, set, OBJ_SET)) return;
    int track = server.memory_tracking_enabled && set != NULL;
    if (track) prev_alloc = kvobjAllocSize(set);
    addReplyArrayLen(c, c->argc - 2);
    for (int i = 2; i < c->argc; i++) {
        int hit = set != NULL && setTypeIsMember(set, c->argv[i]->ptr);
        addReply(c, hit ? shared.cone : shared.czero);
    }
    if (track)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, prev_alloc, kvobjAllocSize(set));
}
/* SCARD key
 * Replies with the cardinality of the set, or 0 when the key is missing. */
void scardCommand(client *c) {
    kvobj *kv = lookupKeyReadOrReply(c, c->argv[1], shared.czero);
    if (kv == NULL || checkType(c, kv, OBJ_SET)) return;
    addReplyLongLong(c, setTypeSize(kv));
}
/* Handle the "SPOP key <count>" variant. The normal version of the
 * command is handled by the spopCommand() function itself. */
/* How many times bigger should be the set compared to the remaining size
 * for us to use the "create new set" strategy? Read later in the
 * implementation for more info. */
#define SPOP_MOVE_STRATEGY_MUL 5
void spopWithCountCommand(client *c) {
    long l;
    unsigned long count, size, toRemove;
    size_t oldsize = 0;
    /* Get the count argument */
    if (getPositiveLongFromObjectOrReply(c,c->argv[2],&l,NULL) != C_OK) return;
    count = (unsigned long) l;
    /* Make sure a key with the name inputted exists, and that its type is
     * indeed a set. Otherwise, return nil */
    robj *set = lookupKeyWriteOrReply(c, c->argv[1], shared.emptyset[c->resp]);
    if (set == NULL || checkType(c, set, OBJ_SET)) return;
    /* If count is zero, serve an empty set ASAP to avoid special
     * cases later. */
    if (count == 0) {
        addReply(c,shared.emptyset[c->resp]);
        return;
    }
    size = setTypeSize(set);
    toRemove = (count >= size) ? size : count;
    /* Generate an SPOP keyspace notification */
    notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);
    server.dirty += toRemove;
    /* CASE 1:
     * The number of requested elements is greater than or equal to
     * the number of elements inside the set: simply return the whole set. */
    if (count >= size) {
        /* We just return the entire set */
        sunionDiffGenericCommand(c,c->argv+1,1,NULL,SET_OP_UNION);
        /* Delete the set as it is now empty */
        dbDelete(c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
        /* todo: Move the spop notification to be executed after the command logic. */
        /* Propagate this command as a DEL or UNLINK operation */
        robj *aux = server.lazyfree_lazy_server_del ? shared.unlink : shared.del;
        rewriteClientCommandVector(c, 2, aux, c->argv[1]);
        keyModified(c,c->db,c->argv[1],NULL,1);
        return;
    }
    /* Case 2 and 3 require to replicate SPOP as a set of SREM commands.
     * Prepare our replication argument vector. Also send the array length
     * which is common to both the code paths.
     *
     * Popped elements are accumulated into 'propargv' and flushed to the
     * replication stream in batches of at most 1024 per SREM to bound
     * memory usage for huge counts. */
    unsigned long batchsize = count > 1024 ? 1024 : count;
    robj **propargv = zmalloc(sizeof(robj *) * (2 + batchsize));
    propargv[0] = shared.srem;
    propargv[1] = c->argv[1];
    unsigned long propindex = 2;
    addReplySetLen(c,count);
    /* Common iteration vars. */
    char *str;
    size_t len = 0;
    int64_t llele = 0;
    unsigned long remaining = size-count; /* Elements left after SPOP. */
    /* If we are here, the number of requested elements is less than the
     * number of elements inside the set. Also we are sure that count < size.
     * Use two different strategies.
     *
     * CASE 2: The number of elements to return is small compared to the
     * set size. We can just extract random elements and return them to
     * the set. */
    if (remaining*SPOP_MOVE_STRATEGY_MUL > count &&
        set->encoding == OBJ_ENCODING_LISTPACK)
    {
        /* Specialized case for listpack. Traverse it only once. */
        if (server.memory_tracking_enabled)
            oldsize = kvobjAllocSize(set);
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        unsigned int index = 0;
        /* Collect the entry pointers first; one lpBatchDelete at the end
         * avoids repeated listpack reallocations. */
        unsigned char **ps = zmalloc(sizeof(char *) * count);
        for (unsigned long i = 0; i < count; i++) {
            p = lpNextRandom(lp, p, &index, count - i, 1);
            unsigned int len = 0;
            str = (char *)lpGetValue(p, &len, (long long *)&llele);
            if (str) {
                addReplyBulkCBuffer(c, str, len);
                propargv[propindex++] = createStringObject(str, len);
            } else {
                addReplyBulkLongLong(c, llele);
                propargv[propindex++] = createStringObjectFromLongLong(llele);
            }
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long j = 2; j < propindex; j++) {
                    decrRefCount(propargv[j]);
                }
                propindex = 2;
            }
            /* Store pointer for later deletion and move to next. */
            ps[i] = p;
            p = lpNext(lp, p);
            index++;
        }
        lp = lpBatchDelete(lp, ps, count);
        zfree(ps);
        set->ptr = lp;
        updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, size, size - count);
        if (server.memory_tracking_enabled)
            updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    } else if (remaining*SPOP_MOVE_STRATEGY_MUL > count) {
        /* Same strategy for intset/HT encodings: pop random elements one
         * at a time. */
        if (server.memory_tracking_enabled)
            oldsize = kvobjAllocSize(set);
        for (unsigned long i = 0; i < count; i++) {
            propargv[propindex] = setTypePopRandom(set);
            addReplyBulk(c, propargv[propindex]);
            propindex++;
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long j = 2; j < propindex; j++) {
                    decrRefCount(propargv[j]);
                }
                propindex = 2;
            }
        }
        updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, size, size - count);
        if (server.memory_tracking_enabled)
            updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    } else {
        /* CASE 3: The number of elements to return is very big, approaching
         * the size of the set itself. After some time extracting random elements
         * from such a set becomes computationally expensive, so we use
         * a different strategy, we extract random elements that we don't
         * want to return (the elements that will remain part of the set),
         * creating a new set as we do this (that will be stored as the original
         * set). Then we return the elements left in the original set and
         * release it. */
        robj *newset = NULL;
        if (server.memory_tracking_enabled)
            oldsize = kvobjAllocSize(set);
        /* Create a new set with just the remaining elements. */
        if (set->encoding == OBJ_ENCODING_LISTPACK) {
            /* Specialized case for listpack. Traverse it only once. */
            newset = createSetListpackObject();
            unsigned char *lp = set->ptr;
            unsigned char *p = lpFirst(lp);
            unsigned int index = 0;
            unsigned char **ps = zmalloc(sizeof(char *) * remaining);
            for (unsigned long i = 0; i < remaining; i++) {
                p = lpNextRandom(lp, p, &index, remaining - i, 1);
                unsigned int len = 0;
                str = (char *)lpGetValue(p, &len, (long long *)&llele);
                setTypeAddAux(newset, str, len, llele, 0);
                ps[i] = p;
                p = lpNext(lp, p);
                index++;
            }
            lp = lpBatchDelete(lp, ps, remaining);
            zfree(ps);
            set->ptr = lp;
        } else {
            while(remaining--) {
                int encoding = setTypeRandomElement(set, &str, &len, &llele);
                if (!newset) {
                    /* Lazily pick the cheapest encoding for the surviving
                     * elements based on the first one seen. */
                    newset = str ? createSetListpackObject() : createIntsetObject();
                }
                setTypeAddAux(newset, str, len, llele, encoding == OBJ_ENCODING_HT);
                setTypeRemoveAux(set, str, len, llele, encoding == OBJ_ENCODING_HT);
            }
        }
        /* Transfer the old set to the client. */
        setTypeIterator si;
        setTypeInitIterator(&si, set);
        while (setTypeNext(&si, &str, &len, &llele) != -1) {
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
                propargv[propindex++] = createStringObjectFromLongLong(llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
                propargv[propindex++] = createStringObject(str, len);
            }
            /* Replicate/AOF this command as an SREM operation */
            if (propindex == 2 + batchsize) {
                alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
                for (unsigned long i = 2; i < propindex; i++) {
                    decrRefCount(propargv[i]);
                }
                propindex = 2;
            }
        }
        setTypeResetIterator(&si);
        /* Update key size histogram "explicitly" and not indirectly by dbReplaceValue()
         * since function dbReplaceValue() assumes the entire set is being replaced,
         * but here we're building the new set from the existing one. As a result,
         * the size of the old set has already changed by the time we reach this point. */
        updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, size, size-count);
        if (server.memory_tracking_enabled)
            updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
        dbReplaceValue(c->db, c->argv[1], &newset, 0);
        set = newset;
    }
    /* Replicate/AOF the remaining elements as an SREM operation */
    if (propindex != 2) {
        alsoPropagate(c->db->id, propargv, propindex, PROPAGATE_AOF | PROPAGATE_REPL);
        for (unsigned long i = 2; i < propindex; i++) {
            decrRefCount(propargv[i]);
        }
        propindex = 2;
    }
    zfree(propargv);
    /* Don't propagate the command itself even if we incremented the
     * dirty counter. We don't want to propagate an SPOP command since
     * we propagated the command as a set of SREMs operations using
     * the alsoPropagate() API. */
    preventCommandPropagation(c);
    keyModified(c,c->db,c->argv[1],set,1);
}
/* SPOP key [count]
 * Removes and returns one random member (the <count> variant is delegated
 * to spopWithCountCommand). Replicated as an explicit SREM so replicas
 * remove the exact same member. */
void spopCommand(client *c) {
    unsigned long size;
    robj *ele;
    size_t oldsize = 0;
    if (c->argc == 3) {
        spopWithCountCommand(c);
        return;
    } else if (c->argc > 3) {
        addReplyErrorObject(c,shared.syntaxerr);
        return;
    }
    /* Make sure a key with the name inputted exists, and that it's type is
     * indeed a kv */
    kvobj *kv = lookupKeyWriteOrReply(c, c->argv[1], shared.null[c->resp]);
    if (kv == NULL || checkType(c, kv, OBJ_SET)) return;
    size = setTypeSize(kv);
    updateKeysizesHist(c->db, getKeySlot(c->argv[1]->ptr), OBJ_SET, size, size-1);
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(kv);
    /* Pop a random element from the kv */
    ele = setTypePopRandom(kv);
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), kv, oldsize, kvobjAllocSize(kv));
    notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id);
    /* Replicate/AOF this command as an SREM operation */
    rewriteClientCommandVector(c,3,shared.srem,c->argv[1],ele);
    /* Add the element to the reply */
    addReplyBulk(c, ele);
    decrRefCount(ele);
    /* Delete the kv if it's empty */
    int deleted = 0;
    if (setTypeSize(kv) == 0) {
        deleted = 1;
        dbDelete(c->db,c->argv[1]);
        notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id);
    }
    /* Set has been modified */
    keyModified(c, c->db, c->argv[1], deleted ? NULL : kv, 1);
    server.dirty++;
}
/* Handle the "SRANDMEMBER key <count>" variant. The normal version of the
 * command is handled by the srandmemberCommand() function itself. */

/* How many times bigger should the set be, compared to the requested size,
 * for us to not use the "remove elements" strategy? Read later in the
 * implementation for more info. */
#define SRANDMEMBER_SUB_STRATEGY_MUL 3

/* If the client is asking for a very large number of random elements,
 * queuing may consume an unlimited amount of memory, so we want to limit
 * the number of randoms generated at a time. */
#define SRANDFIELD_RANDOM_SAMPLE_LIMIT 1000
/* SRANDMEMBER key <count>
 *
 * Reply with <count> random members. A positive count returns distinct
 * members; a negative count allows repetitions. Four strategies are used
 * depending on count vs. set size and on the set encoding; the set itself
 * is never modified. */
void srandmemberWithCountCommand(client *c) {
    long l;
    unsigned long count, size;
    int uniq = 1;               /* 1 = distinct members required */
    kvobj *set;
    char *str;
    size_t len = 0;
    int64_t llele = 0;
    dict *d;

    if (getRangeLongFromObjectOrReply(c,c->argv[2],-LONG_MAX,LONG_MAX,&l,NULL) != C_OK) return;
    if (l >= 0) {
        count = (unsigned long) l;
    } else {
        /* A negative count means: return the same elements multiple times
         * (i.e. don't remove the extracted element after every extraction). */
        count = -l;
        uniq = 0;
    }

    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyarray))
        == NULL || checkType(c,set,OBJ_SET)) return;
    size = setTypeSize(set);

    /* If count is zero, serve it ASAP to avoid special cases later. */
    if (count == 0) {
        addReply(c,shared.emptyarray);
        return;
    }

    /* CASE 1: The count was negative, so the extraction method is just:
     * "return N random elements" sampling the whole set every time.
     * This case is trivial and can be served without auxiliary data
     * structures. This case is the only one that also needs to return the
     * elements in random order. */
    if (!uniq || count == 1) {
        addReplyArrayLen(c,count);
        if (set->encoding == OBJ_ENCODING_LISTPACK && count > 1) {
            /* Specialized case for listpack, traversing it only once. */
            unsigned long limit, sample_count;
            /* Cap each batch to keep the temporary buffer bounded. */
            limit = count > SRANDFIELD_RANDOM_SAMPLE_LIMIT ? SRANDFIELD_RANDOM_SAMPLE_LIMIT : count;
            listpackEntry *entries = zmalloc(limit * sizeof(listpackEntry));
            while (count) {
                sample_count = count > limit ? limit : count;
                count -= sample_count;
                lpRandomEntries(set->ptr, sample_count, entries);
                for (unsigned long i = 0; i < sample_count; i++) {
                    if (entries[i].sval)
                        addReplyBulkCBuffer(c, entries[i].sval, entries[i].slen);
                    else
                        addReplyBulkLongLong(c, entries[i].lval);
                }
                /* Stop producing output for a client that is being closed. */
                if (c->flags & CLIENT_CLOSE_ASAP)
                    break;
            }
            zfree(entries);
            return;
        }
        while(count--) {
            setTypeRandomElement(set, &str, &len, &llele);
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            if (c->flags & CLIENT_CLOSE_ASAP)
                break;
        }
        return;
    }

    /* CASE 2:
     * The number of requested elements is greater than the number of
     * elements inside the set: simply return the whole set. */
    if (count >= size) {
        setTypeIterator si;
        addReplyArrayLen(c,size);
        setTypeInitIterator(&si, set);
        while (setTypeNext(&si, &str, &len, &llele) != -1) {
            if (str == NULL) {
                addReplyBulkLongLong(c,llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            size--;
        }
        setTypeResetIterator(&si);
        serverAssert(size==0);
        return;
    }

    /* CASE 2.5 listpack only. Sampling unique elements, in non-random order.
     * Listpack encoded sets are meant to be relatively small, so
     * SRANDMEMBER_SUB_STRATEGY_MUL isn't necessary and we rather not make
     * copies of the entries. Instead, we emit them directly to the output
     * buffer.
     *
     * And it is inefficient to repeatedly pick one random element from a
     * listpack in CASE 4. So we use this instead. */
    if (set->encoding == OBJ_ENCODING_LISTPACK) {
        unsigned char *lp = set->ptr;
        unsigned char *p = lpFirst(lp);
        unsigned int i = 0;
        addReplyArrayLen(c, count);
        while (count) {
            /* lpNextRandom picks the next sampled entry at or after p,
             * decrementing the remaining sample budget as we go. */
            p = lpNextRandom(lp, p, &i, count--, 1);
            unsigned int len;
            str = (char *)lpGetValue(p, &len, (long long *)&llele);
            if (str == NULL) {
                addReplyBulkLongLong(c, llele);
            } else {
                addReplyBulkCBuffer(c, str, len);
            }
            p = lpNext(lp, p);
            i++;
        }
        return;
    }

    /* For CASE 3 and CASE 4 we need an auxiliary dictionary. */
    d = dictCreate(&sdsReplyDictType);

    /* CASE 3:
     * The number of elements inside the set is not greater than
     * SRANDMEMBER_SUB_STRATEGY_MUL times the number of requested elements.
     * In this case we create a set from scratch with all the elements, and
     * subtract random elements to reach the requested number of elements.
     *
     * This is done because if the number of requested elements is just
     * a bit less than the number of elements in the set, the natural approach
     * used into CASE 4 is highly inefficient. */
    if (count*SRANDMEMBER_SUB_STRATEGY_MUL > size) {
        setTypeIterator si;
        /* Add all the elements into the temporary dictionary. */
        setTypeInitIterator(&si, set);
        dictExpand(d, size);
        while (setTypeNext(&si, &str, &len, &llele) != -1) {
            int retval = DICT_ERR;
            if (str == NULL) {
                retval = dictAdd(d,sdsfromlonglong(llele),NULL);
            } else {
                retval = dictAdd(d, sdsnewlen(str, len), NULL);
            }
            serverAssert(retval == DICT_OK);
        }
        setTypeResetIterator(&si);
        serverAssert(dictSize(d) == size);

        /* Remove random elements to reach the right count. */
        while (size > count) {
            dictEntry *de;
            de = dictGetFairRandomKey(d);
            /* Unlink first so we can free the key before releasing the entry. */
            dictUnlink(d,dictGetKey(de));
            sdsfree(dictGetKey(de));
            dictFreeUnlinkedEntry(d,de);
            size--;
        }
    }

    /* CASE 4: We have a big set compared to the requested number of elements.
     * In this case we can simply get random elements from the set and add
     * to the temporary set, trying to eventually get enough unique elements
     * to reach the specified count. */
    else {
        unsigned long added = 0;
        sds sdsele;
        dictExpand(d, count);
        while (added < count) {
            setTypeRandomElement(set, &str, &len, &llele);
            if (str == NULL) {
                sdsele = sdsfromlonglong(llele);
            } else {
                sdsele = sdsnewlen(str, len);
            }
            /* Try to add the object to the dictionary. If it already exists
             * free it, otherwise increment the number of objects we have
             * in the result dictionary. */
            if (dictAdd(d,sdsele,NULL) == DICT_OK)
                added++;
            else
                sdsfree(sdsele);
        }
    }

    /* CASE 3 & 4: send the result to the user. */
    {
        dictIterator di;
        dictEntry *de;
        addReplyArrayLen(c,count);
        dictInitIterator(&di, d);
        while((de = dictNext(&di)) != NULL)
            addReplyBulkSds(c,dictGetKey(de));
        dictResetIterator(&di);
        dictRelease(d);
    }
}
/* SRANDMEMBER <key> [<count>]
 *
 * Without <count>, reply with a single random member (or null if the key
 * is missing). The <count> variant is delegated to
 * srandmemberWithCountCommand(). */
void srandmemberCommand(client *c) {
    kvobj *set;
    char *str;
    size_t len = 0;
    int64_t llele = 0;
    size_t oldsize = 0;

    if (c->argc == 3) {
        srandmemberWithCountCommand(c);
        return;
    } else if (c->argc > 3) {
        addReplyErrorObject(c,shared.syntaxerr);
        return;
    }

    /* Handle variant without <count> argument. Reply with simple bulk string */
    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.null[c->resp]))
        == NULL || checkType(c,set,OBJ_SET)) return;

    /* NOTE(review): the allocation size is re-sampled around a read-only
     * random pick; presumably setTypeRandomElement() may touch internal
     * buffers — confirm why accounting is updated here. */
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(set);
    setTypeRandomElement(set, &str, &len, &llele);
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
    if (str == NULL) {
        addReplyBulkLongLong(c,llele);
    } else {
        addReplyBulkCBuffer(c, str, len);
    }
}
/* One source operand of a set operation (SINTER/SUNION/SDIFF family):
 * the set object together with its allocation size sampled before the
 * operation, so memory tracking can be updated with the delta afterwards. */
typedef struct setopsrc {
    robj *set;      /* The set object; NULL represents a missing/empty key. */
    size_t oldsize; /* Allocation size sampled before the operation. */
} setopsrc;
/* qsort() comparator: orders setopsrc entries by ascending set cardinality.
 * Both operands are assumed non-NULL (used by SINTER, which bails out
 * earlier when any input set is missing). */
int qsortCompareSetsByCardinality(const void *s1, const void *s2) {
    unsigned long card1 = setTypeSize(((setopsrc*)s1)->set);
    unsigned long card2 = setTypeSize(((setopsrc*)s2)->set);

    if (card1 < card2) return -1;
    if (card1 > card2) return 1;
    return 0;
}
/* qsort() comparator: orders setopsrc entries by DESCENDING cardinality.
 * This is used by SDIFF and in this case we can receive NULL set pointers,
 * which must be handled as empty sets. */
int qsortCompareSetsByRevCardinality(const void *s1, const void *s2) {
    robj *a = ((setopsrc*)s1)->set;
    robj *b = ((setopsrc*)s2)->set;
    unsigned long card_a = a ? setTypeSize(a) : 0;
    unsigned long card_b = b ? setTypeSize(b) : 0;

    /* Reversed comparison: bigger sets sort first. */
    if (card_a > card_b) return -1;
    if (card_a < card_b) return 1;
    return 0;
}
/* SINTER / SINTERSTORE / SINTERCARD
 *
 * 'cardinality_only' work for SINTERCARD, only return the cardinality
 * with minimum processing and memory overheads.
 *
 * 'limit' work for SINTERCARD, stop searching after reaching the limit.
 * Passing a 0 means unlimited.
 *
 * Strategy: sort the input sets by ascending size, iterate the smallest
 * one and keep only members present in every other set. Depending on the
 * mode, matching members are counted, streamed to the client, or collected
 * into a destination set stored under 'dstkey'. */
void sinterGenericCommand(client *c, robj **setkeys,
                          unsigned long setnum, robj *dstkey,
                          int cardinality_only, unsigned long limit) {
    setopsrc *sets = zmalloc(sizeof(setopsrc)*setnum);
    setTypeIterator si;
    robj *dstset = NULL;
    char *str;
    size_t len = 0;
    int64_t intobj = 0;
    void *replylen = NULL;     /* Deferred length node for streamed replies. */
    unsigned long j, cardinality = 0;
    int encoding, empty = 0;

    /* Look up every input key, remembering its pre-operation allocation
     * size for memory tracking. */
    for (j = 0; j < setnum; j++) {
        kvobj *kv = lookupKeyRead(c->db, setkeys[j]);
        if (!kv) {
            /* A NULL is considered an empty set */
            empty += 1;
            sets[j].set = NULL;
            sets[j].oldsize = 0;
            continue;
        }
        if (checkType(c, kv, OBJ_SET)) {
            zfree(sets);
            return;
        }
        sets[j].set = kv;
        if (server.memory_tracking_enabled)
            sets[j].oldsize = kvobjAllocSize(kv);
    }

    /* Set intersection with an empty set always results in an empty set.
     * Return ASAP if there is an empty set. */
    if (empty > 0) {
        zfree(sets);
        if (dstkey) {
            /* STORE mode: an empty result means deleting any existing dest. */
            if (dbDelete(c->db,dstkey)) {
                keyModified(c,c->db,dstkey,NULL,1);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
                server.dirty++;
            }
            addReply(c,shared.czero);
        } else if (cardinality_only) {
            addReplyLongLong(c,cardinality);
        } else {
            addReply(c,shared.emptyset[c->resp]);
        }
        return;
    }

    /* Sort sets from the smallest to largest, this will improve our
     * algorithm's performance */
    qsort(sets,setnum,sizeof(setopsrc),qsortCompareSetsByCardinality);

    /* The first thing we should output is the total number of elements...
     * since this is a multi-bulk write, but at this stage we don't know
     * the intersection set size, so we use a trick, append an empty object
     * to the output list and save the pointer to later modify it with the
     * right length */
    if (dstkey) {
        /* If we have a target key where to store the resulting set
         * create this key with an empty set inside */
        if (sets[0].set->encoding == OBJ_ENCODING_INTSET) {
            /* The first set is an intset, so the result is an intset too. The
             * elements are inserted in ascending order which is efficient in an
             * intset. */
            dstset = createIntsetObject();
        } else if (sets[0].set->encoding == OBJ_ENCODING_LISTPACK) {
            /* To avoid many reallocs, we estimate that the result is a listpack
             * of approximately the same size as the first set. Then we shrink
             * it or possibly convert it to intset in the end. */
            unsigned char *lp = lpNew(lpBytes(sets[0].set->ptr));
            dstset = createObject(OBJ_SET, lp);
            dstset->encoding = OBJ_ENCODING_LISTPACK;
        } else {
            /* We start off with a listpack, since it's more efficient to append
             * to than an intset. Later we can convert it to intset or a
             * hashtable. */
            dstset = createSetListpackObject();
        }
    } else if (!cardinality_only) {
        replylen = addReplyDeferredLen(c);
    }

    /* Iterate all the elements of the first (smallest) set, and test
     * the element against all the other sets, if at least one set does
     * not include the element it is discarded */
    int only_integers = 1;     /* Tracks whether every stored member is numeric. */
    setTypeInitIterator(&si, sets[0].set);
    while((encoding = setTypeNext(&si, &str, &len, &intobj)) != -1) {
        for (j = 1; j < setnum; j++) {
            /* Same underlying object as the reference set: trivially a member. */
            if (sets[j].set == sets[0].set) continue;
            if (!setTypeIsMemberAux(sets[j].set, str, len, intobj,
                                    encoding == OBJ_ENCODING_HT))
                break;
        }

        /* Only take action when all sets contain the member */
        if (j == setnum) {
            if (cardinality_only) {
                cardinality++;

                /* We stop the searching after reaching the limit. */
                if (limit && cardinality >= limit)
                    break;
            } else if (!dstkey) {
                if (str != NULL)
                    addReplyBulkCBuffer(c, str, len);
                else
                    addReplyBulkLongLong(c,intobj);
                cardinality++;
            } else {
                if (str && only_integers) {
                    /* It may be an integer although we got it as a string. */
                    if (encoding == OBJ_ENCODING_HT &&
                        string2ll(str, len, (long long *)&intobj))
                    {
                        if (dstset->encoding == OBJ_ENCODING_LISTPACK ||
                            dstset->encoding == OBJ_ENCODING_INTSET)
                        {
                            /* Adding it as an integer is more efficient. */
                            str = NULL;
                        }
                    } else {
                        /* It's not an integer */
                        only_integers = 0;
                    }
                }
                setTypeAddAux(dstset, str, len, intobj, encoding == OBJ_ENCODING_HT);
            }
        }
    }
    setTypeResetIterator(&si);

    /* Re-sample allocation sizes of all inputs and update slot accounting. */
    if (server.memory_tracking_enabled) {
        for (j = 0; j < setnum; j++) {
            robj *obj = sets[j].set;
            if (!obj) continue;
            updateSlotAllocSize(c->db, getKeySlot(setkeys[j]->ptr), obj,
                                sets[j].oldsize, kvobjAllocSize(obj));
        }
    }

    if (cardinality_only) {
        addReplyLongLong(c,cardinality);
    } else if (dstkey) {
        /* Store the resulting set into the target, if the intersection
         * is not an empty set. */
        if (setTypeSize(dstset) > 0) {
            if (only_integers) maybeConvertToIntset(dstset);
            if (dstset->encoding == OBJ_ENCODING_LISTPACK) {
                /* We allocated too much memory when we created it to avoid
                 * frequent reallocs. Therefore, we shrink it now. */
                dstset->ptr = lpShrinkToFit(dstset->ptr);
            }
            setKey(c, c->db, dstkey, &dstset, 0);
            addReplyLongLong(c,setTypeSize(dstset));
            notifyKeyspaceEvent(NOTIFY_SET,"sinterstore",
                                dstkey,c->db->id);
            server.dirty++;
        } else {
            addReply(c,shared.czero);
            if (dbDelete(c->db,dstkey)) {
                server.dirty++;
                keyModified(c,c->db,dstkey,NULL,1);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
            }
            decrRefCount(dstset);
        }
    } else {
        setDeferredSetLen(c,replylen,cardinality);
    }
    zfree(sets);
}
/* SINTER key [key ...]
 * Thin wrapper: reply mode (no dstkey, no cardinality-only, no limit). */
void sinterCommand(client *c) {
    sinterGenericCommand(c, c->argv+1, c->argc-1, NULL, 0, 0);
}
/* SMEMBERS key
 *
 * Reply with every member of the set, as a set reply. A missing key is
 * served as an empty set. */
void smembersCommand(client *c) {
    setTypeIterator si;
    char *str;
    size_t len = 0;
    int64_t intobj = 0;
    size_t oldsize = 0;
    kvobj *setobj = lookupKeyRead(c->db, c->argv[1]);

    if (checkType(c,setobj,OBJ_SET)) return;
    if (!setobj) {
        addReply(c, shared.emptyset[c->resp]);
        return;
    }

    /* Prepare the response. */
    unsigned long length = setTypeSize(setobj);
    addReplySetLen(c,length);
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(setobj);

    /* Iterate through the elements of the set. */
    setTypeInitIterator(&si, setobj);
    while (setTypeNext(&si, &str, &len, &intobj) != -1) {
        if (str != NULL)
            addReplyBulkCBuffer(c, str, len);
        else
            addReplyBulkLongLong(c, intobj);
        /* Count down so we can assert the iterator yielded exactly
         * setTypeSize() elements. */
        length--;
    }
    setTypeResetIterator(&si);
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), setobj, oldsize, kvobjAllocSize(setobj));
    serverAssert(length == 0); /* fail on corrupt data */
}
/* SINTERCARD numkeys key [key ...] [LIMIT limit]
 *
 * Parse numkeys and the optional LIMIT clause, then run the generic
 * intersection in cardinality-only mode. */
void sinterCardCommand(client *c) {
    long j;
    long numkeys = 0; /* Number of keys. */
    long limit = 0;   /* 0 means no limit. */

    if (getRangeLongFromObjectOrReply(c, c->argv[1], 1, LONG_MAX,
                                      &numkeys, "numkeys should be greater than 0") != C_OK)
        return;
    if (numkeys > (c->argc - 2)) {
        addReplyError(c, "Number of keys can't be greater than number of args");
        return;
    }

    /* Any arguments after the keys must be LIMIT <n> pairs. */
    for (j = 2 + numkeys; j < c->argc; j++) {
        char *opt = c->argv[j]->ptr;
        int moreargs = (c->argc - 1) - j;

        if (!strcasecmp(opt, "LIMIT") && moreargs) {
            j++;
            if (getPositiveLongFromObjectOrReply(c, c->argv[j], &limit,
                                                 "LIMIT can't be negative") != C_OK)
                return;
        } else {
            addReplyErrorObject(c, shared.syntaxerr);
            return;
        }
    }

    sinterGenericCommand(c, c->argv+2, numkeys, NULL, 1, limit);
}
/* SINTERSTORE destination key [key ...]
 * Thin wrapper: store mode, destination is argv[1]. */
void sinterstoreCommand(client *c) {
    sinterGenericCommand(c, c->argv+2, c->argc-2, c->argv[1], 0, 0);
}
/* Generic implementation behind SUNION / SUNIONSTORE / SDIFF / SDIFFSTORE.
 *
 * 'op' selects SET_OP_UNION or SET_OP_DIFF. With a non-NULL 'dstkey' the
 * result is stored under that key, otherwise it is streamed to the client.
 * Missing keys are treated as empty sets. For SDIFF one of two algorithms
 * is chosen based on an estimated cost (see comments below). */
void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum,
                              robj *dstkey, int op) {
    setopsrc *sets = zmalloc(sizeof(setopsrc)*setnum);
    setTypeIterator si;
    robj *dstset = NULL;
    int dstset_encoding = OBJ_ENCODING_INTSET;
    char *str;
    size_t len = 0;
    int64_t llval = 0;
    int encoding;
    int j, cardinality = 0;
    int diff_algo = 1;
    int sameset = 0;   /* Set when a later key is the same object as the first. */

    for (j = 0; j < setnum; j++) {
        kvobj *setobj = lookupKeyRead(c->db, setkeys[j]);
        if (!setobj) {
            sets[j].set = NULL;
            sets[j].oldsize = 0;
            continue;
        }
        if (checkType(c,setobj,OBJ_SET)) {
            zfree(sets);
            return;
        }
        /* For a SET's encoding, according to the factory method setTypeCreate(), currently have 3 types:
         * 1. OBJ_ENCODING_INTSET
         * 2. OBJ_ENCODING_LISTPACK
         * 3. OBJ_ENCODING_HT
         * 'dstset_encoding' is used to determine which kind of encoding to use when initialize 'dstset'.
         *
         * If all sets are all OBJ_ENCODING_INTSET encoding or 'dstkey' is not null, keep 'dstset'
         * OBJ_ENCODING_INTSET encoding when initialize. Otherwise it is not efficient to create the 'dstset'
         * from intset and then convert to listpack or hashtable.
         *
         * If one of the set is OBJ_ENCODING_LISTPACK, let's set 'dstset' to hashtable default encoding,
         * the hashtable is more efficient when find and compare than the listpack. The corresponding
         * time complexity are O(1) vs O(n). */
        if (!dstkey && dstset_encoding == OBJ_ENCODING_INTSET &&
            (setobj->encoding == OBJ_ENCODING_LISTPACK || setobj->encoding == OBJ_ENCODING_HT)) {
            dstset_encoding = OBJ_ENCODING_HT;
        }
        sets[j].set = setobj;
        if (server.memory_tracking_enabled)
            sets[j].oldsize = kvobjAllocSize(setobj);
        if (j > 0 && sets[0].set == sets[j].set) {
            sameset = 1;
        }
    }

    /* Select what DIFF algorithm to use.
     *
     * Algorithm 1 is O(N*M) where N is the size of the element first set
     * and M the total number of sets.
     *
     * Algorithm 2 is O(N) where N is the total number of elements in all
     * the sets.
     *
     * We compute what is the best bet with the current input here. */
    if (op == SET_OP_DIFF && sets[0].set && !sameset) {
        long long algo_one_work = 0, algo_two_work = 0;

        for (j = 0; j < setnum; j++) {
            if (sets[j].set == NULL) continue;

            algo_one_work += setTypeSize(sets[0].set);
            algo_two_work += setTypeSize(sets[j].set);
        }

        /* Algorithm 1 has better constant times and performs less operations
         * if there are elements in common. Give it some advantage. */
        algo_one_work /= 2;
        diff_algo = (algo_one_work <= algo_two_work) ? 1 : 2;

        if (diff_algo == 1 && setnum > 1) {
            /* With algorithm 1 it is better to order the sets to subtract
             * by decreasing size, so that we are more likely to find
             * duplicated elements ASAP. */
            qsort(sets+1,setnum-1,sizeof(setopsrc),
                  qsortCompareSetsByRevCardinality);
        }
    }

    /* We need a temp set object to store our union/diff. If the dstkey
     * is not NULL (that is, we are inside an SUNIONSTORE/SDIFFSTORE operation) then
     * this set object will be the resulting object to set into the target key*/
    if (dstset_encoding == OBJ_ENCODING_INTSET) {
        dstset = createIntsetObject();
    } else {
        dstset = createSetObject();
    }

    if (op == SET_OP_UNION) {
        /* Union is trivial, just add every element of every set to the
         * temporary set. */
        for (j = 0; j < setnum; j++) {
            if (!sets[j].set) continue; /* non existing keys are like empty sets */

            setTypeInitIterator(&si, sets[j].set);
            while ((encoding = setTypeNext(&si, &str, &len, &llval)) != -1) {
                cardinality += setTypeAddAux(dstset, str, len, llval, encoding == OBJ_ENCODING_HT);
            }
            setTypeResetIterator(&si);
        }
    } else if (op == SET_OP_DIFF && sameset) {
        /* At least one of the sets is the same one (same key) as the first one, result must be empty. */
    } else if (op == SET_OP_DIFF && sets[0].set && diff_algo == 1) {
        /* DIFF Algorithm 1:
         *
         * We perform the diff by iterating all the elements of the first set,
         * and only adding it to the target set if the element does not exist
         * into all the other sets.
         *
         * This way we perform at max N*M operations, where N is the size of
         * the first set, and M the number of sets. */
        setTypeInitIterator(&si, sets[0].set);
        while ((encoding = setTypeNext(&si, &str, &len, &llval)) != -1) {
            for (j = 1; j < setnum; j++) {
                if (!sets[j].set) continue; /* no key is an empty set. */
                if (sets[j].set == sets[0].set) break; /* same set! */
                if (setTypeIsMemberAux(sets[j].set, str, len, llval,
                                       encoding == OBJ_ENCODING_HT))
                    break;
            }
            if (j == setnum) {
                /* There is no other set with this element. Add it. */
                cardinality += setTypeAddAux(dstset, str, len, llval, encoding == OBJ_ENCODING_HT);
            }
        }
        setTypeResetIterator(&si);
    } else if (op == SET_OP_DIFF && sets[0].set && diff_algo == 2) {
        /* DIFF Algorithm 2:
         *
         * Add all the elements of the first set to the auxiliary set.
         * Then remove all the elements of all the next sets from it.
         *
         * This is O(N) where N is the sum of all the elements in every
         * set. */
        for (j = 0; j < setnum; j++) {
            if (!sets[j].set) continue; /* non existing keys are like empty sets */

            setTypeInitIterator(&si, sets[j].set);
            while((encoding = setTypeNext(&si, &str, &len, &llval)) != -1) {
                if (j == 0) {
                    cardinality += setTypeAddAux(dstset, str, len, llval,
                                                 encoding == OBJ_ENCODING_HT);
                } else {
                    cardinality -= setTypeRemoveAux(dstset, str, len, llval,
                                                    encoding == OBJ_ENCODING_HT);
                }
            }
            setTypeResetIterator(&si);

            /* Exit if result set is empty as any additional removal
             * of elements will have no effect. */
            if (cardinality == 0) break;
        }
    }

    /* Re-sample allocation sizes of all inputs and update slot accounting. */
    if (server.memory_tracking_enabled) {
        for (j = 0; j < setnum; j++) {
            robj *obj = sets[j].set;
            if (!obj) continue;
            updateSlotAllocSize(c->db, getKeySlot(setkeys[j]->ptr), obj,
                                sets[j].oldsize, kvobjAllocSize(obj));
        }
    }

    /* Output the content of the resulting set, if not in STORE mode */
    if (!dstkey) {
        addReplySetLen(c,cardinality);
        setTypeInitIterator(&si, dstset);
        while (setTypeNext(&si, &str, &len, &llval) != -1) {
            if (str)
                addReplyBulkCBuffer(c, str, len);
            else
                addReplyBulkLongLong(c, llval);
        }
        setTypeResetIterator(&si);
        /* Free the temporary set, lazily if the server is configured to. */
        server.lazyfree_lazy_server_del ? freeObjAsync(NULL, dstset, -1) :
                                          decrRefCount(dstset);
    } else {
        /* If we have a target key where to store the resulting set
         * create this key with the result set inside */
        if (setTypeSize(dstset) > 0) {
            setKey(c, c->db, dstkey, &dstset, 0);
            addReplyLongLong(c,setTypeSize(dstset));
            notifyKeyspaceEvent(NOTIFY_SET,
                                op == SET_OP_UNION ? "sunionstore" : "sdiffstore",
                                dstkey,c->db->id);
            server.dirty++;
        } else {
            addReply(c,shared.czero);
            if (dbDelete(c->db,dstkey)) {
                server.dirty++;
                keyModified(c,c->db,dstkey,NULL,1);
                notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id);
            }
            decrRefCount(dstset);
        }
    }
    zfree(sets);
}
/* SUNION key [key ...]
 * Thin wrapper: union in reply mode. */
void sunionCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_UNION);
}
/* SUNIONSTORE destination key [key ...]
 * Thin wrapper: union stored under argv[1]. */
void sunionstoreCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_UNION);
}
/* SDIFF key [key ...]
 * Thin wrapper: difference in reply mode. */
void sdiffCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+1,c->argc-1,NULL,SET_OP_DIFF);
}
/* SDIFFSTORE destination key [key ...]
 * Thin wrapper: difference stored under argv[1]. */
void sdiffstoreCommand(client *c) {
    sunionDiffGenericCommand(c,c->argv+2,c->argc-2,c->argv[1],SET_OP_DIFF);
}
/* SSCAN key cursor [MATCH pattern] [COUNT count]
 *
 * Incrementally iterate set members via the generic SCAN machinery.
 * Allocation size is re-sampled around the scan so slot memory
 * accounting stays accurate. */
void sscanCommand(client *c) {
    kvobj *set;
    unsigned long long cursor;
    size_t oldsize = 0;

    if (parseScanCursorOrReply(c,c->argv[2],&cursor) == C_ERR) return;
    if ((set = lookupKeyReadOrReply(c,c->argv[1],shared.emptyscan)) == NULL ||
        checkType(c,set,OBJ_SET)) return;
    if (server.memory_tracking_enabled)
        oldsize = kvobjAllocSize(set);
    scanGenericCommand(c,set,cursor);
    if (server.memory_tracking_enabled)
        updateSlotAllocSize(c->db, getKeySlot(c->argv[1]->ptr), set, oldsize, kvobjAllocSize(set));
}
from gluon.contrib.memcache.memcache import Client
from gluon.cache import CacheAbstract
import time
"""
examle of usage:
cache.memcache = MemcacheClient(request,[127.0.0.1:11211],debug=true)
"""
import cPickle as pickle
import thread
from gluon import current
DEFAULT_TIME_EXPIRE = 300 # seconds (must be the same as cache.ram)
def MemcacheClient(*a, **b):
    """Return a per-thread singleton MemcacheClientObj.

    The instance is cached on gluon's ``current`` thread-local object so
    repeated calls within the same thread reuse one client rather than
    constructing a new one each time.
    """
    # Bug fix: the original tested hasattr(current, '__mc_instance') but
    # stored the client under current.__memcache_client, so the cached
    # instance was never found and a fresh client was built on every call.
    if not hasattr(current, '__memcache_client'):
        current.__memcache_client = MemcacheClientObj(*a, **b)
    return current.__memcache_client
class MemcacheClientObj(Client):
    """web2py cache client backed by memcached.

    Wraps the bundled memcache ``Client`` with the ``cache.ram``-style
    calling convention ``cache(key, f, time_expire)``. Values are stored
    as ``(timestamp, value)`` tuples with a long memcached TTL and
    expiration is enforced client-side against ``time_expire``.
    """

    # Shared across instances: per-application stats storage keyed by app name.
    meta_storage = {}
    # Upper bound (seconds) used as the actual memcached TTL.
    max_time_expire = 24 * 3600

    def __init__(self, request, servers, debug=0, pickleProtocol=0,
                 pickler=pickle.Pickler, unpickler=pickle.Unpickler,
                 pload=None, pid=None,
                 default_time_expire=DEFAULT_TIME_EXPIRE):
        self.request = request
        self.default_time_expire = default_time_expire
        if request:
            app = request.application
        else:
            app = ''
        Client.__init__(self, servers, debug, pickleProtocol,
                        pickler, unpickler, pload, pid)
        if app not in self.meta_storage:
            self.storage = self.meta_storage[app] = {
                CacheAbstract.cache_stats_name: {
                    'hit_total': 0,
                    'misses': 0,
                }}
        else:
            self.storage = self.meta_storage[app]

    def __call__(self, key, f, time_expire='default'):
        """Return the cached value for `key`, recomputing with f() when
        the value is missing or older than `time_expire` seconds.

        f=None forces deletion of the value; time_expire=0 forces
        recomputation; time_expire=None means "never expire" (capped at
        max_time_expire).
        """
        if time_expire == 'default':
            time_expire = self.default_time_expire
        if time_expire is None:  # was `== None`; identity test is idiomatic
            time_expire = self.max_time_expire
        # this must be commented because get and set are redefined
        # key = self.__keyFormat__(key)
        now = time.time()
        value = None
        if f is None:  # force deletion of value
            self.delete(key)
            return None
        elif time_expire == 0:  # value forced expired
            item = None  # value to be computed
        else:
            item = self.get(key)
            if item:
                if not isinstance(item, (list, tuple)):
                    # Raw value stored by increment(); serve it as-is.
                    value = item
                elif (item[0] < now - time_expire):  # value expired
                    item = None  # value to be computed
                else:
                    value = item[1]
        if not item:
            value = f()
            self.set(key, (now, value), self.max_time_expire)
        return value

    def increment(self, key, value=1, time_expire='default'):
        """ time_expire is ignored """
        if time_expire == 'default':
            time_expire = self.default_time_expire
        newKey = self.__keyFormat__(key)
        obj = Client.get(self, newKey)
        if obj:
            # Bug fix: original tested isinstance(obj, (int, double, long));
            # `double` is not a Python name and raised NameError here.
            if isinstance(obj, (int, long, float)):
                return Client.incr(self, newKey, value)
            else:
                # Stored as a (timestamp, value) tuple by __call__().
                value += obj[1]
                Client.set(self, newKey, (time.time(), value),
                           self.max_time_expire)
                return value
        else:
            Client.set(self, newKey, value, self.max_time_expire)
            return value

    def set(self, key, value, time_expire='default'):
        if time_expire == 'default':
            time_expire = self.default_time_expire
        newKey = self.__keyFormat__(key)
        return Client.set(self, newKey, value, time_expire)

    def get(self, key):
        newKey = self.__keyFormat__(key)
        return Client.get(self, newKey)

    def delete(self, key):
        newKey = self.__keyFormat__(key)
        return Client.delete(self, newKey)

    def __keyFormat__(self, key):
        # Namespace keys per application; memcached forbids spaces in keys.
        # NOTE(review): assumes self.request is not None here — confirm
        # callers never construct the client without a request.
        return '%s/%s' % (self.request.application, key.replace(' ', '_'))
import numpy as np
from pyrr import Matrix44
import moderngl
from ported._example import Example
def grid(size, steps):
    """Build vertex data for a square wireframe grid in the z=0 plane.

    Produces line-segment endpoints for `steps` horizontal and `steps`
    vertical lines spanning [-size, size] in both axes.
    """
    ticks = np.linspace(-size, size, steps)
    endpoint_pairs = np.repeat(ticks, 2)          # each line has 2 endpoints
    extents = np.tile([-size, size], steps)       # alternating min/max
    flat_z = np.zeros(steps * 2)                  # grid lies in z = 0
    lines_one_way = np.dstack([endpoint_pairs, extents, flat_z])
    lines_other_way = np.dstack([extents, endpoint_pairs, flat_z])
    return np.concatenate([lines_one_way, lines_other_way])
class SimpleGrid(Example):
    """moderngl example: render a flat dark-gray wireframe grid with a
    fixed perspective camera."""
    title = "Simple Grid"
    gl_version = (3, 3)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Minimal shader pair: transform positions by Mvp, paint dark gray.
        self.prog = self.ctx.program(
            vertex_shader='''
                #version 330
                uniform mat4 Mvp;
                in vec3 in_vert;
                void main() {
                    gl_Position = Mvp * vec4(in_vert, 1.0);
                }
            ''',
            fragment_shader='''
                #version 330
                out vec4 f_color;
                void main() {
                    f_color = vec4(0.1, 0.1, 0.1, 1.0);
                }
            ''',
        )

        # Uniform handle, written every frame in render().
        self.mvp = self.prog['Mvp']
        # Static vertex data: a 15-unit half-extent grid with 10 lines each way.
        self.vbo = self.ctx.buffer(grid(15, 10).astype('f4'))
        self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert')

    def render(self, time, frame_time):
        self.ctx.clear(1.0, 1.0, 1.0)
        self.ctx.enable(moderngl.DEPTH_TEST)

        # Fixed camera looking at the origin, z-up.
        proj = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0)
        lookat = Matrix44.look_at(
            (40.0, 30.0, 30.0),
            (0.0, 0.0, 0.0),
            (0.0, 0.0, 1.0),
        )

        self.mvp.write((proj * lookat).astype('f4'))
        self.vao.render(moderngl.LINES)
if __name__ == '__main__':
    # Launch the example window via the shared Example runner.
    SimpleGrid.run()
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_5_2_7(HarnessCase):
    """Thread certification test case 5.2.7, run with the DUT as a router."""
    role = HarnessCase.ROLE_ROUTER  # DUT operates in the router role
    case = '5 2 7'                  # Harness test-case identifier
    golden_devices_required = 16    # Reference devices the topology needs

    def on_dialog(self, dialog, title):
        """Harness dialog callback: this case needs no manual interaction."""
        pass
if __name__ == '__main__':
    # Discover and run this case under the standard unittest runner.
    unittest.main()
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from scipy.special import betainc
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
check_pickling, check_rvs_broadcast)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5

# Distributions with known-failing parameterizations, kept out of the main
# loop. Last four of these fail all around. Need to be checked
distcont_extra = [
    ['betaprime', (100, 86)],
    ['fatiguelife', (5,)],
    ['mielke', (4.6420495492121487, 0.59707419545516938)],
    ['invweibull', (0.58847112119264788,)],
    # burr: sample mean test fails still for c<1
    ['burr', (0.94839838075366045, 4.3820284068855795)],
    # genextreme: sample mean test, sf-logsf test fail
    ['genextreme', (3.3184017469423535,)],
]


# Distributions deferred to the @slow test; distslow are sorted by speed
# (very slow to slow).
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
            'vonmises', 'vonmises_line', 'mielke', 'semicircular',
            'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']

# These distributions fail the complex derivative test below.
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
# on the implementation details of corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = set(['beta', 'betaprime', 'chi', 'chi2', 'dgamma', 'dweibull',
                   'erlang', 'f', 'gamma', 'gausshyper', 'gengamma',
                   'gennorm', 'genpareto', 'halfgennorm', 'invgamma',
                   'ksone', 'kstwobign', 'levy_l', 'loggamma', 'logistic',
                   'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2',
                   'pearson3', 'rice', 't', 'skewnorm', 'tukeylambda',
                   'vonmises', 'vonmises_line',])
def test_cont_basic():
    """Generator test: run the battery of distribution checks over every
    continuous distribution in `distcont`, skipping the slow ones (covered
    by test_cont_basic_slow) and levy_stable (rvs-only)."""
    # this test skips slow distributions
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                category=integrate.IntegrationWarning)
        for distname, arg in distcont[:]:
            if distname in distslow:
                continue
            # Bug fix: was `distname is 'levy_stable'` — an identity
            # comparison with a string literal, which is implementation-
            # dependent and a SyntaxWarning on modern Python.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            np.random.seed(765456)
            sn = 500
            rvs = distfn.rvs(size=sn, *arg)
            sm = rvs.mean()
            sv = rvs.var()
            m, v = distfn.stats(*arg)

            yield (check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn,
                   distname + 'sample mean test')
            yield check_cdf_ppf, distfn, arg, distname
            yield check_sf_isf, distfn, arg, distname
            yield check_pdf, distfn, arg, distname
            yield check_pdf_logpdf, distfn, arg, distname
            yield check_cdf_logcdf, distfn, arg, distname
            yield check_sf_logsf, distfn, arg, distname

            alpha = 0.01
            yield check_distribution_rvs, distname, arg, alpha, rvs

            locscale_defaults = (0, 1)
            meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
                     distfn.logsf]
            # make sure arguments are within support
            spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
                      'pareto': 1.5, 'tukeylambda': 0.3}
            x = spec_x.get(distname, 0.5)
            yield check_named_args, distfn, x, arg, locscale_defaults, meths
            yield check_random_state_property, distfn, arg
            yield check_pickling, distfn, arg

            # Entropy
            skp = npt.dec.skipif
            yield check_entropy, distfn, arg, distname

            if distfn.numargs == 0:
                yield check_vecentropy, distfn, arg
            if distfn.__class__._entropy != stats.rv_continuous._entropy:
                yield check_private_entropy, distfn, arg, stats.rv_continuous

            yield check_edge_support, distfn, arg

            yield check_meth_dtype, distfn, arg, meths
            yield check_ppf_dtype, distfn, arg
            yield skp(distname in fails_cmplx)(check_cmplx_deriv), distfn, arg

            knf = npt.dec.knownfailureif
            yield (knf(distname == 'truncnorm')(check_ppf_private), distfn,
                   arg, distname)
def test_levy_stable_random_state_property():
    """levy_stable implements only rvs() and is therefore skipped by the
    main loop in test_cont_basic(); run the random_state check for it on
    its own here."""
    dist = stats.levy_stable
    shape_params = (0.5, 0.1)
    check_random_state_property(dist, shape_params)
@npt.dec.slow
def test_cont_basic_slow():
    """Same battery of checks as test_cont_basic(), but only for the
    distributions listed in ``distslow``."""
    # same as above for slow distributions
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                category=integrate.IntegrationWarning)
        for distname, arg in distcont[:]:
            if distname not in distslow:
                continue
            # BUG FIX: was `distname is 'levy_stable'` -- identity comparison
            # of strings only works by CPython interning accident; use `==`.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            np.random.seed(765456)
            sn = 500
            rvs = distfn.rvs(size=sn, *arg)
            sm = rvs.mean()
            sv = rvs.var()
            m, v = distfn.stats(*arg)

            # FIX: added the missing space in the message (was
            # 'normsample mean test').
            yield (check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn,
                   distname + ' sample mean test')
            yield check_cdf_ppf, distfn, arg, distname
            yield check_sf_isf, distfn, arg, distname
            yield check_pdf, distfn, arg, distname
            yield check_pdf_logpdf, distfn, arg, distname
            yield check_cdf_logcdf, distfn, arg, distname
            yield check_sf_logsf, distfn, arg, distname
            # yield check_oth, distfn, arg # is still missing

            alpha = 0.01
            yield check_distribution_rvs, distname, arg, alpha, rvs

            locscale_defaults = (0, 1)
            meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
                     distfn.logsf]
            # make sure arguments are within support
            x = 0.5
            if distname == 'invweibull':
                arg = (1,)
            elif distname == 'ksone':
                arg = (3,)
            yield check_named_args, distfn, x, arg, locscale_defaults, meths
            yield check_random_state_property, distfn, arg
            yield check_pickling, distfn, arg

            # Entropy
            skp = npt.dec.skipif
            ks_cond = distname in ['ksone', 'kstwobign']
            yield skp(ks_cond)(check_entropy), distfn, arg, distname

            if distfn.numargs == 0:
                yield check_vecentropy, distfn, arg
            if (distfn.__class__._entropy != stats.rv_continuous._entropy
                    and distname != 'vonmises'):
                yield check_private_entropy, distfn, arg, stats.rv_continuous

            yield check_edge_support, distfn, arg

            yield check_meth_dtype, distfn, arg, meths
            yield check_ppf_dtype, distfn, arg
            yield skp(distname in fails_cmplx)(check_cmplx_deriv), distfn, arg
@npt.dec.slow
def test_moments():
    """Generator test: check the analytic mvsk moments of every continuous
    distribution against numerically-integrated expectations."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                category=integrate.IntegrationWarning)
        knf = npt.dec.knownfailureif
        # Distributions with known failures, kept as expected-failures.
        fail_normalization = set(['vonmises', 'ksone'])
        fail_higher = set(['vonmises', 'ksone', 'ncf'])

        for distname, arg in distcont[:]:
            # BUG FIX: was `distname is 'levy_stable'` -- identity comparison
            # of strings only works by CPython interning accident; use `==`.
            if distname == 'levy_stable':
                continue
            distfn = getattr(stats, distname)
            m, v, s, k = distfn.stats(*arg, moments='mvsk')
            cond1 = distname in fail_normalization
            cond2 = distname in fail_higher
            msg = distname + ' fails moments'
            yield knf(cond1, msg)(check_normalization), distfn, arg, distname
            yield knf(cond2, msg)(check_mean_expect), distfn, arg, m, distname
            yield (knf(cond2, msg)(check_var_expect), distfn, arg, m, v,
                   distname)
            yield (knf(cond2, msg)(check_skew_expect), distfn, arg, m, v, s,
                   distname)
            yield (knf(cond2, msg)(check_kurt_expect), distfn, arg, m, v, k,
                   distname)
            yield check_loc_scale, distfn, arg, m, v, distname
            yield check_moment, distfn, arg, m, v, distname
def test_rvs_broadcast():
    """Generator test: rvs() must broadcast loc, scale and the shape
    parameters together and produce the broadcast result shape."""
    for dist, shape_args in distcont:
        # If shape_only is True, the distribution's _rvs consumes more than
        # one random number per variate, so the result of broadcasting can
        # only be compared by shape with the numpy.vectorize'd rvs(), not by
        # value.  Membership in this list is an implementation detail of the
        # distribution, not a requirement; update it if an rvs()
        # implementation changes.
        shape_only = dist in ['betaprime', 'dgamma', 'exponnorm',
                              'nct', 'dweibull', 'rice', 'levy_stable',
                              'skewnorm']

        distfunc = getattr(stats, dist)
        nargs = distfunc.numargs
        # Give each shape parameter a distinct, mutually-broadcastable shape.
        param_shapes = [(k + 4,) + (1,) * (k + 2) for k in range(nargs)]
        allargs = [shape_args[k] * np.ones(shp)
                   for k, shp in enumerate(param_shapes)]
        allargs.extend([np.zeros(2), np.ones((3, 1))])
        # Expected shape when loc, scale and all shape parameters are
        # broadcast together.
        bshape = [k + 4 for k in reversed(range(nargs))] + [3, 2]
        yield check_rvs_broadcast, distfunc, dist, allargs, bshape, shape_only, 'd'
def test_rvs_gh2069_regression():
    """Regression tests for gh-2069: in scipy 0.17 and earlier, rvs() with
    broadcast loc/scale repeated a single draw instead of drawing fresh
    values, e.g.::

        >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
        array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
    """
    np.random.seed(123)
    # Each (loc, scale) combination must yield distinct values; the draws
    # happen in the same order as before, so the sampled numbers match.
    broadcast_cases = [
        (np.zeros(5), 1),
        (0, np.ones(5)),
        (np.zeros(5), np.ones(5)),
        (np.array([[0], [0]]), np.ones(5)),
    ]
    for loc, scale in broadcast_cases:
        vals = stats.norm.rvs(loc=loc, scale=scale)
        d = np.diff(vals.ravel())
        npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")

    # Incompatible broadcast shapes must raise.
    npt.assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
                      [[1, 1], [1, 1]], 1)
    npt.assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
    npt.assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
                      [[1], [2]], (4,))
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
    """Compare the sample mean/variance against the theoretical moments,
    skipping whichever theoretical moment is infinite."""
    # (historical note: this check was once skipped silently by nose)
    finite_checks = ((m, lambda: check_sample_mean(sm, sv, sn, m)),
                     (v, lambda: check_sample_var(sv, sn, v)))
    for moment, run_check in finite_checks:
        if np.isfinite(moment):
            run_check()
def check_sample_mean(sm, v, n, popmean):
    """Two-sided one-sample t-test that the sample mean ``sm`` (with sample
    variance ``v`` and size ``n``) is consistent with ``popmean``.

    Mirrors stats.stats.ttest_1samp(a, popmean): computes the t statistic
    and its two-tailed probability, then asserts p > 0.01.
    """
    df = n - 1
    svar = ((n - 1) * v) / float(df)  # looks redundant, kept from ttest_1samp
    t = (sm - popmean) / np.sqrt(svar * (1.0 / n))
    # Two-tailed p-value via the regularized incomplete beta function.
    prob = betainc(0.5 * df, 0.5, df / (df + t * t))
    npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
                (t, prob, popmean, sm))
def check_sample_var(sv, n, popvar):
    """Two-sided chi-square test that the sample variance ``sv`` (sample
    size ``n``) is consistent with the hypothesized variance ``popvar``.

    BUG FIX: the statistic was previously computed as
    ``(n-1)*popvar/float(popvar)``, i.e. always exactly n-1 regardless of
    the sample, so the test could never detect a wrong variance.  It must
    use the sample variance ``sv`` in the numerator.
    """
    df = n - 1
    chi2 = df * sv / float(popvar)
    # Upper-tail probability doubled; for chi2 below the mean this exceeds 1,
    # which is harmless for the p > 0.01 acceptance check.
    pval = stats.distributions.chi2.sf(chi2, df) * 2
    npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
                (chi2, pval, popvar, sv))
def check_cdf_ppf(distfn, arg, msg):
    """Round-trip check: cdf(ppf(q)) must recover q at a few quantiles."""
    quantiles = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
    """Check the sf/isf round-trip and the identity cdf(x) == 1 - sf(x)."""
    probs = [0.1, 0.5, 0.9]
    npt.assert_almost_equal(distfn.sf(distfn.isf(probs, *arg), *arg),
                            probs, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
    pts = [0.1, 0.9]
    npt.assert_almost_equal(distfn.cdf(pts, *arg),
                            1.0 - distfn.sf(pts, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
    """Compare the pdf at (roughly) the median against a central difference
    of the cdf."""
    eps = 1e-6
    point = distfn.ppf(0.5, *arg)
    density = distfn.pdf(point, *arg)
    if density < 1e-4 or density > 1e4:
        # pdf is close to zero or huge (singularity) at the median;
        # nudge the evaluation point instead.
        point = point + 0.1
        density = distfn.pdf(point, *arg)
    # Central difference of the cdf; works well enough in practice even
    # though a fancier multi-point derivative could be used.
    cdf_slope = (distfn.cdf(point + eps, *arg) -
                 distfn.cdf(point - eps, *arg)) / eps / 2.0
    npt.assert_almost_equal(density, cdf_slope, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
    """Verify logpdf agrees with log(pdf) at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    pdf_vals = distfn.pdf(xs, *args)
    logpdf_vals = distfn.logpdf(xs, *args)
    # Drop zero densities and non-finite logs so the comparison stays
    # well defined.
    pdf_vals = pdf_vals[pdf_vals != 0]
    logpdf_vals = logpdf_vals[np.isfinite(logpdf_vals)]
    npt.assert_almost_equal(np.log(pdf_vals), logpdf_vals, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
    """Verify logsf agrees with log(sf) at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    sf_vals = distfn.sf(xs, *args)
    logsf_vals = distfn.logsf(xs, *args)
    # Drop zero survival values and non-finite logs before comparing.
    sf_vals = sf_vals[sf_vals != 0]
    logsf_vals = logsf_vals[np.isfinite(logsf_vals)]
    npt.assert_almost_equal(np.log(sf_vals), logsf_vals, decimal=7,
                            err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
    """Verify logcdf agrees with log(cdf) at several interior quantiles."""
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    xs = distfn.ppf(quantiles, *args)
    cdf_vals = distfn.cdf(xs, *args)
    logcdf_vals = distfn.logcdf(xs, *args)
    # Drop zero cdf values and non-finite logs before comparing.
    cdf_vals = cdf_vals[cdf_vals != 0]
    logcdf_vals = logcdf_vals[np.isfinite(logcdf_vals)]
    npt.assert_almost_equal(np.log(cdf_vals), logcdf_vals, decimal=7,
                            err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
    """Kolmogorov-Smirnov test of the given sample against ``dist``.

    Reuses the caller's random variates; since kstest itself is random,
    a failure triggers one retest with a freshly drawn sample before the
    assertion fires.  (Adapted from scipy.stats.tests.)
    """
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if pval < alpha:
        D, pval = stats.kstest(dist, '', args=args, N=1000)
    npt.assert_(pval > alpha,
                "D = {0}; pval = {1}; alpha = {2}\nargs = {3}".format(
                    D, pval, alpha, args))
def check_vecentropy(distfn, args):
    """The vectorized entropy must match the private scalar _entropy."""
    vectorized = distfn.vecentropy(*args)
    scalar = distfn._entropy(*args)
    npt.assert_equal(vectorized, scalar)
def check_loc_scale(distfn, arg, m, v, msg):
    """Affine-transformation rules: the mean shifts and scales linearly,
    the variance scales quadratically."""
    loc, scale = 10.0, 10.0
    mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
    expected_mean = m * scale + loc
    expected_var = v * scale * scale
    npt.assert_allclose(expected_mean, mt)
    npt.assert_allclose(expected_var, vt)
def check_ppf_private(distfn, arg, msg):
    """Call the private _ppf directly and ensure it produces no NaNs.

    Fails by design for truncnorm, where self.nb is not defined.
    """
    quantiles = np.array([0.1, 0.5, 0.9])
    ppfs = distfn._ppf(quantiles, *arg)
    npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
# Allow running this test module directly with numpy's nose-based runner.
if __name__ == "__main__":
    npt.run_module_suite()
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file implements OpenAPI 3.2 specification generation for the Prometheus HTTP API.
// It provides dynamic spec building with optional path filtering.
package v1
import (
"log/slog"
"net/http"
"net/url"
"path"
"strings"
"sync"
"github.com/pb33f/libopenapi/datamodel/high/base"
v3 "github.com/pb33f/libopenapi/datamodel/high/v3"
"github.com/pb33f/libopenapi/orderedmap"
)
const (
	// openAPIVersion31 is the default served version; OpenAPI 3.1.0 has the
	// broadest tooling compatibility.
	openAPIVersion31 = "3.1.0"
	// openAPIVersion32 supports advanced features such as itemSchema for SSE
	// streams (used by the /notifications/live endpoint).
	openAPIVersion32 = "3.2.0"
)
// OpenAPIOptions configures the OpenAPI spec builder.
type OpenAPIOptions struct {
	// IncludePaths filters which paths to include in the spec.
	// If empty, all paths are included.
	// Paths are matched by prefix (e.g., "/query" matches "/query" and "/query_range").
	IncludePaths []string
	// ExternalURL is the external URL of the Prometheus server (e.g., "http://prometheus.example.com:9090").
	// It is used to build the spec's server URL; "/api/v1" is appended to its path.
	ExternalURL string
	// Version is the API version to include in the OpenAPI spec's info section.
	// If empty, defaults to "0.0.1-undefined".
	Version string
}
// OpenAPIBuilder builds and caches OpenAPI specifications.
// Both spec versions are rendered up-front by rebuild() and then served from
// the cached YAML; mu guards the cached byte slices.
type OpenAPIBuilder struct {
	mu sync.RWMutex
	cachedYAML31 []byte // Cached OpenAPI 3.1 spec.
	cachedYAML32 []byte // Cached OpenAPI 3.2 spec.
	options OpenAPIOptions
	logger *slog.Logger
}
// NewOpenAPIBuilder constructs a builder for the given options and
// immediately renders both spec versions so the first request is served
// straight from cache.
func NewOpenAPIBuilder(opts OpenAPIOptions, logger *slog.Logger) *OpenAPIBuilder {
	builder := &OpenAPIBuilder{options: opts, logger: logger}
	builder.rebuild()
	return builder
}
// rebuild renders the OpenAPI 3.1 and 3.2 specs under the write lock and
// stores the resulting YAML for ServeOpenAPI to return.
func (b *OpenAPIBuilder) rebuild() {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Render both versions in order; a render failure aborts the rebuild
	// (leaving any later cache untouched), matching the previous behavior.
	targets := []struct {
		version string
		errMsg  string
		cache   *[]byte
	}{
		{openAPIVersion31, "failed to render OpenAPI 3.1 spec - this is a bug, please report it", &b.cachedYAML31},
		{openAPIVersion32, "failed to render OpenAPI 3.2 spec - this is a bug, please report it", &b.cachedYAML32},
	}
	for _, t := range targets {
		rendered, err := b.buildDocument(t.version).Render()
		if err != nil {
			b.logger.Error(t.errMsg, "err", err)
			return
		}
		*t.cache = rendered
	}
}
// ServeOpenAPI writes the cached OpenAPI specification as YAML.
// OpenAPI 3.1.0 is served unless ?openapi_version=3.2 (or 3.2.0) is given.
func (b *OpenAPIBuilder) ServeOpenAPI(w http.ResponseWriter, r *http.Request) {
	requested := r.URL.Query().Get("openapi_version")

	b.mu.RLock()
	// Default to OpenAPI 3.1.0 for broader compatibility.
	yamlData := b.cachedYAML31
	if requested == "3.2" || requested == "3.2.0" {
		yamlData = b.cachedYAML32
	}
	b.mu.RUnlock()

	w.Header().Set("Content-Type", "application/yaml; charset=utf-8")
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.WriteHeader(http.StatusOK)
	w.Write(yamlData)
}
// WrapHandler returns the handler unchanged; request/response validation
// against the spec is intentionally not performed.
func (*OpenAPIBuilder) WrapHandler(next http.HandlerFunc) http.HandlerFunc {
	return next
}
// shouldIncludePath reports whether a path passes the IncludePaths filter.
// With no filter configured, every path is included; otherwise a path is
// included when any configured entry is a prefix of it.
func (b *OpenAPIBuilder) shouldIncludePath(path string) bool {
	if len(b.options.IncludePaths) == 0 {
		return true
	}
	for _, include := range b.options.IncludePaths {
		// strings.HasPrefix(path, include) is already true when the two are
		// equal, so the previous extra `path == include` check was redundant.
		if strings.HasPrefix(path, include) {
			return true
		}
	}
	return false
}
// shouldIncludePathForVersion reports whether a path should appear in the
// spec for the given OpenAPI version: it must pass the IncludePaths filter,
// and 3.2-only endpoints are dropped from the 3.1 document.
func (b *OpenAPIBuilder) shouldIncludePathForVersion(path, version string) bool {
	switch {
	case !b.shouldIncludePath(path):
		return false
	case version == openAPIVersion31 && path == "/notifications/live":
		// This SSE endpoint uses itemSchema, an OpenAPI 3.2-only feature.
		return false
	default:
		return true
	}
}
// buildDocument creates the OpenAPI document for the specified version using
// high-level libopenapi structs. The version string is written verbatim into
// the document and also controls which paths and tag fields are emitted.
func (b *OpenAPIBuilder) buildDocument(version string) *v3.Document {
	return &v3.Document{
		Version: version,
		Info: b.buildInfo(),
		Servers: b.buildServers(),
		Tags: b.buildTags(version),
		Paths: b.buildPaths(version),
		Components: b.buildComponents(),
	}
}
// buildInfo constructs the info section, falling back to a placeholder
// version string when none was configured.
func (b *OpenAPIBuilder) buildInfo() *base.Info {
	version := b.options.Version
	if version == "" {
		version = "0.0.1-undefined"
	}
	info := &base.Info{
		Title:       "Prometheus API",
		Description: "Prometheus is an Open-Source monitoring system with a dimensional data model, flexible query language, efficient time series database and modern alerting approach.",
		Version:     version,
		Contact: &base.Contact{
			Name: "Prometheus Community",
			URL:  "https://prometheus.io/community/",
		},
	}
	return info
}
// buildServers constructs the servers section. ExternalURL is always set by
// computeExternalURL in main.go and includes scheme, host, port and an
// optional path prefix (without trailing slash); "/api/v1" is joined onto
// its path. On any parse failure we fall back to the bare API prefix.
func (b *OpenAPIBuilder) buildServers() []*v3.Server {
	const apiPrefix = "/api/v1"
	serverURL := apiPrefix
	if ext := b.options.ExternalURL; ext != "" {
		if parsed, err := url.Parse(ext); err == nil {
			// path.Join appends the prefix to any existing path cleanly.
			parsed.Path = path.Join(parsed.Path, apiPrefix)
			serverURL = parsed.String()
		}
	}
	return []*v3.Server{{URL: serverURL}}
}
// buildTags constructs the global tags list.
// Tag description is supported in both 3.1 and 3.2; tag summary is an
// OpenAPI 3.2-only feature and is omitted from 3.1 documents.
func (*OpenAPIBuilder) buildTags(version string) []*base.Tag {
	type tagInfo struct {
		name, summary, description string
	}
	all := []tagInfo{
		{"query", "Query", "Query and evaluate PromQL expressions."},
		{"metadata", "Metadata", "Retrieve metric metadata such as type and unit."},
		{"labels", "Labels", "Query label names and values."},
		{"series", "Series", "Query and manage time series."},
		{"targets", "Targets", "Retrieve target and scrape pool information."},
		{"rules", "Rules", "Query recording and alerting rules."},
		{"alerts", "Alerts", "Query active alerts and alertmanager discovery."},
		{"status", "Status", "Retrieve server status and configuration."},
		{"admin", "Admin", "Administrative operations for TSDB management."},
		{"features", "Features", "Query enabled features."},
		{"remote", "Remote Storage", "Remote read and write endpoints."},
		{"otlp", "OTLP", "OpenTelemetry Protocol metrics ingestion."},
		{"notifications", "Notifications", "Server notifications and events."},
	}

	out := make([]*base.Tag, len(all))
	for i, ti := range all {
		t := &base.Tag{Name: ti.name, Description: ti.description}
		if version == openAPIVersion32 {
			t.Summary = ti.summary // 3.2-only field
		}
		out[i] = t
	}
	return out
}
// buildPaths constructs the paths section, keeping only the path items that
// pass the per-version include filter (insertion order is preserved).
func (b *OpenAPIBuilder) buildPaths(version string) *v3.Paths {
	filtered := orderedmap.New[string, *v3.PathItem]()
	for pair := b.getAllPathDefinitions().First(); pair != nil; pair = pair.Next() {
		key := pair.Key()
		if !b.shouldIncludePathForVersion(key, version) {
			continue
		}
		filtered.Set(key, pair.Value())
	}
	return &v3.Paths{PathItems: filtered}
}
// getAllPathDefinitions returns every path definition the API exposes, in
// the order the paths should appear in the rendered spec (the ordered map
// preserves insertion order). Filtering by options/version happens later in
// buildPaths.
func (b *OpenAPIBuilder) getAllPathDefinitions() *orderedmap.Map[string, *v3.PathItem] {
	paths := orderedmap.New[string, *v3.PathItem]()

	// Query endpoints.
	paths.Set("/query", b.queryPath())
	paths.Set("/query_range", b.queryRangePath())
	paths.Set("/query_exemplars", b.queryExemplarsPath())
	paths.Set("/format_query", b.formatQueryPath())
	paths.Set("/parse_query", b.parseQueryPath())

	// Label endpoints.
	paths.Set("/labels", b.labelsPath())
	paths.Set("/label/{name}/values", b.labelValuesPath())

	// Series endpoints.
	paths.Set("/series", b.seriesPath())

	// Metadata endpoints.
	paths.Set("/metadata", b.metadataPath())

	// Target endpoints.
	paths.Set("/scrape_pools", b.scrapePoolsPath())
	paths.Set("/targets", b.targetsPath())
	paths.Set("/targets/metadata", b.targetsMetadataPath())
	paths.Set("/targets/relabel_steps", b.targetsRelabelStepsPath())

	// Rules and alerts endpoints.
	paths.Set("/rules", b.rulesPath())
	paths.Set("/alerts", b.alertsPath())
	paths.Set("/alertmanagers", b.alertmanagersPath())

	// Status endpoints.
	paths.Set("/status/config", b.statusConfigPath())
	paths.Set("/status/runtimeinfo", b.statusRuntimeInfoPath())
	paths.Set("/status/buildinfo", b.statusBuildInfoPath())
	paths.Set("/status/flags", b.statusFlagsPath())
	paths.Set("/status/tsdb", b.statusTSDBPath())
	paths.Set("/status/tsdb/blocks", b.statusTSDBBlocksPath())
	paths.Set("/status/walreplay", b.statusWALReplayPath())

	// Admin endpoints.
	paths.Set("/admin/tsdb/delete_series", b.adminDeleteSeriesPath())
	paths.Set("/admin/tsdb/clean_tombstones", b.adminCleanTombstonesPath())
	paths.Set("/admin/tsdb/snapshot", b.adminSnapshotPath())

	// Remote endpoints.
	paths.Set("/read", b.remoteReadPath())
	paths.Set("/write", b.remoteWritePath())
	paths.Set("/otlp/v1/metrics", b.otlpWritePath())

	// Notifications endpoints.
	paths.Set("/notifications", b.notificationsPath())
	paths.Set("/notifications/live", b.notificationsLivePath())

	// Features endpoint.
	paths.Set("/features", b.featuresPath())

	return paths
}
import discord
from discord.ext import commands
import asyncio
class lyriccommands:
    """Cog that "sings" song lyrics into the channel by posting the lines
    with roughly song-timed delays between them.

    Improvements over the original:
    - removed dead local aliases in __init__ (`sleep`/`sing` were assigned
      and never used);
    - princeofbelair no longer duplicates the entire song for the
      user/no-user cases (the two branches differed in exactly two lines);
    - `user == None` replaced with `user is None`;
    - each song is data (text, pause) so the say/sleep loop lives in one
      private helper.  All posted strings are unchanged.
    """

    def __init__(self, bot):
        self.bot = bot

    async def _sing_lines(self, lines):
        """Say each (text, pause) pair in order; a pause of None means the
        next line follows immediately (or the song is over)."""
        for text, pause in lines:
            await self.bot.say(text)
            if pause is not None:
                await asyncio.sleep(pause)

    @commands.command()
    async def fuckthisshit(self):
        """Fuck this shit I'm out."""
        chorus = "Fuck this shit I'm out."
        # This song pauses *before* the first line.
        await asyncio.sleep(1.0)
        await self._sing_lines([
            (chorus, 1.2),
            ("Mmmmmm Hmmmmm", 0.1),
            (chorus, 1.2),
            ("No thanks.", 0.1),
            ("Don't mind me.", 1.2),
            ("Imma just grab my stuff and leave.", 2.0),
            ("Excuse me please.", 1.1),
            (chorus, 1.2),
            ("NOPE!", 0.5),
            (chorus, 1.2),
            ("Alright then.", 0.5),
            ("I dunno know what the fck just happened.", 1.8),
            ("But I don't really care.", 1.2),
            ("Imma get the fuck up outta here.", 1.5),
            (chorus, 1.5),
            ("https://youtu.be/9wO5TnYaJ4o?t=53s", None),
        ])

    @commands.command()
    async def princeofbelair(self, user: discord.Member = None):
        """Sing the Fresh Prince of Bel Air theme, optionally dedicated to
        a mentioned user."""
        # The two closing lines name either the canonical title or the
        # mentioned user; everything else is identical.
        target = "The Prince of Bel Air" if user is None else user.mention
        await self._sing_lines([
            ("https://www.youtube.com/watch?v=AVbQo3IOC_A", 3),
            ("Now this is a story, all about how my life got flipped, turned upside down.", 4),
            ("And I like to take minute just sit right there, I'll tell you how I became the price of a town called Bel Air.", 23),
            ("In west Philadalphea born 'n raised, on the play ground where I spent most of my days.", 5),
            ("Chillin' out, maxin' relaxin' and all cool and all shooting some bball outside of the school.", 5),
            ("When a couple of guys said we were up to no good! Started making trouble in my neighborhood.", 4),
            ("I got in one little fight and my mom got scared and said 'you're moving to your auntie and uncle in Bel Air'.", 5),
            ("I begged and pleaded with her to have the day but she pack my suit-case and sent me on my way.", 4),
            ("She gave me a kiss and she gave me my ticket, I put my walkman on and said 'I might as well kick it'.", 5),
            ("First class, sho' this is bad, drinking orange juice outta a champange glass.", 5),
            ("Is this what the people of Bel Air living like?", 2),
            ("Hmm, this might be all right.", 2),
            ("But wait I hear the prissy, booze, wine and all.", 3),
            ("Is this the type of place they should send this cool-cat?", 2),
            ("I don't think so, I'll see when I get there, I hope they're prepared for {}.".format(target), 22),
            ("Well I, the plane landed and when I came out, there was a dude who looked like a cop standing there with my name out, I ain't trying to get arrested yet I just got here!", 8),
            ("I sprang with the quickness the lightning disappeared.", 2),
            ("I whistled for a cab, and when it came near, the license plate said fresh and had a dice in the mirror.", 6),
            ("If anything I could sing this cat was rare, but I thought 'man forget yet, yo homes the Bel Air'.", 13),
            ("I pulled up to the house about 7 or 8 and I yelled to the cabbie 'yo homes smell ya later'.", 6),
            ("Looked at my kingdom, I was finally here to sit on my throne as {}.".format(target), None),
        ])

    @commands.command(name="heman", aliases=["heeman"])
    async def heman(self):
        """HEYYEYAAEYAAAEYAEYAA."""
        chorus = ("And I say, HEY YEAH YEAH YEAH YEAH, HEY YEAH YEAH, "
                  "I SAY HEY, WHAT'S GOING ON!?")
        await self._sing_lines([
            ("https://www.youtube.com/watch?v=ZZ5LpwO-An4", 4),
            ("And so I cry sometime when I'm lying in bed.", 3),
            ("To just give it all up, what's in my head.", 4),
            ("And I! I'm feeling a little peculiar.", 6),
            ("And so I wake in the morning and I step outside.", 4),
            ("And I take a deep breath and get real high.", 3),
            ("And I, scream from the top of my lungs, what's going on?!", 6),
            # The first chorus has no comma after "say" in the original.
            ("And I say HEY YEAH YEAH YEAH YEAH, HEY YEAH YEAH, I SAY HEY, WHAT'S GOING ON!?", 12),
            (chorus, 12),
            ("And he tries.", 3),
            ("Oh my god do I try.", 4),
            ("I try all the time, in this institution.", 7),
            ("And he prays!", 3),
            ("Oh my god do I pray.", 3),
            ("I pray every single day!", 3),
            ("MYAAAH", None),
            ("FOR A REVOLUTION!", 3),
            (chorus, 12),
            (chorus, None),
        ])
def setup(bot):
    """discord.py extension entry point: register the lyriccommands cog."""
    cog = lyriccommands(bot)
    bot.add_cog(cog)
# This is a pretty brittle way to get the assembly reference
sys.path.append("C:\\Program Files\\National Instruments\\MeasurementStudioVS2005\\DotNET\\Assemblies\\Current")
clr.AddReference("NationalInstruments.DAQmx.dll")
from NationalInstruments.DAQmx import *
from DAQ.Environment import *
from math import *
def readInput(t, sampleRate, numOfSamples):
    # Acquire numOfSamples finite samples at sampleRate from DAQmx task t,
    # release the task reservation, and return the mean sample value (volts).
    t.Timing.ConfigureSampleClock("", sampleRate, SampleClockActiveEdge.Rising, SampleQuantityMode.FiniteSamples, numOfSamples)
    reader = AnalogSingleChannelReader(t.Stream)
    samples = reader.ReadMultiSample(numOfSamples)
    t.Control(TaskAction.Unreserve)
    mean = sum(samples) / len(samples)
    return mean
def sniff(channel, numOfSamples, sampleRate, displayEvery):
# set up the magnetomter input
t = Task("SnifferInput")
ch = Environs.Hardware.AnalogInputChannels[channel]
ch.AddToTask(t,-10,10)
t.Control(TaskAction.Verify)
vals = []
i = 0
hc.SwitchEAndWait(False)
while True:
v1 = readInput(t, sampleRate, numOfSamples)
hc.SwitchEAndWait()
v2 = readInput(t, sampleRate, numOfSamples)
vals.Append(v1 - v2)
i = i + 1
if ((i % displayEvery) == 0):
mn = sum(vals) / len(vals)
se = sqrt(sum((x - mn)**2 for x in vals)) / len(vals)
print "i: " + str(i) + "\tMean: " + str(1E6 * mn) + "uV\tS.E: " + str(1E6 * se) + "uV"
t.Dispose()
return va
def sniff1():
    # Convenience preset: sniff the magnetometer at 1 kHz, 1000 samples per
    # e-state, printing statistics every 5 switch pairs.
    channel = "magnetometer"
    samples, rate, display_every = 1000, 1000, 5
    sniff(channel, samples, rate, display_every)
def run_script():
    # Entry point invoked by the hosting environment: print usage help for
    # the field sniffer.  (The text itself is user-facing output, not a
    # docstring, so it is printed rather than attached to the function.)
    print """Field sniffer (tm). Measures an analog input a number of times, throws the
e-switch and repeats. Collects statistics on the difference in one e-state to
the other. The e-switch parameters are taken from EDMHardwareController. So,
if, for instance, you just want to test the relays with the HV off, you can
set most of the delays in hardware controller to zero to speed things up. You
can easily get above 1Hz this way.
usage: sniff(channel, numOfSamples, sampleRate, displayEvery)
- channel is the name of an analog input in the Hardware class (i.e.
"magnetomter" or "miniFlux1" - case sensitive!).
- numOfSamples is the number of samples taken between each e-switch.
- sampleRate is the rate at which these samples are taken.
- displayEvery governs after how many e-switch pairs the statistics
are updated
So, for example, sniff("magnetometer", 1000, 1000, 5) will measure for one
second at one kHz, reverse the field and repeat. It will display updated
statistics every five seconds.
"""
# Python test set -- built-in functions
import test.test_support, unittest
import sys
import pickle
import itertools
import warnings
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python reference implementation of xrange(start, stop, step)
    (3-argument form only), used for comparison in the tests."""
    if (start - stop) // step < 0:
        # Replace stop with the next element in the sequence of integers
        # congruent to start modulo step, so the `!=` loop test terminates.
        stop += (start - stop) % step
    current = start
    while current != stop:
        yield current
        current += step
def pyrange_reversed(start, stop, step):
    """Yield the elements of pyrange(start, stop, step) in reverse order."""
    # Align stop onto the lattice start + k*step, then walk backwards from
    # the last element to the first.
    aligned_stop = stop + (start - stop) % step
    return pyrange(aligned_stop - step, start - step, -step)
class XrangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.izip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
def test_xrange(self):
self.assertEqual(list(xrange(3)), [0, 1, 2])
self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(xrange(0)), [])
self.assertEqual(list(xrange(-3)), [])
self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(xrange(a, a+2)), [a, a+1])
self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])
seq = list(xrange(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, xrange)
self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
self.assertRaises(ValueError, xrange, 1, 2, 0)
self.assertRaises(OverflowError, xrange, 10**100, 10**101, 10**101)
self.assertRaises(TypeError, xrange, 0, "spam")
self.assertRaises(TypeError, xrange, 0, 42, "spam")
self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)
self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
r = xrange(-sys.maxint, sys.maxint, 2)
self.assertEqual(len(r), sys.maxint)
self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
r = xrange(*t)
self.assertEquals(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
def test_range_iterators(self):
# see issue 7298
limits = [base + jiggle
for M in (2**32, 2**64)
for base in (-M, -M//2, 0, M//2, M)
for jiggle in (-2, -1, 0, 1, 2)]
test_ranges = [(start, end, step)
for start in limits
for end in limits
for step in (-2**63, -2**31, -2, -1, 1, 2)]
for start, end, step in test_ranges:
try:
iter1 = xrange(start, end, step)
except OverflowError:
pass
else:
iter2 = pyrange(start, end, step)
test_id = "xrange({}, {}, {})".format(start, end, step)
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
try:
iter1 = reversed(xrange(start, end, step))
except OverflowError:
pass
else:
iter2 = pyrange_reversed(start, end, step)
test_id = "reversed(xrange({}, {}, {}))".format(start, end, step)
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_main():
    """Entry point used by Python's regression-test driver."""
    run = test.test_support.run_unittest
    run(XrangeTest)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_main()
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_REPLACERANDOMSHUFFLECHECK_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_REPLACERANDOMSHUFFLECHECK_H
#include "../ClangTidyCheck.h"
#include "../utils/IncludeInserter.h"
namespace clang::tidy::modernize {
/// std::random_shuffle will be removed as of C++17. This check will find and
/// replace all occurrences of std::random_shuffle with std::shuffle.
///
/// For the user-facing documentation see:
/// https://clang.llvm.org/extra/clang-tidy/checks/modernize/replace-random-shuffle.html
class ReplaceRandomShuffleCheck : public ClangTidyCheck {
public:
  ReplaceRandomShuffleCheck(StringRef Name, ClangTidyContext *Context);
  // Hooks the preprocessor so IncludeInserter can track existing includes
  // (presumably to insert the header std::shuffle needs when a fix is
  // emitted — confirm against the .cpp implementation).
  void registerPPCallbacks(const SourceManager &SM, Preprocessor *PP,
                           Preprocessor *ModuleExpanderPP) override;
  // The replacement, std::shuffle, requires C++11 or later.
  bool isLanguageVersionSupported(const LangOptions &LangOpts) const override {
    return LangOpts.CPlusPlus11;
  }
  void storeOptions(ClangTidyOptions::OptionMap &Opts) override;
  void registerMatchers(ast_matchers::MatchFinder *Finder) override;
  void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
private:
  // Utility used to add #include directives alongside fix-its.
  utils::IncludeInserter IncludeInserter;
};
} // namespace clang::tidy::modernize
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_REPLACERANDOMSHUFFLECHECK_H | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/modernize/ReplaceRandomShuffleCheck.h |
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-cluster.html
applies_to:
deployment:
ess:
self:
---
# Cluster-level shard allocation and routing settings [modules-cluster]
Shard allocation is the process of assigning shard copies to nodes. This can happen during initial recovery, replica allocation, rebalancing, when nodes are added to or removed from the cluster, or when cluster or index settings that impact allocation are updated.
One of the main roles of the master is to decide which shards to allocate to which nodes, and when to move shards between nodes in order to rebalance the cluster.
There are a number of settings available to control the shard allocation process:
* [Cluster-level shard allocation settings](#cluster-shard-allocation-settings) control allocation and rebalancing operations.
* [Disk-based shard allocation settings](#disk-based-shard-allocation) explains how Elasticsearch takes available disk space into account, and the related settings.
* [Shard allocation awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md) and [Forced awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md#forced-awareness) control how shards can be distributed across different racks or availability zones.
* [Cluster-level shard allocation filtering](#cluster-shard-allocation-filtering) allows certain nodes or groups of nodes excluded from allocation so that they can be decommissioned.
* [Cluster-level node allocation stats cache settings](#node-allocation-stats-cache) control the node allocation statistics cache on the master node.
Besides these, there are a few other [miscellaneous cluster-level settings](/reference/elasticsearch/configuration-reference/miscellaneous-cluster-settings.md).
## Cluster-level shard allocation settings [cluster-shard-allocation-settings]
You can use the following settings to control shard allocation and recovery:
$$$cluster-routing-allocation-enable$$$
`cluster.routing.allocation.enable`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Enable or disable allocation for specific kinds of shards:
* `all` - (default) Allows shard allocation for all kinds of shards.
* `primaries` - Allows shard allocation only for primary shards.
* `new_primaries` - Allows shard allocation only for primary shards for new indices.
* `none` - No shard allocations of any kind are allowed for any indices.
This setting only affects future allocations, and does not re-allocate or un-allocate currently allocated shards. It also does not affect the recovery of local primary shards when restarting a node. A restarted node that has a copy of an unassigned primary shard will recover that primary immediately, assuming that its allocation id matches one of the active allocation ids in the cluster state.
$$$cluster-routing-allocation-same-shard-host$$$
`cluster.routing.allocation.same_shard.host`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) If `true`, forbids multiple copies of a shard from being allocated to distinct nodes on the same host, i.e. which have the same network address. Defaults to `false`, meaning that copies of a shard may sometimes be allocated to nodes on the same host. This setting is only relevant if you run multiple nodes on each host.
`cluster.routing.allocation.node_concurrent_incoming_recoveries`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) How many concurrent incoming shard recoveries are allowed to happen on a node. Incoming recoveries are the recoveries where the target shard (most likely the replica unless a shard is relocating) is allocated on the node. Defaults to `2`. Increasing this setting may cause shard movements to have a performance impact on other activity in your cluster, but may not make shard movements complete noticeably sooner. We do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_outgoing_recoveries`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) How many concurrent outgoing shard recoveries are allowed to happen on a node. Outgoing recoveries are the recoveries where the source shard (most likely the primary unless a shard is relocating) is allocated on the node. Defaults to `2`. Increasing this setting may cause shard movements to have a performance impact on other activity in your cluster, but may not make shard movements complete noticeably sooner. We do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_recoveries`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) A shortcut to set both `cluster.routing.allocation.node_concurrent_incoming_recoveries` and `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. The value of this setting takes effect only when the more specific setting is not configured. Defaults to `2`. Increasing this setting may cause shard movements to have a performance impact on other activity in your cluster, but may not make shard movements complete noticeably sooner. We do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.node_initial_primaries_recoveries`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) While the recovery of replicas happens over the network, the recovery of an unassigned primary after node restart uses data from the local disk. These should be fast so more initial primary recoveries can happen in parallel on each node. Defaults to `4`. Increasing this setting may cause shard recoveries to have a performance impact on other activity in your cluster, but may not make shard recoveries complete noticeably sooner. We do not recommend adjusting this setting from its default of `4`.
## Shard rebalancing settings [shards-rebalancing-settings]
A cluster is *balanced* when it has an equal number of shards on each node, with all nodes needing equal resources, without having a concentration of shards from any index on any node. {{es}} runs an automatic process called *rebalancing* which moves shards between the nodes in your cluster to improve its balance. Rebalancing obeys all other shard allocation rules such as [allocation filtering](#cluster-shard-allocation-filtering) and [forced awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md#forced-awareness) which may prevent it from completely balancing the cluster. In that case, rebalancing strives to achieve the most balanced cluster possible within the rules you have configured. If you are using [data tiers](docs-content://manage-data/lifecycle/data-tiers.md) then {{es}} automatically applies allocation filtering rules to place each shard within the appropriate tier. These rules mean that the balancer works independently within each tier.
You can use the following settings to control the rebalancing of shards across the cluster:
`cluster.routing.allocation.allow_rebalance`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Specify when shard rebalancing is allowed:
* `always` - (default) Always allow rebalancing.
* `indices_primaries_active` - Only when all primaries in the cluster are allocated.
* `indices_all_active` - Only when all shards (primaries and replicas) in the cluster are allocated.
`cluster.routing.rebalance.enable`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Enable or disable rebalancing for specific kinds of shards:
* `all` - (default) Allows shard balancing for all kinds of shards.
* `primaries` - Allows shard balancing only for primary shards.
* `replicas` - Allows shard balancing only for replica shards.
* `none` - No shard balancing of any kind is allowed for any indices.
Rebalancing is important to ensure the cluster returns to a healthy and fully resilient state after a disruption. If you adjust this setting, remember to set it back to `all` as soon as possible.
`cluster.routing.allocation.cluster_concurrent_rebalance`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defines the number of concurrent shard rebalances are allowed across the whole cluster. Defaults to `2`. Note that this setting only controls the number of concurrent shard relocations due to imbalances in the cluster. This setting does not limit shard relocations due to [allocation filtering](#cluster-shard-allocation-filtering) or [forced awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md#forced-awareness). Increasing this setting may cause the cluster to use additional resources moving shards between nodes, so we generally do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.type`
: ([Static](docs-content://deploy-manage/stack-settings.md#static-cluster-setting)) Selects the algorithm used for computing the cluster balance. Defaults to `desired_balance` which selects the *desired balance allocator*. This allocator runs a background task which computes the desired balance of shards in the cluster. Once this background task completes, {{es}} moves shards to their desired locations.
:::{admonition} Deprecated in 8.8
May also be set to `balanced` to select the legacy *balanced allocator*. This allocator was the default allocator in versions of {{es}} before 8.6.0. It runs in the foreground, preventing the master from doing other work in parallel. It works by selecting a small number of shard movements which immediately improve the balance of the cluster, and when those shard movements complete it runs again and selects another few shards to move. Since this allocator makes its decisions based only on the current state of the cluster, it will sometimes move a shard several times while balancing the cluster.
:::
## Shard balancing heuristics settings [shards-rebalancing-heuristics]
Rebalancing works by computing a *weight* for each node based on its allocation of shards, and then moving shards between nodes to reduce the weight of the heavier nodes and increase the weight of the lighter ones. The cluster is balanced when there is no possible shard movement that can bring the weight of any node closer to the weight of any other node by more than a configurable threshold.
The weight of a node depends on the number of shards it holds and on the total estimated resource usage of those shards expressed in terms of the size of the shard on disk and the number of threads needed to support write traffic to the shard. {{es}} estimates the resource usage of shards belonging to data streams when they are created by a rollover. The estimated disk size of the new shard is the mean size of the other shards in the data stream. The estimated write load of the new shard is a weighted average of the actual write loads of recent shards in the data stream. Shards that do not belong to the write index of a data stream have an estimated write load of zero.
The following settings control how {{es}} combines these values into an overall measure of each node’s weight.
`cluster.routing.allocation.balance.threshold`
: (float, [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) The minimum improvement in weight which triggers a rebalancing shard movement. Defaults to `1.0f`. Raising this value will cause {{es}} to stop rebalancing shards sooner, leaving the cluster in a more unbalanced state.
`cluster.routing.allocation.balance.shard`
: (float, [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defines the weight factor for the total number of shards allocated to each node. Defaults to `0.45f`. Raising this value increases the tendency of {{es}} to equalize the total number of shards across nodes ahead of the other balancing variables.
`cluster.routing.allocation.balance.index`
: (float, [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defines the weight factor for the number of shards per index allocated to each node. Defaults to `0.55f`. Raising this value increases the tendency of {{es}} to equalize the number of shards of each index across nodes ahead of the other balancing variables.
`cluster.routing.allocation.balance.disk_usage`
: (float, [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defines the weight factor for balancing shards according to their predicted disk size in bytes. Defaults to `2e-11f`. Raising this value increases the tendency of {{es}} to equalize the total disk usage across nodes ahead of the other balancing variables.
`cluster.routing.allocation.balance.write_load`
: (float, [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defines the weight factor for the write load of each shard, in terms of the estimated number of indexing threads needed by the shard. Defaults to `10.0f`. Raising this value increases the tendency of {{es}} to equalize the total write load across nodes ahead of the other balancing variables.
::::{note}
* If you have a large cluster, it may be unnecessary to keep it in a perfectly balanced state at all times. It is less resource-intensive for the cluster to operate in a somewhat unbalanced state rather than to perform all the shard movements needed to achieve the perfect balance. If so, increase the value of `cluster.routing.allocation.balance.threshold` to define the acceptable imbalance between nodes. For instance, if you have an average of 500 shards per node and can accept a difference of 5% (25 typical shards) between nodes, set `cluster.routing.allocation.balance.threshold` to `25`.
* We do not recommend adjusting the values of the heuristic weight factor settings. The default values work well in all reasonable clusters. Although different values may improve the current balance in some ways, it is possible that they will create unexpected problems in the future or prevent it from gracefully handling an unexpected disruption.
* Regardless of the result of the balancing algorithm, rebalancing might not be allowed due to allocation rules such as forced awareness and allocation filtering. Use the [Cluster allocation explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) API to explain the current allocation of shards.
::::
## Disk-based shard allocation settings [disk-based-shard-allocation]
$$$disk-based-shard-allocation-description$$$
The disk-based shard allocator ensures that all nodes have enough disk space without performing more shard movements than necessary. It allocates shards based on a pair of thresholds known as the *low watermark* and the *high watermark*. Its primary goal is to ensure that no node exceeds the high watermark, or at least that any such overage is only temporary. If a node exceeds the high watermark then {{es}} will solve this by moving some of its shards onto other nodes in the cluster.
::::{note}
It is normal for nodes to temporarily exceed the high watermark from time to time.
::::
The allocator also tries to keep nodes clear of the high watermark by forbidding the allocation of more shards to a node that exceeds the low watermark. Importantly, if all of your nodes have exceeded the low watermark then no new shards can be allocated and {{es}} will not be able to move any shards between nodes in order to keep the disk usage below the high watermark. You must ensure that your cluster has enough disk space in total and that there are always some nodes below the low watermark.
Shard movements triggered by the disk-based shard allocator must also satisfy all other shard allocation rules such as [allocation filtering](#cluster-shard-allocation-filtering) and [forced awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md#forced-awareness). If these rules are too strict then they can also prevent the shard movements needed to keep the nodes' disk usage under control. If you are using [data tiers](docs-content://manage-data/lifecycle/data-tiers.md) then {{es}} automatically configures allocation filtering rules to place shards within the appropriate tier, which means that the disk-based shard allocator works independently within each tier.
If a node is filling up its disk faster than {{es}} can move shards elsewhere then there is a risk that the disk will completely fill up. To prevent this, as a last resort, once the disk usage reaches the *flood-stage* watermark {{es}} will block writes to indices with a shard on the affected node. It will also continue to move shards onto the other nodes in the cluster. When disk usage on the affected node drops below the high watermark, {{es}} automatically removes the write block. Refer to [Fix watermark errors](docs-content://troubleshoot/elasticsearch/fix-watermark-errors.md) to resolve persistent watermark errors.
::::{admonition} Max headroom settings
:class: note
Max headroom settings apply only when watermark settings are percentages or ratios.
A max headroom value is intended to cap the required free disk space before hitting the respective watermark. This is useful for servers with larger disks, where a percentage or ratio watermark could translate to an overly large free disk space requirement. In this case, the max headroom can be used to cap the required free disk space amount.
For example, where `cluster.routing.allocation.disk.watermark.flood_stage` is 95% and `cluster.routing.allocation.disk.watermark.flood_stage.max_headroom` is 100GB, this means that:
* For a smaller disk, e.g., of 100GB, the flood watermark will hit at 95%, meaning at 5GB of free space, since 5GB is smaller than the 100GB max headroom value.
* For a larger disk, e.g., of 100TB, the flood watermark will hit at 100GB of free space. That is because the 95% flood watermark alone would require 5TB of free disk space, but is capped by the max headroom setting to 100GB.
Max headroom settings have their default values only if their respective watermark settings are not explicitly set. If watermarks are explicitly set, then the max headroom settings do not have their default values, and need to be explicitly set if they are needed.
::::
::::{tip}
:name: disk-based-shard-allocation-does-not-balance
It is normal for the nodes in your cluster to be using very different amounts of disk space. The [balance](#shards-rebalancing-settings) of the cluster depends on a combination of factors which includes the number of shards on each node, the indices to which those shards belong, and the resource needs of each shard in terms of its size on disk and its CPU usage. {{es}} must trade off all of these factors against each other, and a cluster which is balanced when looking at the combination of all of these factors may not appear to be balanced if you focus attention on just one of them.
::::
You can use the following settings to control disk-based allocation:
$$$cluster-routing-disk-threshold$$$
`cluster.routing.allocation.disk.threshold_enabled` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Defaults to `true`. Set to `false` to disable the disk allocation decider. Upon disabling, it will also remove any existing `index.blocks.read_only_allow_delete` index blocks.
$$$cluster-routing-watermark-low$$$
`cluster.routing.allocation.disk.watermark.low` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the low watermark for disk usage. It defaults to `85%`, meaning that {{es}} will not allocate shards to nodes that have more than 85% disk used. It can alternatively be set to a ratio value, e.g., `0.85`. It can also be set to an absolute byte value (like `500mb`) to prevent {{es}} from allocating shards if less than the specified amount of space is available. This setting has no effect on the primary shards of newly-created indices but will prevent their replicas from being allocated.
`cluster.routing.allocation.disk.watermark.low.max_headroom`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the max headroom for the low watermark (in case of a percentage/ratio value). Defaults to 200GB when `cluster.routing.allocation.disk.watermark.low` is not explicitly set. This caps the amount of free space required.
$$$cluster-routing-watermark-high$$$
`cluster.routing.allocation.disk.watermark.high` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the high watermark. It defaults to `90%`, meaning that {{es}} will attempt to relocate shards away from a node whose disk usage is above 90%. It can alternatively be set to a ratio value, e.g., `0.9`. It can also be set to an absolute byte value (similarly to the low watermark) to relocate shards away from a node if it has less than the specified amount of free space. This setting affects the allocation of all shards, whether previously allocated or not.
`cluster.routing.allocation.disk.watermark.high.max_headroom`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the max headroom for the high watermark (in case of a percentage/ratio value). Defaults to 150GB when `cluster.routing.allocation.disk.watermark.high` is not explicitly set. This caps the amount of free space required.
`cluster.routing.allocation.disk.watermark.enable_for_single_data_node`
: ([Static](docs-content://deploy-manage/stack-settings.md#static-cluster-setting)) In earlier releases, the default behaviour was to disregard disk watermarks for a single data node cluster when making an allocation decision. This is deprecated behavior since 7.14 and has been removed in 8.0. The only valid value for this setting is now `true`. The setting will be removed in a future release.
$$$cluster-routing-flood-stage$$$
`cluster.routing.allocation.disk.watermark.flood_stage` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the flood stage watermark, which defaults to 95%. {{es}} enforces a read-only index block ([`index.blocks.read_only_allow_delete`](/reference/elasticsearch/index-settings/index-block.md)) on every index that has one or more shards allocated on the node, and that has at least one disk exceeding the flood stage. This setting is a last resort to prevent nodes from running out of disk space. The index block is automatically released when the disk utilization falls below the high watermark. Similarly to the low and high watermark values, it can alternatively be set to a ratio value, e.g., `0.95`, or an absolute byte value.
`cluster.routing.allocation.disk.watermark.flood_stage.max_headroom`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the max headroom for the flood stage watermark (in case of a percentage/ratio value). Defaults to 100GB when `cluster.routing.allocation.disk.watermark.flood_stage` is not explicitly set. This caps the amount of free space required.
::::{note}
You can’t mix the usage of percentage/ratio values and byte values across the `cluster.routing.allocation.disk.watermark.low`, `cluster.routing.allocation.disk.watermark.high`, and `cluster.routing.allocation.disk.watermark.flood_stage` settings. Either all values must be set to percentage/ratio values, or all must be set to byte values. This is required so that {{es}} can validate that the settings are internally consistent, ensuring that the low disk threshold is less than the high disk threshold, and the high disk threshold is less than the flood stage threshold. A similar comparison check is done for the max headroom values.
::::
$$$cluster-routing-flood-stage-frozen$$$
`cluster.routing.allocation.disk.watermark.flood_stage.frozen` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the flood stage watermark for dedicated frozen nodes, which defaults to 95%.
`cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom` 
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Controls the max headroom for the flood stage watermark (in case of a percentage/ratio value) for dedicated frozen nodes. Defaults to 20GB when `cluster.routing.allocation.disk.watermark.flood_stage.frozen` is not explicitly set. This caps the amount of free space required on dedicated frozen nodes.
`cluster.info.update.interval`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) How often {{es}} should check on disk usage for each node in the cluster. Defaults to `30s`.
::::{note}
Percentage values refer to used disk space, while byte values refer to free disk space. This can be confusing, because it flips the meaning of high and low. For example, it makes sense to set the low watermark to 10gb and the high watermark to 5gb, but not the other way around.
::::
## Shard allocation awareness settings [shard-allocation-awareness-settings]
You can use [custom node attributes](/reference/elasticsearch/configuration-reference/node-settings.md#custom-node-attributes) as *awareness attributes* to enable {{es}} to take your physical hardware configuration into account when allocating shards. If {{es}} knows which nodes are on the same physical server, in the same rack, or in the same zone, it can distribute the primary shard and its replica shards to minimize the risk of losing all shard copies in the event of a failure. [Learn more about shard allocation awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md).
`cluster.routing.allocation.awareness.attributes`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) The node attributes that {{es}} should use as awareness attributes. For example, if you have a `rack_id` attribute that specifies the rack in which each node resides, you can set this setting to `rack_id` to ensure that primary and replica shards are not allocated on the same rack. You can specify multiple attributes as a comma-separated list.
`cluster.routing.allocation.awareness.force.*`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) The shard allocation awareness values that must exist for shards to be reallocated in case of location failure. Learn more about [forced awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md#forced-awareness).
## Cluster-level shard allocation filtering [cluster-shard-allocation-filtering]
You can use cluster-level shard allocation filters to control where {{es}} allocates shards from any index. These cluster wide filters are applied in conjunction with [per-index allocation filtering](/reference/elasticsearch/index-settings/shard-allocation.md) and [allocation awareness](docs-content://deploy-manage/distributed-architecture/shard-allocation-relocation-recovery/shard-allocation-awareness.md).
Shard allocation filters can be based on [custom node attributes](/reference/elasticsearch/configuration-reference/node-settings.md#custom-node-attributes) or the built-in `_name`, `_host_ip`, `_publish_ip`, `_ip`, `_host`, `_id` and `_tier` attributes.
The `cluster.routing.allocation` settings are [Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting), enabling live indices to be moved from one set of nodes to another. Shards are only relocated if it is possible to do so without breaking another routing constraint, such as never allocating a primary and replica shard on the same node.
The most common use case for cluster-level shard allocation filtering is when you want to decommission a node. To move shards off of a node prior to shutting it down, you could create a filter that excludes the node by its IP address:
```console
PUT _cluster/settings
{
"persistent" : {
"cluster.routing.allocation.exclude._ip" : "10.0.0.1"
}
}
```
% TEST[skip:TODO]
### Cluster routing settings [cluster-routing-settings]
`cluster.routing.allocation.include.{{attribute}}`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Allocate shards to a node whose `{{attribute}}` has at least one of the comma-separated values.
`cluster.routing.allocation.require.{{attribute}}`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Only allocate shards to a node whose `{{attribute}}` has *all* of the comma-separated values.
`cluster.routing.allocation.exclude.{{attribute}}`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Do not allocate shards to a node whose `{{attribute}}` has *any* of the comma-separated values.
The cluster allocation settings support the following built-in attributes:
`_name`
: Match nodes by node name
`_host_ip`
: Match nodes by host IP address (IP associated with hostname)
`_publish_ip`
: Match nodes by publish IP address
`_ip`
: Match either `_host_ip` or `_publish_ip`
`_host`
: Match nodes by hostname
`_id`
: Match nodes by node id
`_tier`
: Match nodes by the node’s [data tier](docs-content://manage-data/lifecycle/data-tiers.md) role
::::{note}
`_tier` filtering is based on [node](/reference/elasticsearch/configuration-reference/node-settings.md) roles. Only a subset of roles are [data tier](docs-content://manage-data/lifecycle/data-tiers.md) roles, and the generic [data role](docs-content://deploy-manage/distributed-architecture/clusters-nodes-shards/node-roles.md#data-node-role) will match any tier filtering.
::::
You can use wildcards when specifying attribute values, for example:
```console
PUT _cluster/settings
{
"persistent": {
"cluster.routing.allocation.exclude._ip": "192.168.2.*"
}
}
```
% TEST[skip:TODO]
## Node allocation stats cache [node-allocation-stats-cache]
`cluster.routing.allocation.stats.cache.ttl` {applies_to}`stack: ga 9.1`
: ([Dynamic](docs-content://deploy-manage/stack-settings.md#dynamic-cluster-setting)) Calculating the node allocation stats for a [Get node statistics API call](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats) can become expensive on the master for clusters with a high number of nodes. To prevent overloading the master, the node allocation stats are cached on the master for 1 minute (`1m`) by default. This setting can be used to adjust the cache time-to-live value, if necessary, keeping in mind the tradeoff between the freshness of the statistics and the processing costs on the master. The cache can be disabled (not recommended) by setting the value to `0s` (the minimum value). The maximum value is 10 minutes (`10m`).
# $Id: confbot.py 2912 2009-08-24 11:56:13Z bennylp $
#
# SIP Conference Bot
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import pjsua as pj
import string
import sys
# Name of the Python module (without the .py suffix) that holds the bot
# configuration; loaded via __import__() in Bot.main().
CFG_FILE = "config"

# Verbosity levels accepted by Bot.DEBUG().
INFO = 1
TRACE = 2
# Call callback. This would just forward the event to the Member class
class CallCb(pj.CallCallback):
    """pjsua call callback that forwards every call event to its Member.

    The class holds no state of its own beyond the Member back-reference;
    all decisions are made by Member.
    """
    def __init__(self, member, call=None):
        pj.CallCallback.__init__(self, call)
        self.member = member

    def on_state(self):
        self.member.on_call_state(self.call)

    def on_media_state(self):
        self.member.on_call_media_state(self.call)

    def on_dtmf_digit(self, digits):
        self.member.on_call_dtmf_digit(self.call, digits)

    def on_transfer_request(self, dst, code):
        # Return value decides whether the transfer is accepted.
        return self.member.on_call_transfer_request(self.call, dst, code)

    def on_transfer_status(self, code, reason, final, cont):
        # Return value decides whether to keep receiving status updates.
        return self.member.on_call_transfer_status(self.call, code, reason, final, cont)

    def on_replace_request(self, code, reason):
        return self.member.on_call_replace_request(self.call, code, reason)

    def on_replaced(self, new_call):
        self.member.on_call_replaced(self.call, new_call)

    def on_typing(self, is_typing):
        self.member.on_typing(is_typing, call=self.call)

    def on_pager(self, mime_type, body):
        self.member.on_pager(mime_type, body, call=self.call)

    def on_pager_status(self, body, im_id, code, reason):
        self.member.on_pager_status(body, im_id, code, reason, call=self.call)
# Buddy callback. This would just forward the event to Member class
class BuddyCb(pj.BuddyCallback):
    """pjsua buddy (presence/IM) callback that forwards events to its Member."""
    def __init__(self, member, buddy=None):
        pj.BuddyCallback.__init__(self, buddy)
        self.member = member

    def on_pager(self, mime_type, body):
        self.member.on_pager(mime_type, body, buddy=self.buddy)

    def on_pager_status(self, body, im_id, code, reason):
        self.member.on_pager_status(body, im_id, code, reason, buddy=self.buddy)

    def on_state(self):
        self.member.on_pres_state(self.buddy)

    def on_typing(self, is_typing):
        self.member.on_typing(is_typing, buddy=self.buddy)
##############################################################################
#
#
# This class represents individual room member (either/both chat and voice conf)
#
#
class Member:
    """One conference-room participant, on the chat (IM/presence) side,
    the voice (call) side, or both.

    Instances are created and owned by Bot and indexed by SIP URI in
    Bot.members.
    """
    def __init__(self, bot, uri):
        self.uri = uri              # the member's SIP URI (key in Bot.members)
        self.bot = bot              # back-reference to the owning Bot
        self.call = None            # active pj.Call, or None
        self.buddy = None           # pj.Buddy for presence/IM, or None
        self.bi = pj.BuddyInfo()    # last known presence snapshot
        self.in_chat = False        # presence subscription is active
        self.in_voice = False       # call media is active/in the conference
        self.im_error = False       # last IM to this member failed
        self.html = False           # member wants HTML rather than plain text

    def __str__(self):
        """One-line status summary, used by Bot.listmembers()."""
        # str.ljust() replaces the long-deprecated string.ljust(); also
        # avoids shadowing the builtin 'str' as the original code did.
        text = self.uri.ljust(30) + " -- "
        if self.buddy:
            bi = self.buddy.info()
            text += bi.online_text
        else:
            text += "Offline"
        text += " ["
        if self.in_voice:
            text += " voice"
        if self.in_chat:
            text += " chat"
            if self.html:
                text += " html"
            else:
                text += " plain"
        if self.im_error:
            text += " im_error"
        text += "]"
        return text

    def join_call(self, call):
        """Attach a call to this member, replacing any previous one.

        The conference bridge is joined later, from on_call_media_state(),
        once the call media becomes active.
        """
        if self.call:
            self.call.hangup(603, "You have been disconnected for making another call")
        self.call = call
        call.set_callback(CallCb(self, call))
        msg = "%(uri)s is attempting to join the voice conference" % \
              {'uri': self.uri}
        self.bot.DEBUG(msg + "\n", INFO)
        self.bot.broadcast_pager(None, msg)

    def join_chat(self):
        """Subscribe to the member's presence, adding it to the chat room."""
        if not self.buddy:
            self.bot.DEBUG(self.uri + " joining chatroom...\n", INFO)
            self.buddy = self.bot.acc.add_buddy(self.uri)
            self.buddy.set_callback(BuddyCb(self, self.buddy))
            self.buddy.subscribe()
        else:
            self.bot.DEBUG(self.uri + " already in chatroom, resubscribing..\n", INFO)
            self.buddy.subscribe()

    def send_pager(self, body, mime="text/plain"):
        """Send an instant message to this member, honouring its HTML setting.

        The message is silently dropped when the member is not in the chat
        room, has no buddy object, or its last IM delivery failed.
        """
        self.bot.DEBUG("send_pager() to " + self.uri)
        if self.in_chat and not self.im_error and self.buddy:
            if self.html:
                #This will make us receive html!
                #mime = "text/html"
                # BUG FIX: escape reserved characters with their HTML
                # entities; the replacements had been mangled into no-ops
                # (and a broken string literal for the double quote).
                # '&' must be escaped first so entities are not re-escaped.
                body = body.replace("&", "&amp;")
                body = body.replace("<", "&lt;")
                body = body.replace(">", "&gt;")
                body = body.replace('"', "&quot;")
                body = body.replace("\n", "<BR>\n")
            self.buddy.send_pager(body, content_type=mime)
            self.bot.DEBUG("..sent\n")
        else:
            self.bot.DEBUG("..not sent!\n")

    def on_call_state(self, call):
        """Call state change: announce joins/leaves of the voice conference."""
        ci = call.info()
        if ci.state == pj.CallState.DISCONNECTED:
            if self.in_voice:
                msg = "%(uri)s has left the voice conference (%(1)d/%(2)s)" % \
                      {'uri': self.uri, '1': ci.last_code, '2': ci.last_reason}
                self.bot.DEBUG(msg + "\n", INFO)
                self.bot.broadcast_pager(None, msg)
            # Clean up regardless of whether media ever became active.
            self.in_voice = False
            self.call = None
            self.bot.on_member_left(self)
        elif ci.state == pj.CallState.CONFIRMED:
            msg = "%(uri)s has joined the voice conference" % \
                  {'uri': self.uri}
            self.bot.DEBUG(msg + "\n", INFO)
            self.bot.broadcast_pager(None, msg)

    def on_call_media_state(self, call):
        """Media state change: join or leave the conference bridge."""
        self.bot.DEBUG("Member.on_call_media_state\n")
        ci = call.info()
        if ci.conf_slot != -1:
            # Only act on the inactive -> active transition.
            if not self.in_voice:
                msg = self.uri + " call media is active"
                self.bot.broadcast_pager(None, msg)
                self.in_voice = True
                self.bot.add_to_voice_conf(self)
        else:
            if self.in_voice:
                msg = self.uri + " call media is inactive"
                self.bot.broadcast_pager(None, msg)
                self.in_voice = False

    def on_call_dtmf_digit(self, call, digits):
        msg = "%(uri)s sent DTMF digits %(dig)s" % \
              {'uri': self.uri, 'dig': digits}
        self.bot.broadcast_pager(None, msg)

    def on_call_transfer_request(self, call, dst, code):
        msg = "%(uri)s is transfering the call to %(dst)s" % \
              {'uri': self.uri, 'dst': dst}
        self.bot.broadcast_pager(None, msg)
        # 202 Accepted: let the transfer proceed.
        return 202

    def on_call_transfer_status(self, call, code, reason, final, cont):
        msg = "%(uri)s call transfer status is %(code)d/%(res)s" % \
              {'uri': self.uri, 'code': code, 'res': reason}
        self.bot.broadcast_pager(None, msg)
        # True: keep receiving transfer status notifications.
        return True

    def on_call_replace_request(self, call, code, reason):
        msg = "%(uri)s is requesting call replace" % \
              {'uri': self.uri}
        self.bot.broadcast_pager(None, msg)
        # Accept the replace request unchanged.
        return (code, reason)

    def on_call_replaced(self, call, new_call):
        msg = "%(uri)s call is replaced" % \
              {'uri': self.uri}
        self.bot.broadcast_pager(None, msg)

    def on_pres_state(self, buddy):
        """Presence change: track chat membership via the subscription state."""
        old_bi = self.bi
        self.bi = buddy.info()
        msg = "%(uri)s status is %(st)s" % \
              {'uri': self.uri, 'st': self.bi.online_text}
        self.bot.DEBUG(msg + "\n", INFO)
        self.bot.broadcast_pager(self, msg)
        if self.bi.sub_state == pj.SubscriptionState.ACTIVE:
            if not self.in_chat:
                self.in_chat = True
                buddy.send_pager("Welcome to chatroom")
                self.bot.broadcast_pager(self, self.uri + " has joined the chat room")
        elif self.bi.sub_state == pj.SubscriptionState.NULL or \
             self.bi.sub_state == pj.SubscriptionState.TERMINATED or \
             self.bi.sub_state == pj.SubscriptionState.UNKNOWN:
            # Subscription is gone: drop the buddy and leave the chat room.
            self.buddy.delete()
            self.buddy = None
            if self.in_chat:
                self.in_chat = False
                self.bot.broadcast_pager(self, self.uri + " has left the chat room")
            self.bot.on_member_left(self)

    def on_typing(self, is_typing, call=None, buddy=None):
        if is_typing:
            msg = self.uri + " is typing..."
        else:
            msg = self.uri + " has stopped typing"
        self.bot.broadcast_pager(self, msg)

    def on_pager(self, mime_type, body, call=None, buddy=None):
        # An incoming message is either a bot command or room chatter.
        if not self.bot.handle_cmd(self, None, body):
            msg = self.uri + ": " + body
            self.bot.broadcast_pager(self, msg, mime_type)

    def on_pager_status(self, body, im_id, code, reason, call=None, buddy=None):
        # Remember delivery failures so send_pager() stops sending.
        # '//' makes the status-class extraction explicit integer division.
        self.im_error = (code // 100 != 2)
##############################################################################
#
#
# The Bot instance (singleton)
#
#
class Bot(pj.AccountCallback):
def __init__(self):
pj.AccountCallback.__init__(self, None)
self.lib = pj.Lib()
self.acc = None
self.calls = []
self.members = {}
self.cfg = None
def DEBUG(self, msg, level=TRACE):
print msg,
def helpstring(self):
return """
--h[elp] Display this help screen
--j[oin] Join the chat room
--html on|off Set to receive HTML or plain text
Participant commands:
--s[how] Show confbot settings
--leave Leave the chatroom
--l[ist] List all members
Admin commands:
--a[dmin] <CMD> Where <CMD> are:
list List the admins
add <URI> Add URI as admin
del <URI> Remove URI as admin
rr Reregister account to server
call <URI> Make call to the URI and add to voice conf
dc <URI> Disconnect call to URI
hold <URI> Hold call with that URI
update <URI> Send UPDATE to call with that URI
reinvite <URI> Send re-INVITE to call with that URI
"""
def listmembers(self):
msg = ""
for uri, m in self.members.iteritems():
msg = msg + str(m) + "\n"
return msg
def showsettings(self):
ai = self.acc.info()
msg = """
ConfBot status and settings:
URI: %(uri)s
Status: %(pres)s
Reg Status: %(reg_st)d
Reg Reason: %(reg_res)s
""" % {'uri': ai.uri, 'pres': ai.online_text, \
'reg_st': ai.reg_status, 'reg_res': ai.reg_reason}
return msg
def main(self, cfg_file):
try:
cfg = self.cfg = __import__(cfg_file)
self.lib.init(ua_cfg=cfg.ua_cfg, log_cfg=cfg.log_cfg, media_cfg=cfg.media_cfg)
self.lib.set_null_snd_dev()
transport = None
if cfg.udp_cfg:
transport = self.lib.create_transport(pj.TransportType.UDP, cfg.udp_cfg)
if cfg.tcp_cfg:
t = self.lib.create_transport(pj.TransportType.TCP, cfg.tcp_cfg)
if not transport:
transport = t
self.lib.start()
if cfg.acc_cfg:
self.DEBUG("Creating account %(uri)s..\n" % {'uri': cfg.acc_cfg.id}, INFO)
self.acc = self.lib.create_account(cfg.acc_cfg, cb=self)
else:
self.DEBUG("Creating account for %(t)s..\n" % \
{'t': transport.info().description}, INFO)
self.acc = self.lib.create_account_for_transport(transport, cb=self)
self.acc.set_basic_status(True)
# Wait for ENTER before quitting
print "Press q to quit or --help/--h for help"
while True:
input = sys.stdin.readline().strip(" \t\r\n")
if not self.handle_cmd(None, None, input):
if input=="q":
break
self.lib.destroy()
self.lib = None
except pj.Error, e:
print "Exception: " + str(e)
if self.lib:
self.lib.destroy()
self.lib = None
def broadcast_pager(self, exclude_member, body, mime_type="text/plain"):
self.DEBUG("Broadcast: " + body + "\n")
for uri, m in self.members.iteritems():
if m != exclude_member:
m.send_pager(body, mime_type)
def add_to_voice_conf(self, member):
if not member.call:
return
src_ci = member.call.info()
self.DEBUG("bot.add_to_voice_conf\n")
for uri, m in self.members.iteritems():
if m==member:
continue
if not m.call:
continue
dst_ci = m.call.info()
if dst_ci.media_state==pj.MediaState.ACTIVE and dst_ci.conf_slot!=-1:
self.lib.conf_connect(src_ci.conf_slot, dst_ci.conf_slot)
self.lib.conf_connect(dst_ci.conf_slot, src_ci.conf_slot)
def on_member_left(self, member):
if not member.call and not member.buddy:
del self.members[member.uri]
del member
def handle_admin_cmd(self, member, body):
if member and self.cfg.admins and not member.uri in self.cfg.admins:
member.send_pager("You are not admin")
return
args = body.split()
msg = ""
if len(args)==1:
args.append(" ")
if args[1]=="list":
if not self.cfg.admins:
msg = "Everyone is admin!"
else:
msg = str(self.cfg.admins)
elif args[1]=="add":
if len(args)!=3:
msg = "Usage: add <URI>"
else:
self.cfg.admins.append(args[2])
msg = args[2] + " added as admin"
elif args[1]=="del":
if len(args)!=3:
msg = "Usage: del <URI>"
elif args[2] not in self.cfg.admins:
msg = args[2] + " is not admin"
else:
self.cfg.admins.remove(args[2])
msg = args[2] + " has been removed from admins"
elif args[1]=="rr":
msg = "Reregistering.."
self.acc.set_registration(True)
elif args[1]=="call":
if len(args)!=3:
msg = "Usage: call <URI>"
else:
uri = args[2]
try:
call = self.acc.make_call(uri)
except pj.Error, e:
msg = "Error: " + str(e)
call = None
if call:
if not uri in self.members:
m = Member(self, uri)
self.members[m.uri] = m
else:
m = self.members[uri]
msg = "Adding " + m.uri + " to voice conference.."
m.join_call(call)
elif args[1]=="dc" or args[1]=="hold" or args[1]=="update" or args[1]=="reinvite":
if len(args)!=3:
msg = "Usage: " + args[1] + " <URI>"
else:
uri = args[2]
if not uri in self.members:
msg = "Member not found/URI doesn't match (note: case matters!)"
else:
m = self.members[uri]
if m.call:
if args[1]=="dc":
msg = "Disconnecting.."
m.call.hangup(603, "You're disconnected by admin")
elif args[1]=="hold":
msg = "Holding the call"
m.call.hold()
elif args[1]=="update":
msg = "Sending UPDATE"
m.call.update()
elif args[1]=="reinvite":
msg = "Sending re-INVITE"
m.call.reinvite()
else:
msg = "He is not in call"
else:
msg = "Unknown admin command " + body
#print "msg is '%(msg)s'" % {'msg': msg}
if True:
if member:
member.send_pager(msg)
else:
print msg
def handle_cmd(self, member, from_uri, body):
body = body.strip(" \t\r\n")
msg = ""
handled = True
if body=="--l" or body=="--list":
msg = self.listmembers()
if msg=="":
msg = "Nobody is here"
elif body[0:3]=="--s":
msg = self.showsettings()
elif body[0:6]=="--html" and member:
if body[8:11]=="off":
member.html = False
else:
member.html = True
elif body=="--h" or body=="--help":
msg = self.helpstring()
elif body=="--leave":
if not member or not member.buddy:
msg = "You are not in chatroom"
else:
member.buddy.unsubscribe()
elif body[0:3]=="--j":
if not from_uri in self.members:
m = Member(self, from_uri)
self.members[m.uri] = m
self.DEBUG("Adding " + m.uri + " to chatroom\n")
m.join_chat()
else:
m = self.members[from_uri]
self.DEBUG("Adding " + m.uri + " to chatroom\n")
m.join_chat()
elif body[0:3]=="--a":
self.handle_admin_cmd(member, body)
handled = True
else:
handled = False
if msg:
if member:
member.send_pager(msg)
elif from_uri:
self.acc.send_pager(from_uri, msg);
else:
print msg
return handled
def on_incoming_call(self, call):
self.DEBUG("on_incoming_call from %(uri)s\n" % {'uri': call.info().remote_uri}, INFO)
ci = call.info()
if not ci.remote_uri in self.members:
m = Member(self, ci.remote_uri)
self.members[m.uri] = m
m.join_call(call)
else:
m = self.members[ci.remote_uri]
m.join_call(call)
call.answer(200)
def on_incoming_subscribe(self, buddy, from_uri, contact_uri, pres_obj):
self.DEBUG("on_incoming_subscribe from %(uri)s\n" % from_uri, INFO)
return (200, 'OK')
def on_reg_state(self):
ai = self.acc.info()
self.DEBUG("Registration state: %(code)d/%(reason)s\n" % \
{'code': ai.reg_status, 'reason': ai.reg_reason}, INFO)
if ai.reg_status/100==2 and ai.reg_expires > 0:
self.acc.set_basic_status(True)
def on_pager(self, from_uri, contact, mime_type, body):
body = body.strip(" \t\r\n")
if not self.handle_cmd(None, from_uri, body):
self.acc.send_pager(from_uri, "You have not joined the chat room. Type '--join' to join or '--help' for the help")
def on_pager_status(self, to_uri, body, im_id, code, reason):
pass
def on_typing(self, from_uri, contact, is_typing):
pass
##############################################################################
#
#
# main()
#
#
if __name__ == "__main__":
    # Create the bot singleton and run it with the default config module;
    # main() blocks on the console loop until the user quits.
    bot = Bot()
    bot.main(CFG_FILE)
import unittest
from programy.processors.post.formatnumbers import FormatNumbersPostProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
class FormatNmbersTests(unittest.TestCase):
    """Checks that FormatNumbersPostProcessor normalises spacing around
    decimal points and thousands separators."""

    def setUp(self):
        self.bot = Bot(Brain(BrainConfiguration()), config=BotConfiguration())

    def test_format_numbers(self):
        processor = FormatNumbersPostProcessor()
        # (raw bot output, expected normalised form)
        cases = [
            ("23", "23"),
            ("23.45", "23.45"),
            ("23. 45", "23.45"),
            ("23 . 45", "23.45"),
            ("23,450", "23,450"),
            ("23, 450", "23,450"),
            ("23, 450, 000", "23,450,000"),
        ]
        for raw, expected in cases:
            result = processor.process(self.bot, "testid", raw)
            self.assertIsNotNone(result)
            self.assertEqual(expected, result)
#!/usr/bin/env python3
# Copyright (c) 2019 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""
# The 94 valid descriptor characters; a character's index encodes its
# 5-bit symbol (low bits) and 2-bit group (high bits) for the checksum.
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
# Bech32-style alphabet used to render the 8-character checksum.
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
# BCH code generator coefficients for the 40-bit checksum polymod.
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
def descsum_polymod(symbols):
    """Internal function that computes the descriptor checksum."""
    chk = 1
    for value in symbols:
        top = chk >> 35
        chk = ((chk & 0x7ffffffff) << 5) ^ value
        # Reduce by the generator polynomial, one coefficient per set bit.
        for bit, coeff in enumerate(GENERATOR):
            if (top >> bit) & 1:
                chk ^= coeff
    return chk
def descsum_expand(s):
    """Internal function that does the character to symbol expansion.

    Returns None if *s* contains a character outside INPUT_CHARSET.
    """
    symbols = []
    groups = []
    for ch in s:
        pos = INPUT_CHARSET.find(ch)
        if pos == -1:
            return None
        symbols.append(pos & 31)
        groups.append(pos >> 5)
        # Every three group values are folded into one extra symbol.
        if len(groups) == 3:
            g0, g1, g2 = groups
            symbols.append(g0 * 9 + g1 * 3 + g2)
            groups = []
    # Fold any leftover group values (one or two of them).
    if len(groups) == 1:
        symbols.append(groups[0])
    elif len(groups) == 2:
        symbols.append(groups[0] * 3 + groups[1])
    return symbols
def descsum_create(s):
    """Add a checksum to a descriptor without one.

    Returns the descriptor with '#' and the 8-character checksum appended.
    Assumes *s* contains only characters from INPUT_CHARSET.
    """
    # Append 8 zero symbols, compute the polymod, and xor with 1 (the
    # initial checksum state) so a correct descriptor verifies to 1.
    symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
    checksum = descsum_polymod(symbols) ^ 1
    return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
def descsum_check(s, require=True):
    """Verify that the checksum is correct in a descriptor.

    A descriptor with no '#' passes only when require is False.
    """
    if '#' not in s:
        return not require
    # The checksum is exactly the last 8 characters, preceded by '#'.
    if s[-9] != '#':
        return False
    payload, checksum = s[:-9], s[-8:]
    if any(ch not in CHECKSUM_CHARSET for ch in checksum):
        return False
    symbols = descsum_expand(payload) + [CHECKSUM_CHARSET.find(ch) for ch in checksum]
    return descsum_polymod(symbols) == 1
#![cfg(feature = "macros")]
#![allow(clippy::disallowed_names)]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))]
use tokio::test as maybe_tokio_test;
use tokio::sync::oneshot;
use tokio_test::{assert_ok, assert_pending, assert_ready};
use std::future::poll_fn;
use std::task::Poll::Ready;
// `select!` with a single async-block branch and a trailing comma.
#[maybe_tokio_test]
async fn sync_one_lit_expr_comma() {
    let foo = tokio::select! {
        foo = async { 1 } => foo,
    };

    assert_eq!(foo, 1);
}

// With no polled branches, the `else` branch is taken immediately.
#[maybe_tokio_test]
async fn no_branch_else_only() {
    let foo = tokio::select! {
        else => 1,
    };

    assert_eq!(foo, 1);
}

#[maybe_tokio_test]
async fn no_branch_else_only_biased() {
    let foo = tokio::select! {
        biased;
        else => 1,
    };

    assert_eq!(foo, 1);
}

// A `select!` nested inside another branch's handler expression.
#[maybe_tokio_test]
async fn nested_one() {
    let foo = tokio::select! {
        foo = async { 1 } => tokio::select! {
            bar = async { foo } => bar,
        },
    };

    assert_eq!(foo, 1);
}

// Same as above but without the trailing comma after the handler.
#[maybe_tokio_test]
async fn sync_one_lit_expr_no_comma() {
    let foo = tokio::select! {
        foo = async { 1 } => foo
    };

    assert_eq!(foo, 1);
}

// Handler written as a block rather than a bare expression.
#[maybe_tokio_test]
async fn sync_one_lit_expr_block() {
    let foo = tokio::select! {
        foo = async { 1 } => { foo }
    };

    assert_eq!(foo, 1);
}

// The selected expression may be a function call returning a future.
#[maybe_tokio_test]
async fn sync_one_await() {
    let foo = tokio::select! {
        foo = one() => foo,
    };

    assert_eq!(foo, 1);
}

// ...or an already-created future bound to a local.
#[maybe_tokio_test]
async fn sync_one_ident() {
    let one = one();

    let foo = tokio::select! {
        foo = one => foo,
    };

    assert_eq!(foo, 1);
}

// With two immediately-ready branches, exactly one handler must run.
#[maybe_tokio_test]
async fn sync_two() {
    use std::cell::Cell;

    let cnt = Cell::new(0);

    let res = tokio::select! {
        foo = async {
            cnt.set(cnt.get() + 1);
            1
        } => foo,
        bar = async {
            cnt.set(cnt.get() + 1);
            2
        } => bar,
    };

    assert_eq!(1, cnt.get());
    assert!(res == 1 || res == 2);
}

// Values moved into a branch's future may be dropped inside it.
#[maybe_tokio_test]
async fn drop_in_fut() {
    let s = "hello".to_string();

    let res = tokio::select! {
        foo = async {
            let v = one().await;
            drop(s);
            v
        } => foo
    };

    assert_eq!(res, 1);
}
}
// Only one of two oneshot receivers is ready; its branch must be chosen.
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn one_ready() {
    let (tx1, rx1) = oneshot::channel::<i32>();
    let (_tx2, rx2) = oneshot::channel::<i32>();

    tx1.send(1).unwrap();

    let v = tokio::select! {
        res = rx1 => {
            assert_ok!(res)
        },
        _ = rx2 => unreachable!(),
    };

    assert_eq!(1, v);
}

// Drain two mpsc channels with `select!`, using pattern branches to skip
// closed channels and the `else` branch to detect both being closed.
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn select_streams() {
    use tokio::sync::mpsc;

    let (tx1, mut rx1) = mpsc::unbounded_channel::<i32>();
    let (tx2, mut rx2) = mpsc::unbounded_channel::<i32>();

    tokio::spawn(async move {
        assert_ok!(tx2.send(1));
        tokio::task::yield_now().await;

        assert_ok!(tx1.send(2));
        tokio::task::yield_now().await;

        assert_ok!(tx2.send(3));
        tokio::task::yield_now().await;

        drop((tx1, tx2));
    });

    let mut rem = true;
    let mut msgs = vec![];

    while rem {
        tokio::select! {
            Some(x) = rx1.recv() => {
                msgs.push(x);
            }
            Some(y) = rx2.recv() => {
                msgs.push(y);
            }
            else => {
                rem = false;
            }
        }
    }

    msgs.sort_unstable();
    assert_eq!(&msgs[..], &[1, 2, 3]);
}
// Selecting on `&mut` references leaves the losing future usable, so it
// can still be awaited inside the winning branch's handler.
#[maybe_tokio_test]
async fn move_uncompleted_futures() {
    let (tx1, mut rx1) = oneshot::channel::<i32>();
    let (tx2, mut rx2) = oneshot::channel::<i32>();

    tx1.send(1).unwrap();
    tx2.send(2).unwrap();

    let ran;

    tokio::select! {
        res = &mut rx1 => {
            assert_eq!(1, assert_ok!(res));
            assert_eq!(2, assert_ok!(rx2.await));
            ran = true;
        },
        res = &mut rx2 => {
            assert_eq!(2, assert_ok!(res));
            assert_eq!(1, assert_ok!(rx1.await));
            ran = true;
        },
    }

    assert!(ran);
}

// Branch bindings are visible inside a nested `select!`'s handlers.
#[maybe_tokio_test]
async fn nested() {
    let res = tokio::select! {
        x = async { 1 } => {
            tokio::select! {
                y = async { 2 } => x + y,
            }
        }
    };

    assert_eq!(res, 3);
}
// Pin the generated future's size on 64-bit targets so accidental size
// regressions in the `select!` expansion are caught at test time.
#[cfg(target_pointer_width = "64")]
mod pointer_64_tests {
    use super::maybe_tokio_test;
    use futures::future;
    use std::mem;

    #[maybe_tokio_test]
    async fn struct_size_1() {
        let fut = async {
            let ready = future::ready(0i32);

            tokio::select! {
                _ = ready => {},
            }
        };

        assert_eq!(mem::size_of_val(&fut), 32);
    }

    #[maybe_tokio_test]
    async fn struct_size_2() {
        let fut = async {
            let ready1 = future::ready(0i32);
            let ready2 = future::ready(0i32);

            tokio::select! {
                _ = ready1 => {},
                _ = ready2 => {},
            }
        };

        assert_eq!(mem::size_of_val(&fut), 40);
    }

    #[maybe_tokio_test]
    async fn struct_size_3() {
        let fut = async {
            let ready1 = future::ready(0i32);
            let ready2 = future::ready(0i32);
            let ready3 = future::ready(0i32);

            tokio::select! {
                _ = ready1 => {},
                _ = ready2 => {},
                _ = ready3 => {},
            }
        };

        assert_eq!(mem::size_of_val(&fut), 48);
    }
}
// One branch's future mutably borrows a variable that another branch's
// handler mutates; this compiles because the borrows are never live
// simultaneously.
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block() {
    let mut value = 234;

    tokio::select! {
        _ = require_mutable(&mut value) => { },
        _ = async_noop() => {
            value += 5;
        },
    }

    assert!(value >= 234);
}

// Same as above, with an additional `else` branch touching the variable.
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() {
    let mut value = 234;

    tokio::select! {
        _ = require_mutable(&mut value) => { },
        _ = async_noop() => {
            value += 5;
        },
        else => {
            value += 27;
        },
    }

    assert!(value >= 234);
}

// A branch whose pattern failed to match must never be polled again; the
// closure asserts it is polled at most once across repeated polls.
#[maybe_tokio_test]
async fn future_panics_after_poll() {
    use tokio_test::task;

    let (tx, rx) = oneshot::channel();

    let mut polled = false;

    let f = poll_fn(|_| {
        assert!(!polled);
        polled = true;
        Ready(None::<()>)
    });

    let mut f = task::spawn(async {
        tokio::select! {
            Some(_) = f => unreachable!(),
            ret = rx => ret.unwrap(),
        }
    });

    assert_pending!(f.poll());
    assert_pending!(f.poll());

    assert_ok!(tx.send(1));

    let res = assert_ready!(f.poll());
    assert_eq!(1, res);
}

// A `, if false` precondition disables the branch entirely: its future is
// never polled (polling this one would panic).
#[maybe_tokio_test]
async fn disable_with_if() {
    use tokio_test::task;

    let f = poll_fn(|_| panic!());
    let (tx, rx) = oneshot::channel();

    let mut f = task::spawn(async {
        tokio::select! {
            _ = f, if false => unreachable!(),
            _ = rx => (),
        }
    });

    assert_pending!(f.poll());

    assert_ok!(tx.send(()));
    assert!(f.is_woken());

    assert_ready!(f.poll());
}

// Emulate `join!` by selecting in a loop, using preconditions to disable
// branches once they have produced their value.
#[maybe_tokio_test]
async fn join_with_select() {
    use tokio_test::task;

    let (tx1, mut rx1) = oneshot::channel();
    let (tx2, mut rx2) = oneshot::channel();

    let mut f = task::spawn(async {
        let mut a = None;
        let mut b = None;

        while a.is_none() || b.is_none() {
            tokio::select! {
                v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)),
                v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2))
            }
        }

        (a.unwrap(), b.unwrap())
    });

    assert_pending!(f.poll());

    assert_ok!(tx1.send(123));
    assert!(f.is_woken());
    assert_pending!(f.poll());

    assert_ok!(tx2.send(456));
    assert!(f.is_woken());
    let (a, b) = assert_ready!(f.poll());

    assert_eq!(a, 123);
    assert_eq!(b, 456);
}

// A disabled (`if false`) sleep branch must never fire, even though its
// future expression is evaluated.
#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition() {
    use tokio::time::{self, Duration};

    tokio::select! {
        _ = time::sleep(Duration::from_millis(10)), if false => {
            panic!("if condition ignored")
        }
        _ = async { 1u32 } => {
        }
    }
}

#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition_biased() {
    use tokio::time::{self, Duration};

    tokio::select! {
        biased;
        _ = time::sleep(Duration::from_millis(10)), if false => {
            panic!("if condition ignored")
        }
        _ = async { 1u32 } => {
        }
    }
}
// Exercise the macro at 64 branches (its supported maximum) to make sure
// the branch-count expansion compiles and behaves.
#[maybe_tokio_test]
async fn many_branches() {
    let num = tokio::select! {
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
        x = async { 1 } => x,
    };

    assert_eq!(1, num);
}
// A branch whose future returns `!` (never) must type-check cleanly
// alongside a value-producing branch.
#[maybe_tokio_test]
async fn never_branch_no_warnings() {
    let t = tokio::select! {
        _ = async_never() => 0,
        one_async_ready = one() => one_async_ready,
    };
    assert_eq!(t, 1);
}

// Helper futures shared by the tests above.
async fn one() -> usize {
    1
}

async fn require_mutable(_: &mut i32) {}

async fn async_noop() {}

// A future that never completes; its return type is `!`.
async fn async_never() -> ! {
    futures::future::pending().await
}

// From https://github.com/tokio-rs/tokio/issues/2857
// A `mut` binding on the branch's left-hand side must be accepted.
#[maybe_tokio_test]
async fn mut_on_left_hand_side() {
    let v = async move {
        let ok = async { 1 };
        tokio::pin!(ok);
        tokio::select! {
            mut a = &mut ok => {
                a += 1;
                a
            }
        }
    }
    .await;
    assert_eq!(v, 2);
}
// With `biased;`, branches are polled strictly in declaration order, so
// the first ready branch in source order wins.
#[maybe_tokio_test]
async fn biased_one_not_ready() {
    let (_tx1, rx1) = oneshot::channel::<i32>();
    let (tx2, rx2) = oneshot::channel::<i32>();
    let (tx3, rx3) = oneshot::channel::<i32>();

    tx2.send(2).unwrap();
    tx3.send(3).unwrap();

    let v = tokio::select! {
        biased;

        _ = rx1 => unreachable!(),
        res = rx2 => {
            assert_ok!(res)
        },
        _ = rx3 => {
            panic!("This branch should never be activated because `rx2` should be polled before `rx3` due to `biased;`.")
        }
    };

    assert_eq!(2, v);
}

// Preconditions gate the biased poll order so the branches are forced to
// complete in the order 1, 2, 3 across loop iterations.
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn biased_eventually_ready() {
    use tokio::task::yield_now;

    let one = async {};
    let two = async { yield_now().await };
    let three = async { yield_now().await };

    let mut count = 0u8;

    tokio::pin!(one, two, three);

    loop {
        tokio::select! {
            biased;

            _ = &mut two, if count < 2 => {
                count += 1;
                assert_eq!(count, 2);
            }
            _ = &mut three, if count < 3 => {
                count += 1;
                assert_eq!(count, 3);
            }
            _ = &mut one, if count < 1 => {
                count += 1;
                assert_eq!(count, 1);
            }
            else => break,
        }
    }

    assert_eq!(count, 3);
}
// https://github.com/tokio-rs/tokio/issues/3830
// https://github.com/rust-lang/rust-clippy/issues/7304
// Lint regression test: the macro expansion must not trip clippy's
// `default_numeric_fallback` lint in user code; it only needs to compile.
#[warn(clippy::default_numeric_fallback)]
pub async fn default_numeric_fallback() {
    tokio::select! {
        _ = async {} => (),
        else => (),
    }
}
// https://github.com/tokio-rs/tokio/issues/4182
// `mut`, `ref` and `ref mut` inside branch patterns must all expand to
// bindings with the expected mutability/borrow behavior.
#[maybe_tokio_test]
async fn mut_ref_patterns() {
    // `mut` binding: the arm owns the value and may reassign it.
    tokio::select! {
        Some(mut foo) = async { Some("1".to_string()) } => {
            assert_eq!(foo, "1");
            foo = "2".to_string();
            assert_eq!(foo, "2");
        },
    };
    // `ref` binding: the arm gets a shared reference.
    tokio::select! {
        Some(ref foo) = async { Some("1".to_string()) } => {
            assert_eq!(*foo, "1");
        },
    };
    // `ref mut` binding: the arm gets a mutable reference.
    tokio::select! {
        Some(ref mut foo) = async { Some("1".to_string()) } => {
            assert_eq!(*foo, "1");
            *foo = "2".to_string();
            assert_eq!(*foo, "2");
        },
    };
}
#[cfg(tokio_unstable)]
mod unstable {
    //! Tests for the unstable seeded-RNG API: seeding the runtime RNG makes
    //! the otherwise random `select!` branch ordering reproducible.
    use tokio::runtime::RngSeed;

    // Two current-thread runtimes built from the same seed must make the
    // same sequence of `select!` branch choices.
    #[test]
    fn deterministic_select_current_thread() {
        let seed = b"bytes used to generate seed";
        let rt1 = tokio::runtime::Builder::new_current_thread()
            .rng_seed(RngSeed::from_bytes(seed))
            .build()
            .unwrap();
        let rt1_values = rt1.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
        let rt2 = tokio::runtime::Builder::new_current_thread()
            .rng_seed(RngSeed::from_bytes(seed))
            .build()
            .unwrap();
        let rt2_values = rt2.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
        assert_eq!(rt1_values, rt2_values);
    }

    // Same determinism property on the multi-thread runtime, with the select
    // running inside a spawned task on a single worker thread.
    #[test]
    #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
    fn deterministic_select_multi_thread() {
        let seed = b"bytes used to generate seed";
        let (tx, rx) = std::sync::mpsc::channel();
        let rt1 = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(1)
            .on_thread_park(move || tx.send(()).unwrap())
            .rng_seed(RngSeed::from_bytes(seed))
            .build()
            .unwrap();
        // This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
        // ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
        rx.recv().unwrap();
        let rt1_values = rt1.block_on(async {
            tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
                .await
                .unwrap()
        });
        let (tx, rx) = std::sync::mpsc::channel();
        let rt2 = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(1)
            .on_thread_park(move || tx.send(()).unwrap())
            .rng_seed(RngSeed::from_bytes(seed))
            .build()
            .unwrap();
        // This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
        // ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
        rx.recv().unwrap();
        let rt2_values = rt2.block_on(async {
            tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
                .await
                .unwrap()
        });
        assert_eq!(rt1_values, rt2_values);
    }

    // Ten equally-ready branches: which one wins is decided by the runtime
    // RNG, which is what the tests above pin down.
    async fn select_0_to_9() -> u32 {
        tokio::select!(
            x = async { 0 } => x,
            x = async { 1 } => x,
            x = async { 2 } => x,
            x = async { 3 } => x,
            x = async { 4 } => x,
            x = async { 5 } => x,
            x = async { 6 } => x,
            x = async { 7 } => x,
            x = async { 8 } => x,
            x = async { 9 } => x,
        )
    }
}
// `select!` must accept any `IntoFuture` operand, not only types that are
// already `Future`.
#[tokio::test]
async fn select_into_future() {
    struct NotAFuture;
    impl std::future::IntoFuture for NotAFuture {
        type Output = ();
        type IntoFuture = std::future::Ready<()>;
        fn into_future(self) -> Self::IntoFuture {
            std::future::ready(())
        }
    }
    tokio::select! {
        () = NotAFuture => {},
    }
}
// regression test for https://github.com/tokio-rs/tokio/issues/6721
// The temporary returned by `std::future::ready(())` must stay alive long
// enough to be borrowed with `&mut` inside the expansion (temporary lifetime
// extension); the test only needs to compile and run.
#[tokio::test]
async fn temporary_lifetime_extension() {
    tokio::select! {
        () = &mut std::future::ready(()) => {},
    }
}
#[tokio::test]
async fn select_is_budget_aware() {
const BUDGET: usize = 128;
let task = || {
Box::pin(async move {
tokio::select! {
biased;
() = tokio::task::coop::consume_budget() => {},
() = std::future::ready(()) => {}
}
})
};
for _ in 0..BUDGET {
let poll = futures::poll!(&mut task());
assert!(poll.is_ready());
}
let poll = futures::poll!(&mut task());
assert!(poll.is_pending());
} | rust | github | https://github.com/tokio-rs/tokio | tokio/tests/macros_select.rs |
# coders by Vlamo 2012 (version: 0.2)
from Components.Converter.Converter import Converter
from Components.Element import cached
from Poll import Poll
from os import popen, statvfs
SIZE_UNITS = ["B", "KB", "MB", "GB", "TB", "PB", "EB"]
class ProgressDiskSpaceInfo(Poll, Converter):
    """Skin converter reporting system resource usage.

    Depending on the skin's converter argument it renders HDD temperature,
    load average, memory/swap usage or disk usage (USB, HDD or flash) either
    as text (``text``) or as an integer 0-100 percentage (``value``) suitable
    for progress bars.

    Fixes over the previous revision: duplicate ``doSuspend`` definition
    removed, file/pipe handles are now always closed, percentages stay
    integers under Python 3 (``//``), and ``getSizeStr`` can no longer index
    past the last size unit.
    """

    # Converter type identifiers, selected from the skin argument string.
    HDDTEMP = 0
    LOADAVG = 1
    MEMTOTAL = 2
    MEMFREE = 3
    SWAPTOTAL = 4
    SWAPFREE = 5
    USBINFO = 6
    HDDINFO = 7
    FLASHINFO = 8

    def __init__(self, type):
        Converter.__init__(self, type)
        Poll.__init__(self)
        args = type.split(',')
        # Optional output formats requested by the skin.
        self.shortFormat = "Short" in args
        self.fullFormat = "Full" in args
        # Map the skin argument to a type constant; flash info is the default.
        for name, kind in (
                ("HddTemp", self.HDDTEMP),
                ("LoadAvg", self.LOADAVG),
                ("MemTotal", self.MEMTOTAL),
                ("MemFree", self.MEMFREE),
                ("SwapTotal", self.SWAPTOTAL),
                ("SwapFree", self.SWAPFREE),
                ("UsbInfo", self.USBINFO),
                ("HddInfo", self.HDDINFO)):
            if name in args:
                self.type = kind
                break
        else:
            self.type = self.FLASHINFO
        # Disk statistics change slowly, so poll them less frequently.
        if self.type in (self.FLASHINFO, self.HDDINFO, self.USBINFO):
            self.poll_interval = 5000
        else:
            self.poll_interval = 1000
        self.poll_enabled = True

    def doSuspend(self, suspended):
        """Pause polling while the element is hidden; refresh on resume."""
        if suspended:
            self.poll_enabled = False
        else:
            self.downstream_elements.changed((self.CHANGED_POLL,))
            self.poll_enabled = True

    @cached
    def getText(self):
        """Return the formatted text for the configured information type."""
        text = "N/A"
        if self.type == self.HDDTEMP:
            text = self.getHddTemp()
        elif self.type == self.LOADAVG:
            text = self.getLoadAvg()
        else:
            # (source, label) per type: a /proc/meminfo prefix or mount path.
            entry = {
                self.MEMTOTAL: ("Mem", "Ram"),
                self.MEMFREE: ("Mem", "Ram"),
                self.SWAPTOTAL: ("Swap", "Swap"),
                self.SWAPFREE: ("Swap", "Swap"),
                self.USBINFO: ("/media/usb", "USB"),
                self.HDDINFO: ("/media/hdd", "HDD"),
                self.FLASHINFO: ("/", "Flash"),
            }[self.type]
            if self.type in (self.USBINFO, self.HDDINFO, self.FLASHINFO):
                info = self.getDiskInfo(entry[0])
            else:
                info = self.getMemInfo(entry[0])
            # info is [size, used, avail, use%]; size == 0 means unavailable.
            if info[0] == 0:
                text = "%s: Not Available" % (entry[1])
            elif self.shortFormat:
                text = "%s: %s, in use: %s%%" % (entry[1], self.getSizeStr(info[0]), info[3])
            elif self.fullFormat:
                text = "%s: %s Free:%s Used:%s (%s%%)" % (entry[1], self.getSizeStr(info[0]), self.getSizeStr(info[2]), self.getSizeStr(info[1]), info[3])
            else:
                text = "%s: %s Used:%s Free:%s" % (entry[1], self.getSizeStr(info[0]), self.getSizeStr(info[1]), self.getSizeStr(info[2]))
        return text

    @cached
    def getValue(self):
        """Return usage as an integer percentage (0-100) for progress bars."""
        result = 0
        if self.type in (self.MEMTOTAL, self.MEMFREE, self.SWAPTOTAL, self.SWAPFREE):
            entry = {self.MEMTOTAL: "Mem", self.MEMFREE: "Mem", self.SWAPTOTAL: "Swap", self.SWAPFREE: "Swap"}[self.type]
            result = self.getMemInfo(entry)[3]
        elif self.type in (self.USBINFO, self.HDDINFO, self.FLASHINFO):
            path = {self.USBINFO: "/media/usb", self.HDDINFO: "/media/hdd", self.FLASHINFO: "/"}[self.type]
            result = self.getDiskInfo(path)[3]
        return result

    text = property(getText)
    value = property(getValue)
    range = 100

    def getHddTemp(self):
        """Return the drive temperature reported by the hddtemp utility."""
        textvalue = "No info"
        try:
            pipe = popen("hddtemp -n -q /dev/sda")
            out_line = pipe.readline()
            pipe.close()  # pipe was previously leaked; always release it
            textvalue = "Hdd C:" + out_line[:4]
        except (IOError, OSError):
            pass
        return textvalue

    def getLoadAvg(self):
        """Return the load-average string from /proc/loadavg."""
        textvalue = "No info"
        try:
            # Read the proc file directly instead of spawning `cat`.
            with open("/proc/loadavg") as fd:
                out_line = fd.readline()
            textvalue = "loadavg:" + out_line[:15]
        except (IOError, OSError):
            pass
        return textvalue

    def getMemInfo(self, value):
        """Return [size, used, avail, use%] in bytes for a /proc/meminfo
        prefix ("Mem" or "Swap"); all zeros when unavailable."""
        result = [0, 0, 0, 0]
        try:
            found = 0
            # `with` guarantees the fd is closed even if parsing raises.
            with open("/proc/meminfo") as fd:
                for line in fd:
                    if value + "Total" in line:
                        found += 1
                        result[0] = int(line.split()[1]) * 1024  # size
                    elif value + "Free" in line:
                        found += 1
                        result[2] = int(line.split()[1]) * 1024  # avail
                    if found > 1:
                        if result[0] > 0:
                            result[1] = result[0] - result[2]  # used
                            # integer division keeps the percentage an int
                            result[3] = result[1] * 100 // result[0]
                        break
        except (IOError, OSError, ValueError, IndexError):
            pass
        return result

    def getDiskInfo(self, path):
        """Return [size, used, avail, use%] in bytes for the filesystem
        mounted at `path`; all zeros when it is not mounted."""
        def isMountPoint():
            # Only mounted paths are reported, so a missing USB/HDD shows
            # "Not Available" instead of the parent filesystem's stats.
            try:
                # `with` fixes the fd leak the early `return True` caused.
                with open('/proc/mounts', 'r') as fd:
                    for line in fd:
                        fields = line.split()
                        if len(fields) > 1 and fields[1] == path:
                            return True
            except (IOError, OSError):
                return None
            return False
        result = [0, 0, 0, 0]
        if isMountPoint():
            try:
                st = statvfs(path)
            except OSError:
                st = None
            if st is not None and 0 not in (st.f_bsize, st.f_blocks):
                result[0] = st.f_bsize * st.f_blocks  # size
                result[2] = st.f_bsize * st.f_bavail  # avail
                result[1] = result[0] - result[2]  # used
                # integer division keeps the percentage an int
                result[3] = result[1] * 100 // result[0]
        return result

    def getSizeStr(self, value, u=0):
        """Format a byte count, e.g. "1.5 GB", using 1024-based units.

        `u` is the index into SIZE_UNITS to start from (0 = bytes).
        """
        fractal = 0
        if value >= 1024:
            fmt = "%(size)u.%(frac)d %(unit)s"
            # Stop at the last known unit so SIZE_UNITS[u] cannot raise
            # IndexError (the old bound `u < len(SIZE_UNITS)` allowed it).
            while (value >= 1024) and (u < len(SIZE_UNITS) - 1):
                (value, mod) = divmod(value, 1024)
                fractal = mod * 10 // 1024
                u += 1
        else:
            fmt = "%(size)u %(unit)s"
        return fmt % {"size": value, "frac": fractal, "unit": SIZE_UNITS[u]}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.