repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
cordmata/mediaampy | mediaamp/services.py | 1 | 10270 | from functools import partial
import re
from .exceptions import ServiceNotAvailable
services = {}


def register(cls):
    """Class decorator: record `cls` in the module-level `services` registry.

    The registry key is the class's explicit `registry_key` attribute when
    present, otherwise the class name split on capital letters
    (e.g. ``AccessDataService`` -> ``"Access Data Service"``).
    Classes that are not `BaseService` subclasses are left unregistered.
    """
    if issubclass(cls, BaseService):
        key = getattr(cls, 'registry_key', None)
        if not key:
            key = ' '.join(re.findall('[A-Z][^A-Z]*', cls.__name__))
        services[key] = cls
    # Return the class so this works as a decorator: previously nothing was
    # returned, which rebound every decorated class name to None at module level.
    return cls
class Endpoint(object):
    """A single HTTP endpoint of a MediaAmp/MPX service.

    Instances are typically declared as class attributes on `BaseService`
    subclasses; the owning service fills in `service` (and a default `name`)
    when it is instantiated.  Any extra keyword arguments become default URL
    query parameters sent with every request through this endpoint.
    """

    def __init__(self, path=None, name=None, service=None, **kwargs):
        self.path = path
        self.name = name
        self.service = service
        # Extra keyword arguments become default query parameters; a 'schema'
        # parameter is always present, defaulting to '1.0'.
        self.default_params = kwargs.copy()
        self.default_params.setdefault('schema', '1.0')

    def urljoin(self, *args):
        """Join the service base URL, endpoint path/name, and extra parts.

        `None` parts are skipped; leading slashes on parts and any trailing
        slash on the result are stripped.
        """
        parts = (self.service.base_url, self.path, self.name) + args
        return '/'.join([
            part.lstrip('/') for part in parts if part is not None
        ]).rstrip('/')

    def get(self, extra_path=None, **kwargs):
        return self._make_request('get', extra_path, **kwargs)

    def put(self, extra_path=None, **kwargs):
        return self._make_request('put', extra_path, **kwargs)

    def post(self, extra_path=None, **kwargs):
        return self._make_request('post', extra_path, **kwargs)

    def delete(self, extra_path=None, **kwargs):
        return self._make_request('delete', extra_path, **kwargs)

    def _make_request(self, method, extra_path=None, **kwargs):
        """Issue `method` against this endpoint via the service session.

        Per-call `params` are merged over `default_params` (per-call wins).
        """
        params = dict(self.default_params, **kwargs.pop('params', {}))
        # NOTE: removed a useless `extra_path = extra_path` self-assignment here.
        url = self.urljoin(extra_path)
        return self.service.session.request_json(method, url, params=params, **kwargs)

    def __call__(self, **kwargs):
        """ Override default URL parameters.
        Allow custom overrides of defaults to look like object
        initialization.
        """
        self.default_params.update(kwargs)
        return self
class BaseService(object):
    """Common base for MediaAmp services.

    On construction, every `Endpoint` declared as a class attribute on the
    subclass is bound to this service instance and seeded with the session's
    account as a default URL parameter.
    """

    def __init__(self, session, base_url):
        self.session = session
        self.base_url = base_url
        self.init_endpoints()

    def init_endpoints(self):
        """Bind class-declared endpoints to this instance and its account."""
        for attr_name, endpoint in vars(self.__class__).items():
            if not isinstance(endpoint, Endpoint):
                continue
            endpoint.service = self
            endpoint(account=self.session.account)
            # Endpoints with no explicit name take their attribute name.
            if endpoint.name is None:
                endpoint.name = attr_name

    @property
    def Notifications(self):
        return Endpoint(name='notify', service=self, account=self.session.account)
# Convenience partials: "Data" endpoints live under the '/data' path of the
# service base URL; "Business" endpoints live under '/web'.
DataEndpoint = partial(Endpoint, path='data')
BusinessEndpoint = partial(Endpoint, path='web')
# ---------------------------------------------------------------------------
# Declarative registry of MediaAmp/MPX services.  Each class attribute is an
# Endpoint whose URL name defaults to the attribute name (see
# BaseService.init_endpoints).  @register adds the class to `services`, keyed
# by the class name split on capitals (or an explicit `registry_key`).
# ---------------------------------------------------------------------------
@register
class AccessDataService(BaseService):
    Permission = DataEndpoint()
    Role = DataEndpoint()
    Authorization = BusinessEndpoint()
    Lookup = BusinessEndpoint()
    Registry = BusinessEndpoint()
@register
class AccountDataService(BaseService):
    Account = DataEndpoint()
@register
class CommerceConfigurationDataService(BaseService):
    CommerceRegistry = DataEndpoint()
    CheckoutConfiguration = DataEndpoint()
    FulfillmentConfiguration = DataEndpoint()
    PaymentConfiguration = DataEndpoint()
    Rule = DataEndpoint()
    RuleSet = DataEndpoint()
    TaxConfiguration = DataEndpoint()
@register
class CommerceEventDataService(BaseService):
    OrderTracking = DataEndpoint()
@register
class CuePointDataService(BaseService):
    # NOTE: these endpoints sit at the service root (no '/data' path prefix).
    CuePoint = Endpoint()
    CuePointType = Endpoint()
@register
class DeliveryDataService(BaseService):
    AccountSettings = DataEndpoint()
    AdPolicy = DataEndpoint()
    Restriction = DataEndpoint()
    UserAgent = DataEndpoint()
@register
class EndUserDataService(BaseService):
    Directory = DataEndpoint()
    Security = DataEndpoint()
    User = DataEndpoint()
    Authentication = BusinessEndpoint()
    Lookup = BusinessEndpoint()
    Self = BusinessEndpoint()
@register
class EntertainmentDataService(BaseService):
    AccountSettings = DataEndpoint()
    Channel = DataEndpoint()
    ChannelSchedule = DataEndpoint()
    Company = DataEndpoint()
    Credit = DataEndpoint()
    Listing = DataEndpoint()
    Location = DataEndpoint()
    Person = DataEndpoint()
    Program = DataEndpoint()
    ProgramAvailability = DataEndpoint()
    Station = DataEndpoint()
    Tag = DataEndpoint()
    TvSeason = DataEndpoint()
@register
class EntertainmentIngestDataService(BaseService):
    IngestConfig = DataEndpoint()
    IngestResult = DataEndpoint()
    Process = BusinessEndpoint()
@register
class EntertainmentFeedsService(BaseService):
    # name='' keeps the endpoint at the service root URL.
    Feed = Endpoint(name='')
@register
class EntitlementDataService(BaseService):
    AccountSettings = DataEndpoint()
    Adapter = DataEndpoint()
    AdapterConfiguration = DataEndpoint()
    Device = DataEndpoint()
    DistributionRight = DataEndpoint()
    DistributionRightLicenseCount = DataEndpoint()
    Entitlement = DataEndpoint()
    LicenseCount = DataEndpoint()
    PhysicalDevice = DataEndpoint()
    ProductDevice = DataEndpoint()
    Rights = DataEndpoint()
    SubjectPolicy = DataEndpoint()
    UserDevice = DataEndpoint()
@register
class EntitlementWebService(BaseService):
    Entitlements = BusinessEndpoint()
    RegisterDevice = BusinessEndpoint()
@register
class EntitlementLicenseService(BaseService):
    # These endpoints require non-default schema versions.
    ContentAccessRules = BusinessEndpoint(schema='1.2')
    License = BusinessEndpoint(schema='2.5')
@register
class FeedReaderDataService(BaseService):
    # Explicit key: auto-splitting would yield "Feed Reader Data Service".
    registry_key = 'FeedReader Data Service'
    FeedRecord = DataEndpoint()
@register
class FeedsDataService(BaseService):
    FeedAdapter = DataEndpoint()
    FeedConfig = DataEndpoint()
@register
class FeedsService(BaseService):
    Feed = Endpoint(name='')
@register
class FileManagementService(BaseService):
    FileManagement = BusinessEndpoint()
@register
class IngestDataService(BaseService):
    Adapter = DataEndpoint()
    AdapterConfiguration = DataEndpoint()
    Checksum = DataEndpoint()
@register
class IngestService(BaseService):
    # Lowercase attribute names here are intentional: the attribute name is
    # used verbatim as the URL path segment ('/ingest', '/test').
    ingest = Endpoint()
    test = Endpoint()
@register
class KeyDataService(BaseService):
    Key = DataEndpoint()
    UserKey = DataEndpoint()
@register
class LedgerDataService(BaseService):
    # NOTE(review): "LedgerEntr" looks like a typo for "LedgerEntry", but the
    # attribute name becomes the endpoint URL name, so renaming it would
    # change request URLs — confirm against the MPX service before fixing.
    LedgerEntr = DataEndpoint()
@register
class LiveEventDataService(BaseService):
    LiveEncoder = DataEndpoint()
    LiveEvent = DataEndpoint()
@register
class LiveEventService(BaseService):
    Scheduling = BusinessEndpoint()
@register
class MediaDataService(BaseService):
    AccountSettings = DataEndpoint()
    AssetType = DataEndpoint()
    Category = DataEndpoint()
    Media = DataEndpoint()
    MediaDefaults = DataEndpoint()
    MediaFile = DataEndpoint()
    Provider = DataEndpoint()
    Release = DataEndpoint()
    Server = DataEndpoint()
@register
class MessageDataService(BaseService):
    EmailTemplate = DataEndpoint()
    MessageInstruction = DataEndpoint()
    MessageQueue = DataEndpoint()
    NotificationFilter = DataEndpoint()
@register
class PlayerService(BaseService):
    Player = Endpoint(name='p')
@register
class PlayerDataService(BaseService):
    AccountSettings = DataEndpoint()
    ColorScheme = DataEndpoint()
    Layout = DataEndpoint()
    Player = DataEndpoint()
    PlugIn = DataEndpoint()
    Skin = DataEndpoint()
@register
class ProductFeedsService(BaseService):
    Feed = Endpoint(name='')
@register
class ProductDataService(BaseService):
    AccountSettings = DataEndpoint()
    PricingTemplate = DataEndpoint()
    Product = DataEndpoint()
    ProductTag = DataEndpoint()
    Subscription = DataEndpoint()
@register
class PromotionDataService(BaseService):
    Promotion = DataEndpoint()
    PromotionAction = DataEndpoint()
    PromotionCode = DataEndpoint()
    PromotionCondition = DataEndpoint()
    PromotionUseCount = DataEndpoint()
@register
class PublishDataService(BaseService):
    Adapter = DataEndpoint()
    AdapterConfiguration = DataEndpoint()
    PublishProfile = DataEndpoint()
@register
class PublishService(BaseService):
    Publish = BusinessEndpoint()
    Sharing = BusinessEndpoint()
@register
class SelectorService(BaseService):
    Selector = Endpoint(name='')
@register
class SharingDataService(BaseService):
    OutletProfile = DataEndpoint()
    ProviderAdapter = DataEndpoint()
@register
class SocialDataService(BaseService):
    AccountSettings = DataEndpoint()
    Comment = DataEndpoint()
    Rating = DataEndpoint()
    TotalRating = DataEndpoint()
@register
class AdminStorefrontService(BaseService):
    Action = DataEndpoint()
    Contract = DataEndpoint()
    FulfillmentItem = DataEndpoint()
    Order = DataEndpoint()
    OrderFulfillment = DataEndpoint()
    OrderHistory = DataEndpoint()
    OrderItem = DataEndpoint()
    OrderSummary = DataEndpoint()
    PaymentInstrumentInfo = DataEndpoint()
    Shipment = DataEndpoint()
    Checkout = BusinessEndpoint(schema='1.5')
    Payment = BusinessEndpoint(schema='1.1')
@register
class StorefrontService(BaseService):
    Checkout = BusinessEndpoint(schema='1.4')
    Payment = BusinessEndpoint(schema='1.1')
    OrderHistory = DataEndpoint()
    PaymentInstrumentInfo = DataEndpoint()
@register
class TaskService(BaseService):
    Agent = DataEndpoint()
    Batch = DataEndpoint()
    Task = DataEndpoint()
    TaskTemplate = DataEndpoint()
    TaskType = DataEndpoint()
@register
class UserDataService(BaseService):
    Directory = DataEndpoint()
    Security = DataEndpoint()
    User = DataEndpoint()
    Authentication = BusinessEndpoint()
    Lookup = BusinessEndpoint()
    Self = BusinessEndpoint()
@register
class UserProfileDataService(BaseService):
    AccountSettings = DataEndpoint()
    TotalItem = DataEndpoint()
    UserList = DataEndpoint()
    UserListItem = DataEndpoint()
    UserProfile = DataEndpoint()
@register
class ValidationService(BaseService):
    Validation = BusinessEndpoint(schema='1.1')
@register
class ValidationDataService(BaseService):
    ConditionalRule = DataEndpoint()
    ValidationRule = DataEndpoint()
    Validator = DataEndpoint()
@register
class WatchFolderDataService(BaseService):
    # Explicit key: auto-splitting would yield "Watch Folder Data Service".
    registry_key = 'WatchFolder Data Service'
    WatchFolder = DataEndpoint()
    WatchFolderFile = DataEndpoint()
@register
class WorkflowDataService(BaseService):
    ProfileResult = DataEndpoint()
    ProfileStepResult = DataEndpoint()
    WorkflowQueue = DataEndpoint()
| apache-2.0 |
devops2014/djangosite | django/contrib/gis/gdal/prototypes/ds.py | 122 | 4254 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int_output, srs_output,
void_output, voidptr_output,
)
# ctypes prototypes for the OGR C API, built via the `generation` helpers,
# which attach argtypes/restype and (optionally) GDAL error checking to each
# raw function pointer from the shared library `lgdal`.
c_int_p = POINTER(c_int)  # shortcut type
# Driver Routines
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii')
# DataSource
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
# NOTE(review): GDAL 2+ declares the FID parameter as 64-bit (GIntBig);
# c_long is only 32 bits on Windows — confirm the targeted GDAL version.
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect,
    [c_void_p, c_double, c_double, c_double, c_double], errcheck=False
)
# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
# Alias of `get_fd_name` (same underlying OGR_FD_GetName routine).
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
# Output parameters (year/month/day/hour/minute/second/TZ flag) are c_int_p.
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime,
    [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
| bsd-3-clause |
EvanK/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vnic_profile.py | 37 | 8981 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vnic_profile
short_description: Module to manage vNIC profile of network in oVirt/RHV
version_added: "2.8"
author:
- "Ondra Machacek (@machacekondra)"
- "Martin Necas (@mnecas)"
description:
- "Module to manage vNIC profile of network in oVirt/RHV"
options:
name:
description:
- "A human-readable name in plain text."
required: true
state:
description:
- "Should the vNIC be absent/present."
choices: ['absent', 'present']
default: present
description:
description:
- "A human-readable description in plain text."
data_center:
description:
            - "Datacenter name where the network resides."
required: true
network:
description:
- "Name of network to which is vNIC attached."
required: true
network_filter:
description:
            - "The network filter enables filtering of packets sent to/from the VM's NIC according to defined rules."
custom_properties:
description:
- "Custom properties applied to the vNIC profile."
- "Custom properties is a list of dictionary which can have following values:"
- "C(name) - Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
- "C(regexp) - Regular expression to set for custom property."
- "C(value) - Value to set for custom property."
qos:
description:
- "Quality of Service attributes regulate inbound and outbound network traffic of the NIC."
port_mirroring:
description:
- "Enables port mirroring."
type: bool
pass_through:
description:
- "Enables passthrough to an SR-IOV-enabled host NIC."
choices: ['disabled', 'enabled']
migratable:
description:
- "Marks whether pass_through NIC is migratable or not."
type: bool
extends_documentation_fragment: ovirt
'''
# Usage examples shown by `ansible-doc`.  Fixed: examples previously invoked a
# nonexistent module name "ovirt_vnics_profile" (extra 's'); the module is
# "ovirt_vnic_profile" as declared in DOCUMENTATION above.
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Add vNIC
  ovirt_vnic_profile:
    name: myvnic
    network: mynetwork
    state: present
    data_center: datacenter
- name: Editing vNICs network_filter, custom_properties, qos
  ovirt_vnic_profile:
    name: myvnic
    network: mynetwork
    data_center: datacenter
    qos: myqos
    custom_properties:
      - name: SecurityGroups
        value: 9bd9bde9-39da-44a8-9541-aa39e1a81c9d
    network_filter: allow-dhcp
- name: Editing vNICs network_filter, custom_properties, qos
  ovirt_vnic_profile:
    name: myvnic
    network: mynetwork
    data_center: datacenter
    qos: myqos
    custom_properties:
      - name: SecurityGroups
        value: 9bd9bde9-39da-44a8-9541-aa39e1a81c9d
    network_filter: allow-dhcp
- name: Dont use migratable
  ovirt_vnic_profile:
    name: myvnic
    network: mynetwork
    data_center: datacenter
    migratable: False
    pass_through: enabled
- name: Remove vNIC
  ovirt_vnic_profile:
    name: myvnic
    network: mynetwork
    state: absent
    data_center: datacenter
'''
RETURN = '''
id:
description: ID of the vNIC profile which is managed
returned: On success if vNIC profile is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vnic:
description: "Dictionary of all the vNIC profile attributes. Network interface attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
returned: On success if vNIC profile is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
get_id_by_name
)
class EntityVnicPorfileModule(BaseModule):
    # NOTE(review): class name misspells "Profile" as "Porfile"; kept as-is
    # because main() below references this exact name.
    """Build and compare oVirt vNIC-profile entities from module parameters."""

    def __init__(self, *args, **kwargs):
        super(EntityVnicPorfileModule, self).__init__(*args, **kwargs)

    def __get_dcs_service(self):
        # Top-level service for all data centers in the system.
        return self._connection.system_service().data_centers_service()

    def __get_dcs_id(self):
        # Resolve the 'data_center' parameter to its oVirt ID.
        return get_id_by_name(self.__get_dcs_service(), self.param('data_center'))

    def __get_network_id(self):
        # Resolve the 'network' parameter within the chosen data center.
        networks_service = self.__get_dcs_service().service(self.__get_dcs_id()).networks_service()
        return get_id_by_name(networks_service, self.param('network'))

    def __get_qos_id(self):
        # Resolve the 'qos' parameter within the chosen data center.
        qoss_service = self.__get_dcs_service().service(self.__get_dcs_id()).qoss_service()
        return get_id_by_name(qoss_service, self.param('qos'))

    def __get_network_filter_id(self):
        # Network filters are system-wide (not per data center).
        nf_service = self._connection.system_service().network_filters_service()
        return get_id_by_name(nf_service, self.param('network_filter'))

    def build_entity(self):
        """Construct the otypes.VnicProfile described by the module parameters.

        Optional parameters that were not supplied are passed as None so the
        SDK leaves the corresponding attributes untouched.
        """
        return otypes.VnicProfile(
            name=self.param('name'),
            network=otypes.Network(id=self.__get_network_id()),
            description=self.param('description')
            if self.param('description') else None,
            port_mirroring=self.param('port_mirroring'),
            pass_through=otypes.VnicPassThrough(mode=otypes.VnicPassThroughMode(self.param('pass_through')))
            if self.param('pass_through') else None,
            migratable=self.param('migratable'),
            custom_properties=[
                otypes.CustomProperty(
                    name=cp.get('name'),
                    regexp=cp.get('regexp'),
                    value=str(cp.get('value')),
                ) for cp in self.param('custom_properties') if cp
            ] if self.param('custom_properties') is not None else None,
            qos=otypes.Qos(id=self.__get_qos_id())
            if self.param('qos') else None,
            network_filter=otypes.NetworkFilter(id=self.__get_network_filter_id())
            if self.param('network_filter') else None
        )

    def update_check(self, entity):
        """Return True when `entity` already matches the requested parameters."""
        def check_custom_properties():
            # Compare as order-insensitive (name, regexp, value) triples.
            if self.param('custom_properties'):
                current = []
                if entity.custom_properties:
                    current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
                passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
                return sorted(current) == sorted(passed)
            return True

        # Optional sub-objects may be absent on the existing entity; guard them
        # so comparison does not raise AttributeError on None.
        pass_through = entity.pass_through.mode.name if entity.pass_through else None
        network_filter = entity.network_filter.name if entity.network_filter else None
        qos = entity.qos.name if entity.qos else None
        return (
            check_custom_properties() and
            equal(self.param('migratable'), getattr(entity, 'migratable', None)) and
            equal(self.param('pass_through'), pass_through) and
            equal(self.param('description'), entity.description) and
            equal(self.param('network_filter'), network_filter) and
            equal(self.param('qos'), qos) and
            equal(self.param('port_mirroring'), getattr(entity, 'port_mirroring', None))
        )
def main():
    """Module entry point: parse arguments and converge the vNIC profile state."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        network=dict(type='str', required=True),
        data_center=dict(type='str', required=True),
        description=dict(type='str'),
        name=dict(type='str', required=True),
        network_filter=dict(type='str'),
        custom_properties=dict(type='list'),
        qos=dict(type='str'),
        pass_through=dict(type='str', choices=['disabled', 'enabled']),
        port_mirroring=dict(type='bool'),
        migratable=dict(type='bool'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vnic_services = connection.system_service().vnic_profiles_service()
        entitynics_module = EntityVnicPorfileModule(
            connection=connection,
            module=module,
            service=vnic_services,
        )
        state = module.params['state']
        if state == 'present':
            ret = entitynics_module.create()
        elif state == 'absent':
            ret = entitynics_module.remove()
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close when the connection was actually established; previously
        # a failure inside create_connection() made this `finally` raise
        # NameError on the unbound `connection`, masking the real error.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
TUM-AERIUS/Aerius | Raspberry/Stereo/photo-client.py | 1 | 1956 | import io
import socket
import struct
import time
import picamera
# Raspberry Pi camera client: streams JPEG captures over a TCP socket using a
# simple length-prefixed ('<L' little-endian uint32) protocol.
# Connect a client socket to my_server:8000 (change my_server to the
# hostname of your server)
client_socket = socket.socket()
client_socket.connect(('169.254.251.208', 8000))
# Make a file-like object out of the connection
connection = client_socket.makefile('rwb')
try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # Start a preview and let the camera warm up for 2 seconds
        camera.start_preview()
        time.sleep(2)
        # Note the start time and construct a stream to hold image data
        # temporarily (we could write it directly to connection but in this
        # case we want to find out the size of each capture first to keep
        # our protocol simple)
        start = time.time()
        stream = io.BytesIO()
        # Initial 4-byte read from the server before capturing begins;
        # presumably a go-ahead handshake — value is not used. TODO confirm.
        data = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
        # openCv represents images in bgr format as NumPy arrays
        for foo in camera.capture_continuous(stream, format="jpeg"):
            # Write the length of the capture to the stream and flush to
            # ensure it actually gets sent
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            # Rewind the stream and send the image data over the wire
            stream.seek(0)
            connection.write(stream.read())
            # If we've been capturing for more than 30 seconds, quit
            if time.time() - start > 30:
                break
            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()
            # Read the server's 4-byte reply between frames.
            data = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
            # NOTE(review): struct.unpack returns an int, so comparing it to
            # the string "e" is always False — this stop condition can never
            # trigger. The intended sentinel value needs confirming against
            # the server protocol before fixing.
            if data == "e":
                break
        # Write a length of zero to the stream to signal we're done
        connection.write(struct.pack('<L', 0))
finally:
    connection.close()
client_socket.close() | mit |
lzkelley/zcode | zcode/math/statistic.py | 1 | 25108 | """General functions for mathematical and numerical operations.
Functions
---------
- confidence_bands - Bin by `xx` to calculate confidence intervals in `yy`.
- confidence_intervals - Compute the values bounding desired confidence intervals.
- cumstats - Calculate a cumulative average and standard deviation.
- log_normal_base_10 -
- percentiles -
- stats - Get basic statistics for the given array.
- stats_str - Return a string with the statistics of the given array.
- sigma - Convert from standard deviation to percentiles.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import numpy as np
import scipy as sp
import scipy.stats # noqa
from zcode import utils
from zcode.math import math_core
# Public API of this module; names not listed here (helpers, underscore names)
# are internal.  `percentiles` is retained only as a deprecated alias of
# `quantiles`.
__all__ = [
    'confidence_bands', 'confidence_intervals',
    'cumstats', 'frac_str', 'info', 'log_normal_base_10', 'mean',
    'percs_from_sigma', 'quantiles', 'random_power', 'sigma',
    'stats', 'stats_str', 'std',
    'LH_Sampler',
    # DEPRECATED
    'percentiles'
]
def confidence_bands(xx, yy, xbins=10, xscale='lin', percs=[0.68, 0.95], filter=None):
    """Bin the given data with respect to `xx` and calculate confidence intervals in `yy`.

    Arguments
    ---------
    xx : array_like scalars
        Data values for the axis by which to bin.
    yy : array_like scalars
        Data values for the axis in which to calculate confidence intervals, with values
        corresponding to each of the `xx` values.  Must have the same number of elements
        as `xx`.
    xbins : int or array_like of scalar
        Specification for bins in `xx`.  Either a
        * int, describing the number of bins `N` to create automatically with scale `xscale`.
        * array_like scalar, describing the `N+1` edges of each bin (left and right).
    xscale : str
        Specification of xbin scaling if bins are to be calculated automatically, {'lin', 'log'}.
        Ignored if bin edges are given explicitly to `xbins`.
    percs : scalar or array_like of scalar
        The fractional confidence intervals to calculate (e.g. 0.5 for median).
        Must be between {0.0, 1.0}.
    filter : str or `None`
        Boolean comparison (e.g. '>') applied to `yy` against 0.0; failing points are dropped.

    Returns
    -------
    (for number of bins `N`)
    count : (N,) array of int
        The number of points in each xbin.
    med : (N,) array of float
        The median value of points in each bin
    conf : array or ndarray of float
        Values describing the confidence intervals.
        If a single `percs` value is given, this will have shape (N,2);
        If `M` `percs` values are given, this will have shape (N,M,2)
        Where in each case the 0th and 1st element of the last dimension is the lower and upper
        confidence bounds respectively.
    xbins : (N+1,) array of float
        Location of bin edges.
    """
    # Remember whether a scalar `percs` was given, to squeeze the output later.
    # (The mutable default `percs=[...]` is safe here: it is only rebound,
    # never mutated in place.)
    squeeze = False
    if not np.iterable(percs):
        squeeze = True
        percs = [percs]
    xx = np.asarray(xx).flatten()
    yy = np.asarray(yy).flatten()
    if xx.shape != yy.shape:
        errStr = "Shapes of `xx` and `yy` must match ('{}' vs. '{}'."
        errStr = errStr.format(str(xx.shape), str(yy.shape))
        raise ValueError(errStr)
    # Filter based on whether `yy` values match `filter` comparison to 0.0
    if filter is not None:
        compFunc = math_core._comparison_function(filter)
        inds = compFunc(yy, 0.0)
        xx = xx[inds]
        yy = yy[inds]
    # Create bins
    xbins = math_core.asBinEdges(xbins, xx, scale=xscale)
    nbins = xbins.size - 1
    # Find the entries corresponding to each bin
    groups = math_core.groupDigitized(xx, xbins[1:], edges='right')
    # Allocate storage for results
    med = np.zeros(nbins)
    conf = np.zeros((nbins, np.size(percs), 2))
    count = np.zeros(nbins, dtype=int)
    # Calculate medians and confidence intervals
    for ii, gg in enumerate(groups):
        count[ii] = np.size(gg)
        # Empty bins keep their zero-initialized median/confidence values.
        if count[ii] == 0: continue
        mm, cc = confidence_intervals(yy[gg], percs=percs)
        med[ii] = mm
        conf[ii, ...] = cc[...]
    # Collapse the percentile axis when a scalar `percs` was given.
    if squeeze:
        conf = conf.squeeze()
    return count, med, conf, xbins
def confidence_intervals(vals, sigma=None, percs=None, weights=None, axis=None,
                         filter=None, return_ci=False,
                         # DEPRECATED ARGUMENTS:
                         ci=None):
    """Compute the values bounding the target confidence intervals for an array of data.

    Arguments
    ---------
    vals : array_like of scalars
        Data over which to calculate confidence intervals.
        This can be an arbitrarily shaped ndarray.
    sigma : (M,) array_like of float
        Confidence values as standard-deviations, converted to percentiles.
    percs : (M,) array_like of floats
        List of desired confidence intervals as fractions (e.g. `[0.68, 0.95]`)
    weights : array_like of scalars or None
        Optional weights for each value, passed through to `quantiles`.
    axis : int or None
        Axis over which to calculate confidence intervals, or 'None' to marginalize over all axes.
    filter : str or `None`
        Filter the input array with a boolean comparison to zero.
        If no values remain after filtering, ``NaN, NaN`` is returned.
    return_ci : bool
        Return the confidence-interval values used (i.e. percentiles)
    ci : DEPRECATED, use `percs` instead

    Returns
    -------
    med : scalar
        Median of the input data.
        `None` if there are no values (e.g. after filtering).
    conf : ([L, ]M, 2) ndarray of scalar
        Bounds for each confidence interval.  Shape depends on the number of confidence intervals
        passed in `percs`, and the input shape of `vals`.
        `None` if there are no values (e.g. after filtering).
        If `vals` is 1D or `axis` is 'None', then the output shape will be (M, 2).
        If `vals` has more than one-dimension, and `axis` is not 'None', then the shape `L`
        will be the shape of `vals`, minus the `axis` axis.
        For example,
            if ``vals.shape = (4,3,5)` and `axis=1`, then `L = (4,5)`
            the final output shape will be: (4,5,M,2).
    percs : (M,) ndarray of float, optional
        The percentile-values used for calculating confidence intervals.
        Only returned if `return_ci` is True.

    Raises
    ------
    ValueError
        If both `percs` and `sigma` are given, or any `percs` lies outside [0.0, 1.0].
    """
    percs = utils.dep_warn_var("ci", ci, "percs", percs)

    if percs is not None and sigma is not None:
        raise ValueError("Only provide *either* `percs` or `sigma`!")

    # Default to 1-, 2-, and 3-sigma intervals when nothing is specified.
    if percs is None:
        if sigma is None:
            sigma = [1.0, 2.0, 3.0]
        percs = percs_from_sigma(sigma)

    percs = np.atleast_1d(percs)
    # BUG FIX: previously `np.all(percs > 1.0)`, which let mixed-validity
    # inputs such as [0.5, 1.5] slip through; every entry must be in [0, 1].
    if np.any(percs < 0.0) or np.any(percs > 1.0):
        raise ValueError("`percs` must be [0.0, 1.0]! {}".format(stats_str(percs)))

    def PERC_FUNC(xx, pp, **kwargs):
        # Delegate to the weighted `quantiles`, converting percent -> fraction.
        return quantiles(xx, pp/100.0, weights=weights, **kwargs)

    # Filter input values
    if filter is not None:
        if weights is not None:
            raise NotImplementedError("`weights` argument does not work with `filter`!")
        # Filtering masks values; `axis` still applies to the masked array below.
        vals = math_core.comparison_filter(vals, filter, mask=True)
        if vals.size == 0:
            # Preserve the advertised return arity even with `return_ci`.
            if return_ci:
                return np.nan, np.nan, percs
            return np.nan, np.nan

    # Calculate confidence-intervals and median
    cdf_vals = np.array([(1.0-percs)/2.0, (1.0+percs)/2.0]).T
    # This produces an ndarray with shape `[M, 2(, L)]`
    #    If ``axis is None`` or `np.ndim(vals) == 1` then the shape will be simply `[M, 2]`
    #    Otherwise, `L` will be the shape of `vals` without axis `axis`.
    conf = [[PERC_FUNC(vals, 100.0*cdf[0], axis=axis),
             PERC_FUNC(vals, 100.0*cdf[1], axis=axis)]
            for cdf in cdf_vals]
    conf = np.array(conf)
    # Reshape from `[M, 2, L]` to `[L, M, 2]`
    if (np.ndim(vals) > 1) and (axis is not None):
        conf = np.moveaxis(conf, -1, 0)

    med = PERC_FUNC(vals, 50.0, axis=axis)
    if len(conf) == 1:
        conf = conf[0]

    if return_ci:
        return med, conf, percs

    return med, conf
def cumstats(arr):
    """Compute running (cumulative) mean and standard deviation over `arr`.

    Arguments
    ---------
    arr <flt>[N] : input array

    Returns
    -------
    ave <flt>[N] : cumulative average over ``arr``
    std <flt>[N] : cumulative standard deviation over ``arr``
        (the first element is always zero: a single sample has no spread)
    """
    n_vals = len(arr)
    idx = np.arange(n_vals)
    # Running sums of the values and of their squares.
    cum_sum = np.cumsum(arr)
    cum_sqr = np.cumsum(np.square(arr))
    ave = cum_sum / (idx + 1.0)
    std = np.zeros(n_vals)
    # Sample variance via sum-of-squares; `fabs` guards against tiny negative
    # values from floating-point cancellation.
    var = np.fabs(cum_sqr[1:] - np.square(cum_sum[1:]) / (idx[1:] + 1.0)) / idx[1:]
    std[1:] = np.sqrt(var)
    return ave, std
def frac_str(num, den=None, frac_fmt=None, dec_fmt=None):
    """Create a string of the form '{}/{} = {}' for reporting fractional values.

    If `den` is omitted, `num` must be a boolean array: the fraction is then
    count-of-True over total size.  Format specifiers are chosen automatically
    (scientific notation for very large/small values) unless given explicitly.
    """
    if den is None:
        assert num.dtype == bool, "If no `den` is given, array must be boolean!"
        num, den = np.count_nonzero(num), num.size

    try:
        ratio = num / den
    except ZeroDivisionError:
        ratio = np.nan

    # Switch to scientific notation when numerator/denominator span >= 4 dex.
    if frac_fmt is None:
        frac_fmt = ".1e" if np.any(np.fabs(np.log10([num, den])) >= 4) else "d"

    # Likewise for the decimal value when it is very large or very small.
    if dec_fmt is None:
        dec_fmt = ".3e" if np.fabs(np.log10(ratio)) > 3 else ".4f"

    return "{num:{ff}}/{den:{ff}} = {frac:{df}}".format(
        num=num, den=den, frac=ratio, ff=frac_fmt, df=dec_fmt)
def info(array, shape=True, sample=3, stats=True):
    """Build a one-line summary of `array`: shape, sample elements, statistics.

    Each requested piece is appended as "{} " (trailing space preserved).
    """
    pieces = []
    if shape:
        pieces.append("{} ".format(np.shape(array)))
    if (sample is not None) and (sample > 0):
        pieces.append("{} ".format(math_core.str_array(array, sides=sample)))
    if stats:
        pieces.append("{} ".format(stats_str(array, label=False)))
    return "".join(pieces)
def log_normal_base_10(mu, sigma, size=None, shift=0.0):
    """Draw from a lognormal distribution with scatter specified in base-10 (dex).

    Arguments
    ---------
    mu : (N,) scalar
        Mean of the distribution in linear space (e.g. 1.0e8 instead of 8.0).
    sigma : (N,) scalar
        Variance of the distribution *in dex* (e.g. 1.0 means factor of 10.0 variance)
    size : (M,) int
        Desired size of sample.
    shift : scalar
        Offset applied to the mean, in dex.

    Returns
    -------
    dist : (M,...) scalar
        Resulting distribution of values (in linear space).
    """
    # Convert the dex scatter and shift into natural-log units for numpy.
    stdev_ln = np.log(10 ** sigma)
    mean_ln = np.log(mu) + shift * np.log(10.0)
    return np.random.lognormal(mean_ln, stdev_ln, size)
def mean(vals, weights=None, **kwargs):
    """Arithmetic mean of `vals`, optionally weighted by `weights`.

    Extra `kwargs` (e.g. `axis`) are forwarded to the underlying numpy calls.
    """
    if weights is None:
        return np.mean(vals, **kwargs)
    # Weighted mean: sum(w * x) / sum(w)
    return np.sum(vals * weights, **kwargs) / np.sum(weights, **kwargs)
def percentiles(*args, **kwargs):
    """Deprecated alias for :func:`quantiles` (kept for backwards compatibility)."""
    utils.dep_warn("percentiles", newname="quantiles")
    return quantiles(*args, **kwargs)
def quantiles(values, percs=None, sigmas=None, weights=None, axis=None,
              values_sorted=False, filter=None):
    """Compute (optionally weighted) quantiles of the input data.

    Copied from @Alleo answer: http://stackoverflow.com/a/29677616/230468
    NOTE: if `values` is a masked array, then only unmasked values are used!

    Arguments
    ---------
    values : (N,) array_like
        Input data.
    percs : (M,) scalar in [0.0, 1.0] or None
        Desired quantiles of the data; if `None`, `sigmas` must be given.
    sigmas : (M,) scalar or None
        Standard deviations, converted to quantiles via the normal CDF when
        `percs` is not given.
    weights : (N,) array_like or None
        Weight for each input data point in `values`.
    axis : int or None
        Axis along which to compute quantiles (`None` flattens the input).
    values_sorted : bool
        If True, then input values are assumed to already be sorted.
    filter : str or None
        Comparison filter (see `math_core.comparison_filter`) applied first.
        (Parameter name shadows the builtin, kept for API compatibility.)

    Returns
    -------
    percs : (M,) float
        Quantiles of the (weighted) input data.
    """
    # BUGFIX: previously, omitting both arguments fell through to an obscure
    # TypeError inside scipy; fail fast with a clear message instead.
    if percs is None and sigmas is None:
        raise ValueError("either `percs` or `sigmas` must be provided!")

    if filter is not None:
        values = math_core.comparison_filter(values, filter)

    # Preserve masked arrays so that masked entries stay excluded.
    if not isinstance(values, np.ma.MaskedArray):
        values = np.asarray(values)

    if percs is None:
        percs = sp.stats.norm.cdf(sigmas)

    if np.ndim(values) > 1:
        if axis is None:
            values = values.flatten()
    else:
        if axis is not None:
            raise ValueError("Cannot act along axis '{}' for 1D data!".format(axis))

    percs = np.array(percs)
    if weights is None:
        weights = np.ones_like(values)
    weights = np.array(weights)
    # Propagate the values' mask (if any) onto the weights.
    try:
        weights = np.ma.masked_array(weights, mask=values.mask)
    except AttributeError:
        pass

    assert np.all(percs >= 0.0) and np.all(percs <= 1.0), 'percentiles should be in [0, 1]'

    if not values_sorted:
        sorter = np.argsort(values, axis=axis)
        values = np.take_along_axis(values, sorter, axis=axis)
        weights = np.take_along_axis(weights, sorter, axis=axis)

    if axis is None:
        # Each point contributes half its weight below and half above itself.
        weighted_quantiles = np.cumsum(weights) - 0.5 * weights
        weighted_quantiles /= np.sum(weights)
        percs = np.interp(percs, weighted_quantiles, values)
        return percs

    # Along an axis: move the target axis last, then interpolate row by row.
    weights = np.moveaxis(weights, axis, -1)
    values = np.moveaxis(values, axis, -1)

    weighted_quantiles = np.cumsum(weights, axis=-1) - 0.5 * weights
    weighted_quantiles /= np.sum(weights, axis=-1)[..., np.newaxis]
    percs = [np.interp(percs, weighted_quantiles[idx], values[idx])
             for idx in np.ndindex(values.shape[:-1])]
    percs = np.array(percs)
    return percs
def percs_from_sigma(sigma, side='in', boundaries=False):
    """Convert from standard deviation 'sigma' to percentiles in/out-side the normal distribution.

    Arguments
    ---------
    sigma : (N,) array_like scalar
        Standard deviations.
    side : str, {'in', 'out'}
        Calculate percentiles inside (i.e. [-sig, sig]) or outside
        (i.e. [-inf, -sig] U [sig, inf]).
    boundaries : bool
        Whether boundaries should be given ('True'), or the area ('False').

    Returns
    -------
    vals : (N,) array_like scalar
        Percentiles corresponding to the input `sigma`, or `(vlo, vhi)`
        boundary fractions when `boundaries` is True.

    Raises
    ------
    ValueError
        If `side` is not recognized.
    """
    if side.startswith('in'):
        inside = True
    elif side.startswith('out'):
        inside = False
    else:
        # BUGFIX: the braces were previously unescaped, so `str.format`
        # raised a confusing KeyError here instead of the intended ValueError.
        raise ValueError("`side` = '{}' must be {{'in', 'out'}}.".format(side))

    # CDF from -inf to `sigma`
    cdf = sp.stats.norm.cdf(sigma)
    # Area outside of [-sig, sig]
    vals = 2.0 * (1.0 - cdf)
    # Convert to area inside [-sig, sig]
    if inside:
        vals = 1.0 - vals

    # Convert from area to locations of boundaries (fractions)
    if boundaries:
        if inside:
            vlo = 0.5*(1 - vals)
            vhi = 0.5*(1 + vals)
        else:
            vlo = 0.5*vals
            vhi = 1.0 - 0.5*vals

        return vlo, vhi

    return vals
def random_power(extr, pdf_index, size=1, **kwargs):
    """Draw from a power-law PDF with the given extrema and index.

    Arguments
    ---------
    extr : array_like scalar
        The minimum and maximum value of this array are used as extrema.
    pdf_index : scalar
        Power-law index of the PDF to draw from (any real number).
        NOTE: `numpy.random.power` instead uses the CDF index, i.e. `g+1`.
    size : scalar
        Number of points to draw (cast to int).
    **kwargs : dict pairs
        Additional arguments passed to `zcode.math_core.minmax` with `extr`.

    Returns
    -------
    rv : (N,) scalar
        Array of random variables with N=`size` (default, size=1).
    """
    # Reduce `extr` to its (strictly positive) extrema.
    extr = math_core.minmax(extr, filter='>', **kwargs)
    num = int(size)

    if pdf_index == -1:
        # Special case: p(x) ~ 1/x is uniform in log-space.
        draws = np.random.uniform(*np.log10(extr), size=num)
        return 10 ** draws

    # General case: invert the analytic CDF of x^g between the extrema.
    gamma = pdf_index + 1
    bounds = extr ** gamma
    uni = np.random.random(size=num)
    return (bounds[0] + (bounds[1] - bounds[0]) * uni) ** (1. / gamma)
def sigma(*args, **kwargs):
    """Deprecated alias for :func:`percs_from_sigma`."""
    # ---- deprecation section ----
    utils.dep_warn("sigma", newname="percs_from_sigma")
    # -----------------------------
    return percs_from_sigma(*args, **kwargs)
def stats(vals, median=False):
    """Return the average and (population) standard deviation of `vals`.

    Arguments
    ---------
    vals : (N,) array_like
        Input values.
    median : bool
        If True, also return the median as a third element.

    Returns
    -------
    (ave, std) or (ave, std, med) tuple of floats.
    """
    results = [np.average(vals), np.std(vals)]
    if median:
        results.append(np.median(vals))
    return tuple(results)
def stats_str(data, percs=[0.0, 0.16, 0.50, 0.84, 1.00], ave=False, std=False, weights=None,
              format=None, log=False, label=True, label_log=True, filter=None):
    """Return a single-line string with the statistics of the given array.

    Arguments
    ---------
    data : ndarray of scalar
        Input data from which to calculate statistics.
    percs : array_like of scalars in [0.0, 1.0]
        Which percentiles to calculate (values > 1 are divided by 100 with a warning).
        NOTE: the mutable default is safe — it is never modified below.
    ave : bool
        Include average value in output.
    std : bool
        Include standard-deviation in output.
    weights : array_like or None
        Optional weights forwarded to :func:`quantiles`.
    format : str
        Formatting for all numerical output, (e.g. `":.2f"`).
    log : bool
        Convert values to log10 before printing.
    label : bool
        Add label for which percentiles are being printed.
    label_log : bool
        If `log` is also true, append a string saying these are log values.
    filter : str or None
        Comparison filter applied to the data first.

    Returns
    -------
    out : str
        Single-line string of the desired statistics.
    """
    data = np.array(data)
    if filter is not None:
        data = math_core.comparison_filter(data, filter)
    if np.size(data) == 0:
        return "empty after filtering"

    if log:
        data = np.log10(data)

    percs = np.atleast_1d(percs)
    if np.any(percs > 1.0):
        warnings.warn("WARNING: zcode.math.statistic: input `percs` should be [0.0, 1.0], "
                      "dividing these by 100.0!")
        # BUGFIX: was `percs /= 100.0`, which mutated the *caller's* array
        # in-place when an ndarray was passed (np.atleast_1d does not copy).
        percs = percs / 100.0

    percs_flag = False
    if (percs is not None) and len(percs):
        percs_flag = True

    out = ""

    if format is None:
        # Integer formatting only makes sense when no fractional stats requested.
        allow_int = False if (ave or std) else True
        format = math_core._guess_str_format_from_range(data, allow_int=allow_int)

    # If a `format` is given, but missing the colon, add the colon
    if len(format) and not format.startswith(':'):
        format = ':' + format
    form = "{{{}}}".format(format)

    # Add average
    if ave:
        out += "ave = " + form.format(np.average(data))
        if std or percs_flag:
            out += ", "

    # Add standard-deviation
    if std:
        out += "std = " + form.format(np.std(data))
        if percs_flag:
            out += ", "

    # Add percentiles
    if percs_flag:
        # Cast quantiles back to the data's dtype (e.g. int for int input).
        tiles = quantiles(data, percs, weights=weights).astype(data.dtype)
        out += "(" + ", ".join(form.format(tt) for tt in tiles) + ")"
        if label:
            out += ", for (" + ", ".join("{:.0f}%".format(100*pp) for pp in percs) + ")"

    # Note if these are log-values
    if log and label_log:
        out += " (log values)"

    return out
def std(vals, weights=None, **kwargs):
    """Standard deviation, optionally using reliability weights.

    See: https://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weightsd.pdf
    """
    if weights is None:
        return np.std(vals, **kwargs)

    nonzero = np.count_nonzero(weights)
    # Weighted mean (inlined from the module's `mean` helper).
    ave = np.sum(vals * weights, **kwargs) / np.sum(weights, **kwargs)
    # Weighted sum of squared deviations over the effective denominator.
    numer = np.sum(weights * (vals - ave) ** 2)
    denom = np.sum(weights) * (nonzero - 1) / nonzero
    return np.sqrt(numer / denom)
class LH_Sampler:
    """Latin-Hypercube sampler over the unit cube or explicit parameter ranges.

    Much of this code was taken from the pyDOE project:
    - https://github.com/tisimst/pyDOE
    This code was originally published by the following individuals for use with
    Scilab:
        Copyright (C) 2012 - 2013 - Michael Baudin
        Copyright (C) 2012 - Maria Christopoulou
        Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
        Copyright (C) 2009 - Yann Collette
        Copyright (C) 2009 - CEA - Jean-Marc Martinez
        website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
    Much thanks goes to these individuals. It has been converted to Python by
    Abraham Lee.
    """

    @classmethod
    def sample(cls, vals, nsamp=None, **kwargs):
        """Draw LHS samples.

        If `vals` is an int it is the number of parameters (unit-cube samples);
        otherwise it is an (N,2) array of per-parameter [min, max] ranges.
        """
        if isinstance(vals, int):
            return cls.sample_unit(vals, nsamp=nsamp, **kwargs)
        return cls.sample_vals(vals, nsamp=nsamp, **kwargs)

    @classmethod
    def sample_vals(cls, vals, nsamp=None, log=False, **kwargs):
        """Draw samples mapped onto the (N,2) ranges `vals`, optionally in log-space.

        `log` may be a single bool or one bool per parameter.
        """
        vals = np.asarray(vals)
        try:
            npar, check = np.shape(vals)
            if (check != 2) or (npar < 2):
                raise ValueError
        except ValueError:
            print(f"vals = {vals}")
            raise ValueError(f"Shape of `vals` ({np.shape(vals)}) must be (N,2)!")

        if np.isscalar(log):
            log = [log] * npar
        if np.any([ll not in [True, False] for ll in log]):
            raise ValueError(f"`log` value(s) must be 'True' or 'False'!")

        # Draw samples in [0.0, 1.0]
        samps = cls.sample_unit(npar, nsamp=nsamp, **kwargs)

        # Map samples to the given ranges in log or linear space
        for ii, vv in enumerate(vals):
            if log[ii]:
                vv = np.log10(vv)
            samps[ii, :] = (vv.max() - vv.min()) * samps[ii, :] + vv.min()
            if log[ii]:
                samps[ii, :] = 10.0 ** samps[ii, :]

        return samps

    @classmethod
    def sample_unit(cls, npar, nsamp=None, center=False, optimize=None, iterations=10):
        """Draw `nsamp` LHS samples in the unit cube [0,1]^npar.

        `optimize` may be None, 'dist' (maximize minimum pairwise distance) or
        'corr' (minimize maximum inter-parameter correlation); when given, the
        best of `iterations` random candidates is returned.
        """
        if nsamp is None:
            nsamp = npar

        # Construct optimization variables/functions
        optimize = None if (optimize is None) else optimize.lower()
        if optimize is not None:
            if optimize.startswith('dist'):
                extr = 0.0
                mask = np.ones((nsamp, nsamp), dtype=bool)
                comp = np.less

                # Minimum euclidean distance between points
                def metric(xx):
                    dist = (xx[:, np.newaxis, :] - xx[:, :, np.newaxis])**2
                    dist = np.sum(dist, axis=0)
                    return np.min(dist[mask])

            elif optimize.startswith('corr'):
                extr = np.inf
                mask = np.ones((npar, npar), dtype=bool)
                comp = np.greater
                # Maximum correlation
                metric = lambda xx: np.max(np.abs(np.corrcoef(xx)[mask]))
            else:
                # BUGFIX: an unrecognized value previously fell through to a
                # NameError on `metric` inside the loop below.
                raise ValueError(
                    "Unrecognized `optimize` value '{}' (use 'dist' or 'corr')!".format(optimize))

            # Exclude the diagonal (self-distance / self-correlation).
            np.fill_diagonal(mask, False)

        samples = None
        # iterate over randomizations
        for ii in range(iterations):
            cand = cls._sample(npar, nsamp, center=center)
            if optimize is None:
                samples = cand
                break

            # Calculate the metric being optimized; keep the best candidate.
            # BUGFIX: accept the first candidate unconditionally so `samples`
            # can never be unbound after the loop.
            met = metric(cand)
            if (samples is None) or comp(extr, met):
                extr = met
                samples = cand

        return samples

    @classmethod
    def _sample(cls, npar, nsamp, center=False):
        """Draw one stratified candidate: one point per interval, rows permuted."""
        # Generate the intervals
        cut = np.linspace(0, 1, nsamp + 1)
        lo = cut[:-1]
        hi = cut[1:]

        # Fill points uniformly in each interval (or at the interval centers)
        shape = (npar, nsamp)
        if center:
            points = np.zeros(shape)
            points[...] = 0.5 * (lo + hi)[np.newaxis, :]
        else:
            points = np.random.uniform(size=shape)
            points = points * (hi - lo)[np.newaxis, :] + lo[np.newaxis, :]

        # Decouple the parameters by permuting each row independently.
        for j in range(npar):
            points[j, :] = np.random.permutation(points[j, :])

        return points
| mit |
qtproject/pyside-pyside | doc/inheritance_diagram.py | 10 | 12497 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:copyright: Copyright 2010-2011 by the PySide team.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
    """Raised when an inheritance diagram cannot be built (bad class/module name)."""
    pass
class InheritanceGraph(object):
    """
    Given a list of classes, determines the set of classes that they inherit
    from all the way to the root "object", and then is able to generate a
    graphviz dot graph from them.
    """
    def __init__(self, class_names, currmodule, show_builtins=False, parts=0):
        """
        *class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.
        """
        self.class_names = class_names
        # Resolve the names into class objects, then walk their base classes.
        classes = self._import_classes(class_names, currmodule)
        self.class_info = self._class_info(classes, show_builtins, parts)
        if not self.class_info:
            raise InheritanceException('No classes found for '
                                       'inheritance diagram')

    def _import_class_or_module(self, name, currmodule):
        """
        Import a class using its fully-qualified *name*.
        """
        try:
            path, base = class_sig_re.match(name).groups()
        except (AttributeError, ValueError):
            # AttributeError: the regex did not match at all (match() is None).
            raise InheritanceException('Invalid class or module %r specified '
                                       'for inheritance diagram' % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.') or '')
        # two possibilities: either it is a module, then import it
        try:
            __import__(fullname)
            todoc = sys.modules[fullname]
        except ImportError:
            # else it is a class, then import the module
            if not path:
                if currmodule:
                    # try the current module
                    path = currmodule
                else:
                    raise InheritanceException(
                        'Could not import class %r specified for '
                        'inheritance diagram' % base)
            try:
                __import__(path)
                todoc = getattr(sys.modules[path], base)
            except (ImportError, AttributeError):
                raise InheritanceException(
                    'Could not import class or module %r specified for '
                    'inheritance diagram' % (path + '.' + base))

        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # A module: collect every class defined in that module itself.
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise InheritanceException('%r specified for inheritance diagram is '
                                   'not a class or module' % name)

    def _import_classes(self, class_names, currmodule):
        """Import a list of classes."""
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name, currmodule))
        return classes

    def _class_info(self, classes, show_builtins, parts):
        """Return name and bases for all classes that are ancestors of
        *classes*.

        *parts* gives the number of dotted name parts that is removed from the
        displayed node names.
        """
        all_classes = {}
        # NOTE(review): this relies on `__builtins__` being a dict, which is
        # only true when the module is imported (not run as __main__).
        builtins = __builtins__.values()

        def recurse(cls):
            # Depth-first walk up the inheritance chain, recording each class.
            if not show_builtins and cls in builtins:
                return

            nodename = self.class_name(cls, parts)
            fullname = self.class_name(cls, 0)

            baselist = []
            all_classes[cls] = (nodename, fullname, baselist)
            for base in cls.__bases__:
                if not show_builtins and base in builtins:
                    continue
                # Skip the Shiboken binding base object (PySide internal).
                if base.__name__ == "Object" and base.__module__ == "Shiboken":
                    continue
                baselist.append(self.class_name(base, parts))
                if base not in all_classes:
                    recurse(base)

        for cls in classes:
            recurse(cls)

        return all_classes.values()

    def class_name(self, cls, parts=0):
        """Given a class object, return a fully-qualified name.

        This works for things I've tested in matplotlib so far, but may not be
        completely general.
        """
        module = cls.__module__
        if module == '__builtin__':
            fullname = cls.__name__
        else:
            fullname = '%s.%s' % (module, cls.__name__)
        if parts == 0:
            return fullname
        # Keep only the last `parts` dotted components for display.
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])

    def get_all_class_names(self):
        """
        Get all of the class names involved in the graph.
        """
        return [fullname for (_, fullname, _) in self.class_info]

    # These are the default attrs for graphviz
    default_graph_attrs = {
        'rankdir': 'LR',
        'size': '"8.0, 12.0"',
    }
    default_node_attrs = {
        'shape': 'box',
        'fontsize': 10,
        'height': 0.25,
        'fontname': 'Vera Sans, DejaVu Sans, Liberation Sans, '
                    'Arial, Helvetica, sans',
        'style': '"setlinewidth(0.5)"',
    }
    default_edge_attrs = {
        'arrowsize': 0.5,
        'style': '"setlinewidth(0.5)"',
    }

    def _format_node_attrs(self, attrs):
        # "key=value,key=value" list for a node/edge attribute block.
        return ','.join(['%s=%s' % x for x in attrs.items()])

    def _format_graph_attrs(self, attrs):
        # "key=value;\n" lines for graph-level attributes.
        return ''.join(['%s=%s;\n' % x for x in attrs.items()])

    def generate_dot(self, name, urls={}, env=None,
                     graph_attrs={}, node_attrs={}, edge_attrs={}):
        """
        Generate a graphviz dot graph from the classes that
        were passed in to __init__.

        *name* is the name of the graph.

        *urls* is a dictionary mapping class names to HTTP URLs.

        *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
        key/value pairs to pass on as graphviz properties.

        NOTE(review): the mutable default arguments are only read here, never
        mutated, so they are safe — but `None` defaults would be cleaner.
        """
        g_attrs = self.default_graph_attrs.copy()
        n_attrs = self.default_node_attrs.copy()
        e_attrs = self.default_edge_attrs.copy()
        g_attrs.update(graph_attrs)
        n_attrs.update(node_attrs)
        e_attrs.update(edge_attrs)
        if env:
            # Sphinx config values override the built-in defaults.
            g_attrs.update(env.config.inheritance_graph_attrs)
            n_attrs.update(env.config.inheritance_node_attrs)
            e_attrs.update(env.config.inheritance_edge_attrs)

        res = []
        res.append('digraph %s {\n' % name)
        res.append(self._format_graph_attrs(g_attrs))

        for name, fullname, bases in self.class_info:
            # Write the node
            this_node_attrs = n_attrs.copy()
            url = urls.get(fullname)
            if url is not None:
                this_node_attrs['URL'] = '"%s"' % url
            res.append(' "%s" [%s];\n' %
                       (name, self._format_node_attrs(this_node_attrs)))

            # Write the edges
            for base_name in bases:
                res.append(' "%s" -> "%s" [%s];\n' %
                           (base_name, name,
                            self._format_node_attrs(e_attrs)))
        res.append('}\n')
        return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
    """
    A docutils node to use as a placeholder for the inheritance diagram.
    """
    # Carries 'graph', 'content' and 'parts' attributes set by the directive.
    pass
class InheritanceDiagram(Directive):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'parts': directives.nonnegative_int,
    }

    def run(self):
        node = inheritance_diagram()
        node.document = self.state.document
        env = self.state.document.settings.env
        class_names = self.arguments[0].split()
        class_role = env.get_domain('py').role('class')
        # Store the original content for use as a hash
        node['parts'] = self.options.get('parts', 0)
        node['content'] = ', '.join(class_names)

        # Create a graph starting with the list of classes
        try:
            graph = InheritanceGraph(
                class_names, env.temp_data.get('py:module'),
                parts=node['parts'])
        # NOTE: Python 2-only except syntax; Python 3 would use "as err".
        except InheritanceException, err:
            return [node.document.reporter.warning(err.args[0],
                                                   line=self.lineno)]

        # Create xref nodes for each target of the graph's image map and
        # add them to the doc tree so that Sphinx can resolve the
        # references to real URLs later. These nodes will eventually be
        # removed from the doctree after we're done with them.
        for name in graph.get_all_class_names():
            refnodes, x = class_role(
                'class', ':class:`%s`' % name, name, 0, self.state)
            node.extend(refnodes)

        # Store the graph object so we can use it to generate the
        # dot file later
        node['graph'] = graph
        return [node]
def get_graph_hash(node):
    # Short stable hash of the directive arguments + options, used to name
    # the generated image files.  (Python 2: md5 accepts str directly.)
    return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
    """
    Output the graph for HTML.  This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # Create a mapping from fully-qualified class names to URLs.
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            # Intra-page reference: link to the local anchor.
            urls[child['reftitle']] = '#' + child.get('refid')

    dotcode = graph.generate_dot(name, urls, env=self.builder.env)
    render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
                    alt='Inheritance diagram of ' + node['content'])
    # SkipNode tells docutils the node's children were already handled here.
    raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
    """
    Output the graph for LaTeX.  This will insert a PDF.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # LaTeX output gets a fixed graph size; no URLs (no image map in PDF).
    dotcode = graph.generate_dot(name, env=self.builder.env,
                                 graph_attrs={'size': '"6.0,6.0"'})
    render_dot_latex(self, node, dotcode, [], 'inheritance')
    raise nodes.SkipNode
def skip(self, node):
    # Fallback visitor for text/man builders: emit nothing for the diagram.
    raise nodes.SkipNode
def setup(app):
    """Sphinx extension entry point: register the node, directive and config values."""
    app.setup_extension('sphinx.ext.graphviz')
    app.add_node(
        inheritance_diagram,
        latex=(latex_visit_inheritance_diagram, None),
        html=(html_visit_inheritance_diagram, None),
        text=(skip, None),
        man=(skip, None))
    app.add_directive('inheritance-diagram', InheritanceDiagram)
    # BUGFIX: these statements used to end with stray trailing commas, which
    # silently wrapped each call's (None) result in a 1-tuple — harmless at
    # runtime but clearly a copy-paste artifact.
    app.add_config_value('inheritance_graph_attrs', {}, False)
    app.add_config_value('inheritance_node_attrs', {}, False)
    app.add_config_value('inheritance_edge_attrs', {}, False)
| lgpl-2.1 |
nushio3/chainer | chainer/functions/batch_normalization.py | 2 | 8989 | import numpy
from chainer import cuda, Function
def _kernel_with_I(args, expr, name):
    """Build an elementwise CUDA kernel exposing the channel index ``I``.

    ``I = i / rdim % cdim`` maps the flat element index ``i`` of an array
    viewed as (ldim, cdim, rdim) onto its channel axis, so per-channel
    parameters can be indexed with ``I`` inside ``expr``.
    """
    return cuda.elementwise(
        '{}, int cdim, int rdim'.format(args),
        'int I = i / rdim % cdim; {};'.format(expr),
        name)
def _cumean_axis02(x):
    """Mean of a 3-d GPU array over axes 0 and 2, keeping the channel axis (1)."""
    with cuda.using_cumisc():
        if x.shape[2] > 1:
            # cumisc.mean does not support more than two dimensions
            shape = x.shape
            x = x.reshape(shape[0] * shape[1], shape[2])
            # First reduce over the trailing (rdim) axis...
            x = cuda.cumisc.mean(x, axis=1)
            x = x.reshape(shape[0], shape[1])
        else:
            x = x.reshape(x.shape[:2])
        # ...then over the leading (batch) axis.
        return cuda.cumisc.mean(x, axis=0)
def _cusum_axis02(x):
    """Sum of a 3-d GPU array over axes 0 and 2, keeping the channel axis (1)."""
    with cuda.using_cumisc():
        if x.shape[2] > 1:
            # cumisc.sum does not support more than two dimensions
            shape = x.shape
            x = x.reshape(shape[0] * shape[1], shape[2])
            # First reduce over the trailing (rdim) axis...
            x = cuda.cumisc.sum(x, axis=1)
            x = x.reshape(shape[0], shape[1])
        else:
            x = x.reshape(x.shape[:2])
        # ...then over the leading (batch) axis.
        return cuda.cumisc.sum(x, axis=0)
class BatchNormalization(Function):
    """Batch normalization on outputs of linear or convolution functions.

    Args:
        size (int or tuple of ints): Size (or shape) of channel
            dimensions.
        decay (float): Decay rate of moving average.
        eps (float): Epsilon value for numerical stability.

    See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
    Internal Covariate Shift <http://arxiv.org/abs/1502.03167>`_
    """
    parameter_names = ('gamma', 'beta')
    gradient_names = ('ggamma', 'gbeta')

    def __init__(self, size, decay=0.9, eps=1e-5):
        # Channel dimensions are flattened; all parameters/statistics are kept
        # as (1, size, 1) so they broadcast over (ldim, cdim, rdim) inputs.
        size = numpy.prod(size)

        self.avg_mean = numpy.zeros((1, size, 1), dtype=numpy.float32)
        self.avg_var = numpy.zeros_like(self.avg_mean)

        self.gamma = numpy.ones_like(self.avg_mean)   # scale parameter
        self.ggamma = numpy.empty_like(self.gamma)    # gradient of gamma
        self.beta = numpy.zeros_like(self.avg_mean)   # shift parameter
        self.gbeta = numpy.empty_like(self.beta)      # gradient of beta

        self.decay = decay
        self.N = [0]  # as a reference (shared mutable counter)
        self.eps = eps

    def __call__(self, x, test=False, finetune=False):
        """Invokes the forward propagation of BatchNormalization.

        BatchNormalization accepts additional arguments, which control three
        different running modes.

        Args:
            x (Variable): An input variable.
            test (bool): If ``True``, BatchNormalization runs in testing mode;
                it normalizes the input using precomputed statistics.
            finetune (bool): If ``True``, BatchNormalization runs in finetuning
                mode; it accumulates the input array to compute population
                statistics for normalization, and normalizes the input using
                batch statistics.

        If ``test`` and ``finetune`` are both ``False``, then BatchNormalization
        runs in training mode; it computes moving averages of mean and variance
        for evaluation during training, and normalizes the input using batch
        statistics.
        """
        # Batch statistics are used unless in pure test mode:
        # (not test) or finetune.
        self.use_batch_mean = not test or finetune
        self.is_finetune = finetune
        return Function.__call__(self, x)

    def start_finetuning(self):
        # Reset the sample counter so the next finetuning pass starts fresh.
        self.N[0] = numpy.array(0)

    def forward_cpu(self, x_orig):
        ldim, cdim, rdim = self._internal_shape(x_orig[0])
        x = x_orig[0].reshape(ldim, cdim, rdim)

        if self.use_batch_mean:
            # Per-channel statistics over batch (axis 0) and spatial (axis 2).
            mean = x.mean(axis=(0, 2), keepdims=True)
            var = x.var(axis=(0, 2), keepdims=True) + self.eps
        else:
            mean = self.avg_mean
            var = self.avg_var

        # Cache std and x_hat for use in backward_cpu.
        self.std = numpy.sqrt(var)
        x_mu = x - mean
        self.x_hat = x_mu / self.std
        y = self.gamma * self.x_hat + self.beta

        # Compute exponential moving average
        if self.use_batch_mean:
            if self.is_finetune:
                # Finetuning: simple running average (decay = 1/N).
                self.N[0] += 1
                decay = 1. / self.N[0]
            else:
                decay = self.decay

            m = ldim * rdim
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            self.avg_mean *= decay
            self.avg_mean += (1 - decay) * adjust * mean
            self.avg_var *= decay
            self.avg_var += (1 - decay) * adjust * var

        return y.reshape(x_orig[0].shape),

    def forward_gpu(self, x_orig):
        ldim, cdim, rdim = self._internal_shape(x_orig[0])
        x = x_orig[0].reshape(ldim, cdim, rdim)

        if self.use_batch_mean:
            # var = E[x^2] - E[x]^2 + eps, computed per channel on the GPU.
            mean = _cumean_axis02(x)
            sqmean = _cumean_axis02(x * x)
            var = sqmean  # reuse buffer
            cuda.elementwise(
                'float* var, const float* mean, const float* sqmean, float eps',
                'var[i] = sqmean[i] - mean[i] * mean[i] + eps',
                'bn_var')(var, mean, sqmean, self.eps)
        else:
            mean = self.avg_mean
            var = self.avg_var

        # Fold normalization into a per-channel affine: y = coeff*x + bias.
        coeff = cuda.empty_like(var)
        bias = cuda.empty_like(var)
        y = cuda.empty_like(x_orig[0])
        cuda.elementwise(
            '''float* coeff, float* bias, const float* mean, const float* var,
               const float* gamma, const float* beta''',
            '''coeff[i] = rsqrtf(var[i]) * gamma[i];
               bias[i] = beta[i] - coeff[i] * mean[i];''',
            'bn_fwd_prep')(coeff, bias, mean, var, self.gamma, self.beta)
        _kernel_with_I(
            'float* y, const float* x, const float* coeff, const float* bias',
            'y[i] = coeff[I] * x[i] + bias[I]',
            'bn_fwd')(y, x, coeff, bias, cdim, rdim)

        # Compute exponential moving average
        if self.use_batch_mean:
            if self.is_finetune:
                self.N[0] += 1
                decay = 1. / self.N[0]
            else:
                decay = self.decay

            m = ldim * rdim
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            kern = cuda.elementwise(
                'float* mean, const float* x, float decay, float adjust',
                'mean[i] = decay * mean[i] + (1 - decay) * adjust * x[i]',
                'bn_moving_avg')
            kern(self.avg_mean, mean, decay, adjust)
            kern(self.avg_var, var, decay, adjust)

        return y,

    def backward_cpu(self, x_orig, gy):
        # TODO(beam2d): Support backprop on inference mode
        assert self.use_batch_mean and not self.is_finetune
        ldim, cdim, rdim = self._internal_shape(x_orig[0])
        x = x_orig[0].reshape(ldim, cdim, rdim)
        gy = gy[0].reshape(ldim, cdim, rdim)
        m = ldim * rdim

        # Accumulate parameter gradients (uses x_hat/std cached by forward_cpu).
        gbeta = gy.sum(axis=(0, 2), keepdims=True)
        self.gbeta += gbeta
        ggamma = (gy * self.x_hat).sum(axis=(0, 2), keepdims=True)
        self.ggamma += ggamma

        coeff = self.gamma / self.std
        gbeta /= m
        ggamma /= m
        gx = coeff * (gy - self.x_hat * ggamma - gbeta)
        return gx.reshape(x_orig[0].shape),

    def backward_gpu(self, x_orig, gy):
        # TODO(beam2d): Support backprop on inference mode
        assert self.use_batch_mean and not self.is_finetune
        ldim, cdim, rdim = self._internal_shape(x_orig[0])
        x = x_orig[0].reshape(ldim, cdim, rdim)
        gy = gy[0].reshape(ldim, cdim, rdim)
        m = ldim * rdim

        # Recompute 1/std on the GPU (forward_gpu does not cache it).
        mean = _cumean_axis02(x)
        sqmean = _cumean_axis02(x * x)
        stdinv = sqmean  # reuse buffer
        cuda.elementwise(
            'float* stdinv, const float* mean, const float* sqmean, float eps',
            'stdinv[i] = rsqrtf(sqmean[i] - mean[i] * mean[i] + eps)',
            'bn_stdinv')(stdinv, mean, sqmean, self.eps)

        x_hat = cuda.empty_like(x)
        gx = cuda.empty_like(x)
        _kernel_with_I(
            'float* x_hat, const float* x, const float* mean, const float* stdinv',
            'x_hat[i] = (x[i] - mean[I]) * stdinv[I]',
            'bn_x_hat')(x_hat, x, mean, stdinv, cdim, rdim)
        mean = None  # release the buffer early

        ggamma = _cusum_axis02(x_hat * gy)
        gbeta = _cusum_axis02(gy)
        # TODO(beam2d): Unify these lines into one kernel
        self.ggamma += ggamma.reshape(self.ggamma.shape)
        self.gbeta += gbeta.reshape(self.gbeta.shape)

        coeff = stdinv  # reuse buffer
        coeff *= self.gamma
        ggamma /= m
        gbeta /= m
        _kernel_with_I(
            '''float* gx, const float* x_hat, const float* gy, const float* coeff,
               const float* ggamma, const float* gbeta''',
            'gx[i] = coeff[I] * (gy[i] - x_hat[i] * ggamma[I] - gbeta[I])',
            'bn_bwd')(gx, x_hat, gy, coeff, ggamma, gbeta, cdim, rdim)
        return gx.reshape(x_orig[0].shape),

    def _internal_shape(self, x):
        # Split x.shape into (batch, channel, rest): ldim = batch size,
        # cdim = flattened channel size, rdim = remaining spatial size.
        ldim = x.shape[0]
        cdim = self.gamma.size
        # NOTE(review): '/' is true division under Python 3 and would make
        # `rdim` a float; this code appears to target Python 2.
        rdim = x.size / (ldim * cdim)
        assert ldim * cdim * rdim == x.size
        return ldim, cdim, rdim
| mit |
marcok/odoo_modules | hr_employee_time_clock/migrations/11.0.0.0.13/post-migrate.py | 1 | 2402 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import api, fields, models, SUPERUSER_ID, _
from dateutil import rrule, parser
import pytz
from datetime import datetime, date, timedelta
import calendar
import math
import logging
_logger = logging.getLogger(__name__)
def migrate(cr, version):
    """
    This migration is made to calculate running time for each active employee and
    write it into last attendance, which has check out. It is important to
    companies that already use Employee Time Clock module.
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    employee_ids = env['hr.employee'].search([('active', '=', True)])
    # Countdown of employees left to process (used for progress logging only).
    i = len(employee_ids)
    analytic = env['employee.attendance.analytic']
    # Drop all existing analytic lines; they are fully rebuilt below.
    analytic.search([]).unlink()
    for employee in employee_ids:
        _logger.info('\n')
        _logger.info(i)
        _logger.info(employee.name)
        sheets = env['hr_timesheet_sheet.sheet'].search(
            [('employee_id', '=', employee.id)])
        for sheet in sheets:
            # Recreate the analytic line covering the sheet's full period.
            analytic.create_line(
                sheet, sheet.date_from, sheet.date_to)
            attendances = env['hr.attendance'].search(
                [('sheet_id', '=', sheet.id)])
            for attendance in attendances:
                # Only closed attendances (with a check-out) are recalculated.
                if attendance.check_out:
                    analytic.recalculate_line_worktime(
                        attendance, {'check_out': attendance.check_out})
        i -= 1
| agpl-3.0 |
Rerito/linux-ubi | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# Map of pid -> command name, seeded with the idle task (pid 0).
threads = { 0 : "idle"}

def thread_name(pid):
    # "comm:pid" label used throughout the timeline rendering.
    return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder runqueue event used when nothing specific happened."""

    def __repr__(self):
        return "unknown"

    @staticmethod
    def color():
        # No dedicated color for unknown events.
        return None
class RunqueueEventSleep:
    """Runqueue event: *sleeper* left the runqueue to sleep."""

    def __init__(self, sleeper):
        # pid of the task that blocked
        self.sleeper = sleeper

    def __repr__(self):
        who = thread_name(self.sleeper)
        return "%s gone to sleep" % who

    @staticmethod
    def color():
        # Blue marker in the GUI timeline.
        return (0, 0, 255)
class RunqueueEventWakeup:
    """Runqueue event: *wakee* was woken up and enqueued."""

    def __init__(self, wakee):
        # pid of the task that was woken
        self.wakee = wakee

    def __repr__(self):
        who = thread_name(self.wakee)
        return "%s woke up" % who

    @staticmethod
    def color():
        # Yellow marker in the GUI timeline.
        return (255, 255, 0)
class RunqueueEventFork:
    """Runqueue event: a newly forked task *child* was enqueued."""

    def __init__(self, child):
        # pid of the newly forked task
        self.child = child

    def __repr__(self):
        who = thread_name(self.child)
        return "new forked task %s" % who

    @staticmethod
    def color():
        # Green marker in the GUI timeline.
        return (0, 255, 0)
class RunqueueMigrateIn:
    """A task migrated onto this CPU's runqueue."""

    @staticmethod
    def color():
        # Cyan marks inbound migrations in the GUI.
        return (0x00, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in {0}".format(thread_name(self.new))
class RunqueueMigrateOut:
    """A task migrated away from this CPU's runqueue."""

    @staticmethod
    def color():
        # Magenta marks outbound migrations in the GUI.
        return (0xff, 0x00, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out {0}".format(thread_name(self.old))
class RunqueueSnapshot:
    """Immutable snapshot of the runnable pids on one CPU.

    Every scheduler event produces a *new* snapshot (old ones stay in the
    timeslice history); ``event`` records what caused this snapshot.
    """

    def __init__(self, tasks = (0,), event = RunqueueEventUnknown()):
        # Fix: immutable tuple default instead of the original mutable
        # list default.  tuple() copies, so any iterable is accepted;
        # pid 0 (idle) is expected to always be present.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot resulting from a prev -> next switch."""
        event = RunqueueEventUnknown()

        # Both tasks already tracked and prev still runnable: no change.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after *old* migrated to another CPU."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Already present: keep the task set, just record the event.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return the snapshot after *new* migrated in from another CPU."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return the snapshot after *new* was woken up."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return the snapshot after freshly forked *new* became runnable."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
        Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # Bug fix: the original appended self.origin_tostring(), a method
        # defined nowhere, so repr() always raised AttributeError.
        return "%r (last event: %r)" % (self.tasks, self.event)
class TimeSlice:
    """State of all runqueues between two consecutive scheduler events.

    Slices form a backward-linked chain through ``prev``; a new slice
    starts by copying the previous slice's runqueues and total load.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load in sync when one runqueue is replaced.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; record self in ts_list if anything changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source cpu when the task was actually on it.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Make *pid* runnable on *cpu*; *fork* selects the fork-wakeup event."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and start its successor.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered history of TimeSlices, searchable by timestamp."""

    def __init__(self, arg = []):
        # NOTE(review): mutable default argument -- every no-arg instance
        # shares the same list object.  Left as-is to preserve behavior.
        self.data = arg

    def get_time_slice(self, ts):
        """Return the (possibly new) slice that starts at timestamp *ts*."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing *ts*, or -1."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # NOTE: Python 2 semantics -- '/' is integer division here;
            # under Python 3 this would need '//'.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        # GUI back-reference used for summaries and painting.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a textual summary of the slice under the mouse in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        # Timestamps are in nanoseconds: seconds + microsecond fraction.
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        # NOTE(review): ns / 10**6 yields milliseconds although the label
        # says 'us' -- confirm the intended unit.
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu lane of *slice*, shaded red by its share of the load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Paint every slice overlapping the [start, end] time window."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first start, last end) covered by the history."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest cpu number seen in the last slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw perf events and feeds them into the timeslice model."""

    def __init__(self):
        # Last pid seen running on each cpu; -1 means "not known yet".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
        we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Remember the comm of both tasks for thread_name() lookups.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Forward a task-migration event into the current timeslice."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Forward a (successful) wakeup event into the current timeslice."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf entry hook: create the event parser before any event arrives."""
    global parser
    parser = SchedEventProxy()

def trace_end():
    """perf exit hook: hand the collected timeslices to the wx GUI."""
    # wx and RootFrame come from the SchedGui star-import above.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# ---------------------------------------------------------------------------
# perf event handlers (signatures generated by 'perf script -g python').
# Only migrate/switch/wakeup events feed the model; the rest are ignored.
# ---------------------------------------------------------------------------
def sched__sched_stat_runtime(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, orig_cpu,
    dest_cpu):
    # Task migration: feeds TimeSlice.migrate() through the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    prev_comm, prev_pid, prev_prio, prev_state,
    next_comm, next_pid, next_prio):
    # Context switch on one cpu.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    # Wakeup of a freshly forked task (fork flag = 1).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    # Wakeup of an existing task (fork flag = 0).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
| gpl-2.0 |
DavidLP/home-assistant | tests/components/fritzbox/test_climate.py | 10 | 6783 | """The tests for the demo climate component."""
import unittest
from unittest.mock import Mock, patch
import requests
from homeassistant.components.fritzbox.climate import FritzboxThermostat
class TestFritzboxClimate(unittest.TestCase):
    """Test Fritz!Box heating thermostats."""

    def setUp(self):
        """Create a mock device to test on."""
        self.device = Mock()
        self.device.name = 'Test Thermostat'
        self.device.actual_temperature = 18.0
        self.device.target_temperature = 19.5
        self.device.comfort_temperature = 22.0
        self.device.eco_temperature = 16.0
        self.device.present = True
        self.device.device_lock = True
        self.device.lock = False
        self.device.battery_low = True
        self.device.set_target_temperature = Mock()
        self.device.update = Mock()
        mock_fritz = Mock()
        mock_fritz.login = Mock()
        self.thermostat = FritzboxThermostat(self.device, mock_fritz)

    def test_init(self):
        """Test instance creation."""
        assert 18.0 == self.thermostat._current_temperature
        assert 19.5 == self.thermostat._target_temperature
        assert 22.0 == self.thermostat._comfort_temperature
        assert 16.0 == self.thermostat._eco_temperature

    def test_supported_features(self):
        """Test supported features property."""
        assert 129 == self.thermostat.supported_features

    def test_available(self):
        """Test available property."""
        assert self.thermostat.available
        self.thermostat._device.present = False
        assert not self.thermostat.available

    def test_name(self):
        """Test name property."""
        assert 'Test Thermostat' == self.thermostat.name

    def test_temperature_unit(self):
        """Test temperature_unit property."""
        assert '°C' == self.thermostat.temperature_unit

    def test_precision(self):
        """Test precision property."""
        assert 0.5 == self.thermostat.precision

    def test_current_temperature(self):
        """Test current_temperature property incl. special temperatures."""
        assert 18 == self.thermostat.current_temperature

    def test_target_temperature(self):
        """Test target_temperature property."""
        assert 19.5 == self.thermostat.target_temperature

        # 126.5 and 127.0 are magic values ("off"/"on"): no real target.
        self.thermostat._target_temperature = 126.5
        assert self.thermostat.target_temperature is None
        self.thermostat._target_temperature = 127.0
        assert self.thermostat.target_temperature is None

    @patch.object(FritzboxThermostat, 'set_operation_mode')
    def test_set_temperature_operation_mode(self, mock_set_op):
        """Test set_temperature by operation_mode."""
        self.thermostat.set_temperature(operation_mode='test_mode')
        mock_set_op.assert_called_once_with('test_mode')

    def test_set_temperature_temperature(self):
        """Test set_temperature by temperature."""
        self.thermostat.set_temperature(temperature=23.0)
        self.thermostat._device.set_target_temperature.\
            assert_called_once_with(23.0)

    @patch.object(FritzboxThermostat, 'set_operation_mode')
    def test_set_temperature_none(self, mock_set_op):
        """Test set_temperature with no arguments."""
        self.thermostat.set_temperature()
        mock_set_op.assert_not_called()
        self.thermostat._device.set_target_temperature.assert_not_called()

    @patch.object(FritzboxThermostat, 'set_operation_mode')
    def test_set_temperature_operation_mode_precedence(self, mock_set_op):
        """Test set_temperature for precedence of operation_mode argument."""
        self.thermostat.set_temperature(operation_mode='test_mode',
                                        temperature=23.0)
        mock_set_op.assert_called_once_with('test_mode')
        self.thermostat._device.set_target_temperature.assert_not_called()

    def test_current_operation(self):
        """Test operation mode property for different temperatures."""
        self.thermostat._target_temperature = 127.0
        assert 'on' == self.thermostat.current_operation
        self.thermostat._target_temperature = 126.5
        assert 'off' == self.thermostat.current_operation
        self.thermostat._target_temperature = 22.0
        assert 'heat' == self.thermostat.current_operation
        self.thermostat._target_temperature = 16.0
        assert 'eco' == self.thermostat.current_operation
        self.thermostat._target_temperature = 12.5
        assert 'manual' == self.thermostat.current_operation

    def test_operation_list(self):
        """Test operation_list property."""
        assert ['heat', 'eco', 'off', 'on'] == \
            self.thermostat.operation_list

    @patch.object(FritzboxThermostat, 'set_temperature')
    def test_set_operation_mode(self, mock_set_temp):
        """Test set_operation_mode by all modes and with a non-existing one."""
        values = {
            'heat': 22.0,
            'eco': 16.0,
            'on': 30.0,
            'off': 0.0}
        for mode, temp in values.items():
            # Fix: removed a leftover debug print(mode, temp) that
            # cluttered the test output.
            mock_set_temp.reset_mock()
            self.thermostat.set_operation_mode(mode)
            mock_set_temp.assert_called_once_with(temperature=temp)
        mock_set_temp.reset_mock()
        self.thermostat.set_operation_mode('non_existing_mode')
        mock_set_temp.assert_not_called()

    def test_min_max_temperature(self):
        """Test min_temp and max_temp properties."""
        assert 8.0 == self.thermostat.min_temp
        assert 28.0 == self.thermostat.max_temp

    def test_device_state_attributes(self):
        """Test device_state property."""
        attr = self.thermostat.device_state_attributes
        assert attr['device_locked'] is True
        assert attr['locked'] is False
        assert attr['battery_low'] is True

    def test_update(self):
        """Test update function."""
        device = Mock()
        device.update = Mock()
        device.actual_temperature = 10.0
        device.target_temperature = 11.0
        device.comfort_temperature = 12.0
        device.eco_temperature = 13.0
        self.thermostat._device = device
        self.thermostat.update()
        device.update.assert_called_once_with()
        assert 10.0 == self.thermostat._current_temperature
        assert 11.0 == self.thermostat._target_temperature
        assert 12.0 == self.thermostat._comfort_temperature
        assert 13.0 == self.thermostat._eco_temperature

    def test_update_http_error(self):
        """Test exception handling of update function."""
        self.device.update.side_effect = requests.exceptions.HTTPError
        self.thermostat.update()
        # On HTTP errors the thermostat re-logs into the Fritz!Box.
        self.thermostat._fritz.login.assert_called_once_with()
| apache-2.0 |
rajanandakumar/DIRAC | DataManagementSystem/scripts/dirac-dms-create-removal-request.py | 13 | 3399 | #!/usr/bin/env python
""" Create a DIRAC RemoveReplica|RemoveFile request to be executed by the RMS
"""
__RCSID__ = "ea64b42 (2012-07-29 16:45:05 +0200) ricardo <Ricardo.Graciani@gmail.com>"
import os
from hashlib import md5
import time
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.List import breakListIntoChunks
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[0],
__doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... SE LFN ...' % Script.scriptName,
'Arguments:',
' SE: StorageElement|All',
' LFN: LFN or file containing a List of LFNs' ] ) )
Script.parseCommandLine( ignoreErrors = False )
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
targetSE = args.pop( 0 )
lfns = []
for inputFileName in args:
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
inputFile.close()
lfns.extend( [ lfn.strip() for lfn in string.splitlines() ] )
else:
lfns.append( inputFileName )
from DIRAC.Resources.Storage.StorageElement import StorageElement
import DIRAC
# Check is provided SE is OK
if targetSE != 'All':
se = StorageElement( targetSE )
if not se.valid:
print se.errorReason
print
Script.showHelp()
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
reqClient = ReqClient()
fc = FileCatalog()
requestOperation = 'RemoveReplica'
if targetSE == 'All':
requestOperation = 'RemoveFile'
for lfnList in breakListIntoChunks( lfns, 100 ):
oRequest = Request()
requestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16], md5( repr( time.time() ) ).hexdigest()[:16] )
oRequest.RequestName = requestName
oOperation = Operation()
oOperation.Type = requestOperation
oOperation.TargetSE = targetSE
res = fc.getFileMetadata( lfnList )
if not res['OK']:
print "Can't get file metadata: %s" % res['Message']
DIRAC.exit( 1 )
if res['Value']['Failed']:
print "Could not get the file metadata of the following, so skipping them:"
for fFile in res['Value']['Failed']:
print fFile
lfnMetadata = res['Value']['Successful']
for lfn in lfnMetadata:
rarFile = File()
rarFile.LFN = lfn
rarFile.Size = lfnMetadata[lfn]['Size']
rarFile.Checksum = lfnMetadata[lfn]['Checksum']
rarFile.GUID = lfnMetadata[lfn]['GUID']
rarFile.ChecksumType = 'ADLER32'
oOperation.addFile( rarFile )
oRequest.addOperation( oOperation )
isValid = RequestValidator().validate( oRequest )
if not isValid['OK']:
print "Request is not valid: ", isValid['Message']
DIRAC.exit( 1 )
result = reqClient.putRequest( oRequest )
if result['OK']:
print 'Request %d Submitted' % result['Value']
else:
print 'Failed to submit Request: ', result['Message']
| gpl-3.0 |
jacobparra/redditclone | config/urls.py | 2 | 1276 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Root URL configuration for the project.
urlpatterns = [
    # Static template pages.
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),

    # User management
    url(r'^users/', include("reddit.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here

    # static() serves user-uploaded media in development; it is a no-op
    # when DEBUG is off.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
| bsd-3-clause |
valesi/electrum | lib/daemon.py | 1 | 6276 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast, os
import jsonrpclib
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
from util import json_decode, DaemonThread
from wallet import WalletStorage, Wallet
from wizard import WizardBase
from commands import known_commands, Commands
from simple_config import SimpleConfig
def lockfile(config):
    """Return the path of the daemon lockfile inside the config directory."""
    daemon_marker = 'daemon'
    return os.path.join(config.path, daemon_marker)
def get_daemon(config):
    """Return a JSON-RPC proxy to a running daemon, or None.

    Reads the (host, port) the daemon advertised in its lockfile, then
    pings it to make sure it is actually alive.
    """
    try:
        with open(lockfile(config)) as f:
            host, port = ast.literal_eval(f.read())
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.  A missing/corrupt lockfile means no daemon.
        return
    server = jsonrpclib.Server('http://%s:%d' % (host, port))
    # check if daemon is running
    try:
        server.ping()
        return server
    except Exception:
        # Same bare-except fix: a stale lockfile / dead daemon yields None.
        pass
class RequestHandler(SimpleJSONRPCRequestHandler):
    """JSON-RPC request handler that also answers CORS preflight requests."""

    def do_OPTIONS(self):
        # Reply 200 to the preflight; the CORS headers themselves are
        # attached in end_headers() below.
        self.send_response(200)
        self.end_headers()

    def end_headers(self):
        # Allow browser-based clients from any origin to call the daemon.
        self.send_header("Access-Control-Allow-Headers",
                         "Origin, X-Requested-With, Content-Type, Accept")
        self.send_header("Access-Control-Allow-Origin", "*")
        SimpleJSONRPCRequestHandler.end_headers(self)
class Daemon(DaemonThread):
    """JSON-RPC server thread owning the network connection and open wallets.

    Command-line invocations are forwarded to a running daemon through
    the socket address advertised in the lockfile.
    """

    def __init__(self, config, network):
        DaemonThread.__init__(self)
        self.config = config
        self.network = network
        self.gui = None
        self.wallets = {}
        # NOTE(review): cmd_runner is built with wallet=None here; a
        # per-call runner with a real wallet is created in run_cmdline().
        self.wallet = None
        self.cmd_runner = Commands(self.config, self.wallet, self.network)
        host = config.get('rpchost', 'localhost')
        # Port 0 lets the OS pick a free port; the actual bound address
        # is written to the lockfile below so clients can find it.
        port = config.get('rpcport', 0)
        self.server = SimpleJSONRPCServer((host, port), requestHandler=RequestHandler, logRequests=False)
        with open(lockfile(config), 'w') as f:
            f.write(repr(self.server.socket.getsockname()))
        # Short timeout so the serve loop can notice stop requests.
        self.server.timeout = 0.1
        for cmdname in known_commands:
            self.server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
        self.server.register_function(self.run_cmdline, 'run_cmdline')
        self.server.register_function(self.ping, 'ping')
        self.server.register_function(self.run_daemon, 'daemon')
        self.server.register_function(self.run_gui, 'gui')

    def ping(self):
        """Liveness probe used by get_daemon()."""
        return True

    def run_daemon(self, config):
        """Handle the 'daemon start|stop|status' subcommands over RPC."""
        sub = config.get('subcommand')
        assert sub in ['start', 'stop', 'status']
        if sub == 'start':
            # The daemon must already be up for this RPC to be reachable.
            response = "Daemon already running"
        elif sub == 'status':
            p = self.network.get_parameters()
            response = {
                'path': self.network.config.path,
                'server': p[0],
                'blockchain_height': self.network.get_local_height(),
                'server_height': self.network.get_server_height(),
                'nodes': self.network.get_interfaces(),
                'connected': self.network.is_connected(),
                'auto_connect': p[4],
                'wallets': dict([ (k, w.is_up_to_date()) for k, w in self.wallets.items()]),
            }
        elif sub == 'stop':
            self.stop()
            response = "Daemon stopped"
        return response

    def run_gui(self, config_options):
        """Ask an attached GUI to open a new window; return a status string."""
        config = SimpleConfig(config_options)
        if self.gui:
            if hasattr(self.gui, 'new_window'):
                path = config.get_wallet_path()
                self.gui.new_window(path, config.get('url'))
                response = "ok"
            else:
                response = "error: current GUI does not support multiple windows"
        else:
            response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
        return response

    def load_wallet(self, path, get_wizard=None):
        """Return the wallet at *path*, opening (and caching) it if needed.

        When *get_wizard* is given, a wizard may be run to create or
        upgrade the wallet; otherwise the stored wallet is loaded and its
        threads started.
        """
        if path in self.wallets:
            wallet = self.wallets[path]
        else:
            storage = WalletStorage(path)
            if get_wizard:
                if storage.file_exists:
                    wallet = Wallet(storage)
                    action = wallet.get_action()
                else:
                    action = 'new'
                if action:
                    wizard = get_wizard()
                    wallet = wizard.run(self.network, storage)
                else:
                    wallet.start_threads(self.network)
            else:
                wallet = Wallet(storage)
                wallet.start_threads(self.network)
            # The wizard may return None (e.g. user cancelled).
            if wallet:
                self.wallets[path] = wallet
        return wallet

    def run_cmdline(self, config_options):
        """Execute one forwarded command-line invocation and return its result."""
        config = SimpleConfig(config_options)
        cmdname = config.get('cmd')
        cmd = known_commands[cmdname]
        path = config.get_wallet_path()
        wallet = self.load_wallet(path) if cmd.requires_wallet else None
        # arguments passed to function
        # NOTE: Python 2 semantics -- map() returns a list here.
        args = map(lambda x: config.get(x), cmd.params)
        # decode json arguments
        args = map(json_decode, args)
        # options
        args += map(lambda x: config.get(x), cmd.options)
        cmd_runner = Commands(config, wallet, self.network,
                              password=config_options.get('password'),
                              new_password=config_options.get('new_password'))
        func = getattr(cmd_runner, cmd.name)
        result = func(*args)
        return result

    def run(self):
        """Serve JSON-RPC requests until stopped, then remove the lockfile."""
        while self.is_running():
            self.server.handle_request()
        os.unlink(lockfile(self.config))

    def stop(self):
        """Stop all wallet threads, then the daemon thread itself."""
        for k, wallet in self.wallets.items():
            wallet.stop_threads()
        DaemonThread.stop(self)
| gpl-3.0 |
pidah/st2contrib | packs/hpe-icsp/actions/icsp_server_attribute_set.py | 5 | 2444 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.icsp import ICSPBaseActions
class SetServerAttribute(ICSPBaseActions):
    def run(self, mid, attribute_key, function,
            attribute_value=None, connection_details=None):
        """Add/replace or delete one server-scoped custom attribute.

        The ICSP API replaces the whole customAttributes list on PUT, so
        every server-scoped attribute to keep must be re-sent in full.
        Internal attributes (keys starting with '__') are never re-sent.
        """
        self.set_connection(connection_details)
        self.get_sessionid()

        endpoint = "/rest/os-deployment-servers/%s" % mid

        # The new attribute (for "Add") goes first; survivors follow.
        attributes = []
        if function == "Add":
            attributes.append({
                "key": attribute_key,
                "values": [{"scope": "server", "value": attribute_value}]})

        current = self.icsp_get(endpoint)

        for element in current['customAttributes']:
            key = element['key']
            first_value = element['values'][0]
            # Only server-scoped, non-internal attributes are carried over.
            if first_value['scope'] != 'server' or key.startswith("__"):
                continue
            # Entries matching attribute_key are dropped: removed outright
            # for "Delete", already replaced above for "Add".
            if key == attribute_key:
                continue
            attributes.append({
                "key": key,
                "values": [{"scope": "server",
                            "value": first_value['value']}]})

        payload = {"category": "os-deployment-servers",
                   "type": "OSDServer",
                   "name": current['name'],
                   "customAttributes": attributes}

        try:
            self.icsp_put(endpoint, payload)
        except Exception as e:
            raise Exception("Error: %s" % e)
        return
| apache-2.0 |
rajalokan/nova | nova/policies/server_groups.py | 1 | 2174 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Policy name roots; per-action rules are derived from POLICY_ROOT.
BASE_POLICY_NAME = 'os_compute_api:os-server-groups'
POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
BASE_POLICY_RULE = 'rule:%s' % BASE_POLICY_NAME


server_groups_policies = [
    # TODO(Kevin_Zheng): remove this rule as this not used by any API
    policy.RuleDefault(
        name=BASE_POLICY_NAME,
        check_str=base.RULE_ADMIN_OR_OWNER),
    base.create_rule_default(
        POLICY_ROOT % 'create',
        BASE_POLICY_RULE,
        "Create a new server group",
        [
            {
                'path': '/os-server-groups',
                'method': 'POST'
            }
        ]
    ),
    base.create_rule_default(
        POLICY_ROOT % 'delete',
        BASE_POLICY_RULE,
        "Delete a server group",
        [
            {
                'path': '/os-server-groups/{server_group_id}',
                'method': 'DELETE'
            }
        ]
    ),
    base.create_rule_default(
        POLICY_ROOT % 'index',
        BASE_POLICY_RULE,
        "List all server groups",
        [
            {
                'path': '/os-server-groups',
                'method': 'GET'
            }
        ]
    ),
    base.create_rule_default(
        POLICY_ROOT % 'show',
        BASE_POLICY_RULE,
        "Show details of a server group",
        [
            {
                'path': '/os-server-groups/{server_group_id}',
                'method': 'GET'
            }
        ]
    ),
]


def list_rules():
    """Return the server-groups policy rules for registration."""
    return server_groups_policies
| apache-2.0 |
andyzsf/Cactus- | cactus/plugin/loader.py | 9 | 3295 | #coding:utf-8
import os
import sys
import imp
import logging
from cactus.plugin import defaults
from cactus.utils.filesystem import fileList
logger = logging.getLogger(__name__)
class BasePluginsLoader(object):
    """Shared behaviour for the concrete plugin loaders below."""

    def load(self):
        """Return the list of plugins; concrete loaders must implement this."""
        raise NotImplementedError("Subclasses must implement load")

    def _initialize_plugin(self, plugin):
        """
        :param plugin: A plugin to initialize.
        :returns: An initialized plugin with all default methods set.
        """
        # Backfill every hook the plugin did not define with the no-op
        # default from the defaults module (plus the ORDER attribute).
        for attr in defaults.DEFAULTS + ['ORDER']:
            if hasattr(plugin, attr):
                continue
            setattr(plugin, attr, getattr(defaults, attr))

        # Give the plugin a display name unless it already chose one:
        # module plugins have __name__; instances fall back to their class.
        if hasattr(plugin, "plugin_name"):
            return
        if hasattr(plugin, "__name__"):
            name = plugin.__name__
        elif hasattr(plugin, "__class__"):
            name = plugin.__class__.__name__
        else:
            name = "anonymous"
        plugin.plugin_name = name
class ObjectsPluginLoader(BasePluginsLoader):
    """
    Loads the plugins objects passed to this loader.
    """

    def __init__(self, plugins):
        """
        :param plugins: The list of plugins this loader should load.
        """
        self.plugins = plugins

    def load(self):
        """
        :returns: The list of plugins loaded by this loader.
        """
        # The objects were handed to us ready-made; initializing them is
        # all that is left before returning them in a fresh list.
        for plugin in self.plugins:
            self._initialize_plugin(plugin)
        return list(self.plugins)
class CustomPluginsLoader(BasePluginsLoader):
    """
    Loads all the plugins found at the path passed.
    """

    def __init__(self, plugin_path):
        """
        :param plugin_path: The path where the plugins should be loaded from.
        """
        self.plugin_path = plugin_path

    def load(self):
        """
        :returns: The list of plugins loaded by this loader.
        """
        plugins = []

        # Load user plugins
        for plugin_path in fileList(self.plugin_path):
            if self._is_plugin_path(plugin_path):
                custom_plugin = self._load_plugin_path(plugin_path)
                if custom_plugin:
                    self._initialize_plugin(custom_plugin)
                    plugins.append(custom_plugin)

        return plugins

    def _is_plugin_path(self, plugin_path):
        """
        :param plugin_path: A path where to look for a plugin.
        :returns: Whether this path looks like an enabled plugin.
        """
        if not plugin_path.endswith('.py'):
            return False
        # Plugins are disabled by putting 'disabled' in their path.
        if 'disabled' in plugin_path:
            return False
        return True

    def _load_plugin_path(self, plugin_path):
        """
        :param plugin_path: A path to load as a plugin.
        :returns: A plugin module, or None if it could not be loaded.
        """
        module_name = "plugin_{0}".format(os.path.splitext(os.path.basename(plugin_path))[0])
        try:
            return imp.load_source(module_name, plugin_path)
        except Exception as e:
            # Fixes: use the py2.6+/py3-compatible 'except ... as e' form
            # (was the py2-only 'except Exception, e'), let logging do the
            # lazy interpolation, and drop a dead commented-out sys.exit().
            logger.warning('Could not load plugin at path %s: %s', plugin_path, e)
            return None
| bsd-3-clause |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/sqlite3/test/factory.py | 51 | 7928 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/factory.py: tests for the various factories in pysqlite
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
class MyConnection(sqlite.Connection):
    """Connection subclass used to exercise connect()'s ``factory`` argument."""

    def __init__(self, *args, **kwargs):
        # Forward everything to the base class; the subclass exists only so
        # tests can check which type connect() actually produced.
        super(MyConnection, self).__init__(*args, **kwargs)
def dict_factory(cursor, row):
    """Row factory mapping column names to the row's values.

    ``cursor.description`` yields one sequence per column whose first
    element is the column name.
    """
    return {column[0]: value for column, value in zip(cursor.description, row)}
class MyCursor(sqlite.Cursor):
    """Cursor subclass that installs :func:`dict_factory` on itself."""

    def __init__(self, *args, **kwargs):
        # Initialise exactly like a plain cursor, then switch the row
        # factory so every fetched row comes back as a dict.
        super(MyCursor, self).__init__(*args, **kwargs)
        self.row_factory = dict_factory
class ConnectionFactoryTests(unittest.TestCase):
    """Verify that connect(factory=...) instantiates the given class."""

    def setUp(self):
        # The factory keyword selects the Connection subclass to create.
        self.con = sqlite.connect(":memory:", factory=MyConnection)

    def tearDown(self):
        self.con.close()

    def CheckIsInstance(self):
        # failUnless is the historic (pre-2.7) spelling of assertTrue.
        self.failUnless(isinstance(self.con,
                                   MyConnection),
                        "connection is not instance of MyConnection")
class CursorFactoryTests(unittest.TestCase):
    """Verify that Connection.cursor(factory=...) instantiates the given class."""

    def setUp(self):
        self.con = sqlite.connect(":memory:")

    def tearDown(self):
        self.con.close()

    def CheckIsInstance(self):
        # The factory argument selects the Cursor subclass to create.
        cur = self.con.cursor(factory=MyCursor)
        self.failUnless(isinstance(cur,
                                   MyCursor),
                        "cursor is not instance of MyCursor")
class RowFactoryTestsBackwardsCompat(unittest.TestCase):
    """A row factory set on the cursor itself (old API) must still be honoured."""

    def setUp(self):
        self.con = sqlite.connect(":memory:")

    def CheckIsProducedByFactory(self):
        # MyCursor installs dict_factory in its __init__, so fetched rows
        # must come back as dicts rather than tuples.
        cur = self.con.cursor(factory=MyCursor)
        cur.execute("select 4+5 as foo")
        row = cur.fetchone()
        self.failUnless(isinstance(row,
                                   dict),
                        "row is not instance of dict")
        cur.close()

    def tearDown(self):
        self.con.close()
class RowFactoryTests(unittest.TestCase):
    """Tests for Connection.row_factory and the sqlite3.Row class."""

    def setUp(self):
        self.con = sqlite.connect(":memory:")

    def CheckCustomFactory(self):
        """Any callable taking (cursor, row) may serve as a row factory."""
        self.con.row_factory = lambda cur, row: list(row)
        row = self.con.execute("select 1, 2").fetchone()
        self.failUnless(isinstance(row,
                                   list),
                        "row is not instance of list")

    def CheckSqliteRowIndex(self):
        """Row supports access by (case-insensitive) name and by position."""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        self.failUnless(isinstance(row,
                                   sqlite.Row),
                        "row is not instance of sqlite.Row")
        col1, col2 = row["a"], row["b"]
        self.failUnless(col1 == 1, "by name: wrong result for column 'a'")
        # Fixed: this message previously reported column 'a' for the 'b' check.
        self.failUnless(col2 == 2, "by name: wrong result for column 'b'")
        # Column-name lookup is case-insensitive.
        col1, col2 = row["A"], row["B"]
        self.failUnless(col1 == 1, "by name: wrong result for column 'A'")
        self.failUnless(col2 == 2, "by name: wrong result for column 'B'")
        col1, col2 = row[0], row[1]
        self.failUnless(col1 == 1, "by index: wrong result for column 0")
        self.failUnless(col2 == 2, "by index: wrong result for column 1")

    def CheckSqliteRowIter(self):
        """Checks if the row object is iterable"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        for col in row:
            pass

    def CheckSqliteRowAsTuple(self):
        """Checks if the row object can be converted to a tuple"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        t = tuple(row)

    def CheckSqliteRowAsDict(self):
        """Checks if the row object can be correctly converted to a dictionary"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        d = dict(row)
        self.failUnlessEqual(d["a"], row["a"])
        self.failUnlessEqual(d["b"], row["b"])

    def CheckSqliteRowHashCmp(self):
        """Checks if the row object compares and hashes correctly"""
        self.con.row_factory = sqlite.Row
        row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
        # Equal rows compare equal and hash equal; differing rows do not.
        self.failUnless(row_1 == row_1)
        self.failUnless(row_1 == row_2)
        self.failUnless(row_2 != row_3)
        self.failIf(row_1 != row_1)
        self.failIf(row_1 != row_2)
        self.failIf(row_2 == row_3)
        self.failUnlessEqual(row_1, row_2)
        self.failUnlessEqual(hash(row_1), hash(row_2))
        self.failIfEqual(row_1, row_3)
        self.failIfEqual(hash(row_1), hash(row_3))

    def tearDown(self):
        self.con.close()
class TextFactoryTests(unittest.TestCase):
    """Tests for Connection.text_factory (Python 2: controls TEXT -> unicode/str)."""

    def setUp(self):
        self.con = sqlite.connect(":memory:")

    def CheckUnicode(self):
        # Default text factory: TEXT values come back as unicode objects.
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.failUnless(type(row[0]) == unicode, "type of row[0] must be unicode")

    def CheckString(self):
        # text_factory = str: TEXT values come back as UTF-8 byte strings.
        self.con.text_factory = str
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.failUnless(type(row[0]) == str, "type of row[0] must be str")
        self.failUnless(row[0] == austria.encode("utf-8"), "column must equal original data in UTF-8")

    def CheckCustom(self):
        # Any callable taking the raw bytes may serve as the text factory.
        self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
        self.failUnless(type(row[0]) == unicode, "type of row[0] must be unicode")
        self.failUnless(row[0].endswith(u"reich"), "column must contain original data")

    def CheckOptimizedUnicode(self):
        # OptimizedUnicode: str for pure-ASCII text, unicode otherwise.
        # NOTE(review): "Deutchland" is misspelled, but only its being
        # ASCII-only matters to this test.
        self.con.text_factory = sqlite.OptimizedUnicode
        austria = unicode("Österreich", "latin1")
        germany = unicode("Deutchland")
        a_row = self.con.execute("select ?", (austria,)).fetchone()
        d_row = self.con.execute("select ?", (germany,)).fetchone()
        self.failUnless(type(a_row[0]) == unicode, "type of non-ASCII row must be unicode")
        self.failUnless(type(d_row[0]) == str, "type of ASCII-only row must be str")

    def tearDown(self):
        self.con.close()
def suite():
    """Build a TestSuite containing every Check* method of the factory tests."""
    # makeSuite collects all methods whose names start with the given
    # prefix ("Check" instead of the default "test").
    test_cases = (
        ConnectionFactoryTests,
        CursorFactoryTests,
        RowFactoryTestsBackwardsCompat,
        RowFactoryTests,
        TextFactoryTests,
    )
    return unittest.TestSuite(
        [unittest.makeSuite(case, "Check") for case in test_cases])
def test():
    """Run the complete factory test-suite with a text runner."""
    unittest.TextTestRunner().run(suite())


if __name__ == "__main__":
    test()
| apache-2.0 |
cmcantalupo/geopm | scripts/test/TestPolicyStoreIntegration.py | 1 | 3364 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import unittest
import geopmpy.policy_store
import geopmpy.version
class TestPolicyStoreIntegration(unittest.TestCase):
    """Integration tests for the geopmpy.policy_store native bindings."""

    @unittest.skipIf(not geopmpy.version.__beta__, "PolicyStoreIntegration requires beta features")
    def test_all_interfaces(self):
        """Exercise connect/set/get/disconnect against an in-memory store."""
        # ':memory:' backs the store with a transient SQLite database.
        geopmpy.policy_store.connect(':memory:')
        geopmpy.policy_store.set_best('frequency_map', 'p1', [0.5, 1])
        geopmpy.policy_store.set_default('frequency_map', [2, 4])
        # Unknown agents or mis-sized policy vectors must be rejected.
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_default('invalid_agent', [])
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_default('monitor', [0.5])
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_best('invalid_agent', 'pinv', [])
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_best('monitor', 'pinv', [0.5])
        # 'p1' has an explicit best; 'p2' falls back to the agent default.
        self.assertEqual([0.5, 1], geopmpy.policy_store.get_best('frequency_map', 'p1'))
        self.assertEqual([2, 4], geopmpy.policy_store.get_best('frequency_map', 'p2'))
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.get_best('power_balancer', 'p2')
        geopmpy.policy_store.disconnect()
        # Attempt accesses to a closed connection: every call must raise.
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_best('frequency_map', 'p1', [0.5, 1])
        with self.assertRaises(RuntimeError):
            geopmpy.policy_store.set_default('frequency_map', [2, 4])
        with self.assertRaises(RuntimeError):
            self.assertEqual([0.5, 1], geopmpy.policy_store.get_best('frequency_map', 'p1'))
# Allow invoking this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
salaria/odoo | addons/hr_payroll/wizard/__init__.py | 442 | 1159 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
smvv/trs | src/rules/fractions.py | 1 | 14707 | # This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
from itertools import combinations, product, ifilterfalse
from .utils import least_common_multiple, partition, is_numeric_node, \
evals_to_numeric
from ..node import ExpressionNode as N, ExpressionLeaf as L, Scope, OP_DIV, \
OP_ADD, OP_MUL, negate
from ..possibilities import Possibility as P, MESSAGES
from ..translate import _
from .negation import negate_polynome
def match_constant_division(node):
    """
    Return the possibilities for reducing a division with trivial operands:

    a / 0  ->  Division by zero (raises)
    a / 1  ->  a
    0 / a  ->  0
    a / a  ->  1
    """
    assert node.is_op(OP_DIV)
    p = []
    nominator, denominator = node
    # a / 0 is not a rewrite possibility but a hard error.
    if denominator == 0:
        raise ZeroDivisionError('Division by zero: %s.' % node)
    # a / 1
    if denominator == 1:
        p.append(P(node, division_by_one, (nominator,)))
    # 0 / a
    if nominator == 0:
        p.append(P(node, division_of_zero, (denominator,)))
    # a / a
    if nominator == denominator:
        p.append(P(node, division_by_self, (nominator,)))
    return p
def division_by_one(root, args):
    """
    a / 1 -> a

    args[0] is the nominator; a negation attached to the whole fraction
    is transferred onto it.
    """
    return args[0].negate(root.negated)


MESSAGES[division_by_one] = _('Division by `1` yields the nominator.')
def division_of_zero(root, args):
    """
    0 / a -> 0

    The result is a literal zero carrying the fraction's negation.
    """
    return L(0, negated=root.negated)


MESSAGES[division_of_zero] = _('Division of `0` by {1} reduces to `0`.')
def division_by_self(root, args):
    """
    a / a -> 1

    The result is a literal one carrying the fraction's negation.
    """
    return L(1, negated=root.negated)


MESSAGES[division_by_self] = _('Division of {1} by itself reduces to `1`.')
def match_add_fractions(node):
    """
    Return the possibilities for adding fractions inside an addition:

    a / b + c / b and a, c in Z -> (a + c) / b
    a / b + c / d and a, b, c, d in Z -> a' / e + c' / e  # e = lcm(b, d)
                                                          # | e = b * d
    a / b + c and a, b, c in Z -> a / b + (bc) / b  # =>* (a + bc) / b
    """
    assert node.is_op(OP_ADD)
    p = []
    scope = Scope(node)
    # Split the addition's operands into fractions and everything else.
    fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
    numerics = filter(is_numeric_node, others)
    for ab, cd in combinations(fractions, 2):
        a, b = ab
        c, d = cd
        if b == d:
            # Equal denominators, add nominators to create a single fraction
            p.append(P(node, add_nominators, (scope, ab, cd)))
        elif all(map(is_numeric_node, (a, b, c, d))):
            # Denominators are both numeric, rewrite both fractions to the
            # least common multiple of their denominators. Later, the
            # nominators will be added
            lcm = least_common_multiple(b.value, d.value)
            p.append(P(node, equalize_denominators, (scope, ab, cd, lcm)))
            # Also, add the (non-recommended) possibility to multiply the
            # denominators. Do this only if the multiplication is not equal to
            # the least common multiple, to avoid duplicate possibilities
            mult = b.value * d.value
            if mult != lcm:
                p.append(P(node, equalize_denominators, (scope, ab, cd, mult)))
    for ab, c in product(fractions, numerics):
        a, b = ab
        if a.is_numeric() and b.is_numeric():
            # Fraction of constants added to a constant -> create a single
            # constant fraction
            p.append(P(node, constant_to_fraction, (scope, ab, c)))
    return p
def add_nominators(root, args):
    """
    a / b + c / b and a, c in Z -> (a + c) / b
    """
    scope, ab, cb = args
    a, b = ab
    c = cb[0]
    # Replace the left node with the new expression, transfer fraction
    # negations to nominators
    scope.replace(ab, (a.negate(ab.negated) + c.negate(cb.negated)) / b)
    scope.remove(cb)
    return scope.as_nary_node()


MESSAGES[add_nominators] = \
    _('Add the nominators of {2} and {3} to create a single fraction.')
def equalize_denominators(root, args):
    """
    a / b + c / d and a, b, c, d in Z -> a' / e + c' / e
    """
    # args is (scope, fraction1, fraction2, target_denominator); the slice
    # [::3] picks out scope and the target denominator.
    scope, denom = args[::3]
    for fraction in args[1:3]:
        n, d = fraction
        # Scale factor needed to reach the common denominator.
        mult = denom / d.value
        if mult != 1:
            if n.is_numeric():
                # Fold the factor directly into a numeric nominator.
                nom = L(n.value * mult)
            else:
                nom = L(mult) * n
            scope.replace(fraction, negate(nom / L(d.value * mult),
                                           fraction.negated))
    return scope.as_nary_node()


MESSAGES[equalize_denominators] = \
    _('Equalize the denominators of divisions {2} and {3} to {4}.')
def constant_to_fraction(root, args):
    """
    a / b + c and a, b, c in Z -> a / b + (bc) / b  # =>* (a + bc) / b
    """
    scope, ab, c = args
    b = ab[1]
    # Multiply and divide the constant by b so both terms share denominator b.
    scope.replace(c, b * c / b)
    return scope.as_nary_node()


MESSAGES[constant_to_fraction] = \
    _('Rewrite constant {3} to a fraction to be able to add it to {2}.')
def match_multiply_fractions(node):
    """
    Return the possibilities for combining fractions inside a multiplication:

    a / b * c / d -> (ac) / (bd)
    a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
    """
    assert node.is_op(OP_MUL)
    p = []
    scope = Scope(node)
    fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
    # Any pair of fractions can be merged into one.
    for ab, cd in combinations(fractions, 2):
        p.append(P(node, multiply_fractions, (scope, ab, cd)))
    # A non-fraction factor is only pulled into the fraction when useful.
    for ab, c in product(fractions, others):
        if evals_to_numeric(c) or not evals_to_numeric(ab):
            p.append(P(node, multiply_with_fraction, (scope, ab, c)))
    return p
def multiply_fractions(root, args):
    """
    a / b * (c / d) -> ac / (bd)
    """
    scope, ab, cd = args
    a, b = ab
    c, d = cd
    # Negations of both fractions accumulate on the combined fraction.
    scope.replace(ab, (a * c / (b * d)).negate(ab.negated + cd.negated))
    scope.remove(cd)
    return scope.as_nary_node()


MESSAGES[multiply_fractions] = _('Multiply fractions {2} and {3}.')
def multiply_with_fraction(root, args):
    """
    a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
    """
    scope, ab, c = args
    a, b = ab
    # Preserve the original left-to-right operand order in the nominator.
    if scope.index(ab) < scope.index(c):
        nominator = a * c
    else:
        nominator = c * a
    scope.replace(ab, negate(nominator / b, ab.negated))
    scope.remove(c)
    return scope.as_nary_node()


MESSAGES[multiply_with_fraction] = \
    _('Multiply {3} with the nominator of fraction {2}.')
def match_divide_fractions(node):
    """
    Reduce divisions of fractions to a single fraction.
    Examples:
    a / b / c   -> a / (bc)
    a / (b / c) -> ac / b
    Note that:
    a / b / (c / d) => ad / bd
    """
    assert node.is_op(OP_DIV)
    nom, denom = node
    p = []
    # Fraction in the nominator: push the outer denominator down.
    if nom.is_op(OP_DIV):
        p.append(P(node, divide_fraction, tuple(nom) + (denom,)))
    # Fraction in the denominator: invert and multiply.
    if denom.is_op(OP_DIV):
        p.append(P(node, divide_by_fraction, (nom,) + tuple(denom)))
    return p
def divide_fraction(root, args):
    """
    a / b / c -> a / (bc)
    """
    # Note: args is unused here; the operands are destructured from root.
    (a, b), c = root
    return negate(a / (b * c), root.negated)


MESSAGES[divide_fraction] = \
    _('Move {3} to denominator of fraction `{1} / {2}`.')
def divide_by_fraction(root, args):
    """
    a / (b / c) -> ac / b
    """
    # Note: args is unused here; the operands are destructured from root.
    a, bc = root
    b, c = bc
    # The inner fraction's negation is merged into the result's negation.
    return negate(a * c / b, root.negated + bc.negated)


MESSAGES[divide_by_fraction] = \
    _('Move {3} to the nominator of fraction `{1} / {2}`.')
def is_power_combination(a, b):
    """
    Check if two nodes are powers that can be combined in a fraction, for
    example:

    a and a^2
    a^2 and a^2
    a^2 and a
    """
    # Compare the roots: for a power node the root is its first operand.
    root_a = a[0] if a.is_power() else a
    root_b = b[0] if b.is_power() else b
    return root_a == root_b
def mult_scope(node):
    """
    Get the multiplication scope of a node that may or may not be a
    multiplication itself.
    """
    if not node.is_op(OP_MUL):
        # Wrap a non-multiplication in a multiplication node so callers
        # can always work with a Scope over OP_MUL.
        node = N(OP_MUL, node)
    return Scope(node)
def remove_from_mult_scope(scope, node):
    """Remove *node* from a multiplication scope and return the remainder."""
    if len(scope) > 1:
        scope.remove(node)
    else:
        # Removing the only operand would leave an empty product; put the
        # multiplicative identity in its place instead.
        scope.replace(node, L(1))
    return scope.as_nary_node()
def match_extract_fraction_terms(node):
    """
    Divide nominator and denominator by the same part. If the same root of a
    power appears in both nominator and denominator, also extract it so that it
    can be reduced to a single power by power division rules.
    Examples:
    ab / (ac)               -> a / a * (c / e)           # =>* c / e
    a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e)   # -> a^(b - d)(c / e)
    ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
    """
    assert node.is_op(OP_DIV)
    n_scope, d_scope = map(mult_scope, node)
    p = []
    nominator, denominator = node
    # ac / b: extract a non-numeric factor n from the nominator when the
    # remaining fraction evaluates to a number.
    for n in ifilterfalse(evals_to_numeric, n_scope):
        a_scope = mult_scope(nominator)
        if len(a_scope) == 1:
            # n is the whole nominator; the remainder is 1.
            a = L(1)
        else:
            a = a_scope.all_except(n)
        if evals_to_numeric(a / denominator):
            p.append(P(node, extract_nominator_term, (a, n)))
    if len(n_scope) == 1 and len(d_scope) == 1:
        return p
    # a ^ b * c / (a ^ d * e): pair up equal factors or powers of the same
    # root across nominator and denominator.
    for n, d in product(n_scope, d_scope):
        if n == d:
            handler = divide_fraction_by_term
        elif is_power_combination(n, d):
            handler = extract_fraction_terms
        else:
            continue  # pragma: nocover
        p.append(P(node, handler, (n_scope, d_scope, n, d)))
    return p
def extract_nominator_term(root, args):
    """
    ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
    """
    a, c = args
    # root[1] is the denominator b; c is pulled outside the fraction.
    return negate(a / root[1] * c, root.negated)


MESSAGES[extract_nominator_term] = \
    _('Extract {2} from the nominator of fraction {0}.')
def extract_fraction_terms(root, args):
    """
    a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e)
    """
    n_scope, d_scope, n, d = args
    # Split off n / d so power-division rules can reduce it separately.
    div = n / d * (remove_from_mult_scope(n_scope, n) \
                   / remove_from_mult_scope(d_scope, d))
    return negate(div, root.negated)


MESSAGES[extract_fraction_terms] = _('Extract `{3} / {4}` from fraction {0}.')
def divide_fraction_by_term(root, args):
    """
    ab / a      -> b
    a / (ba)    -> 1 / b
    a * c / (ae) -> c / e
    """
    n_scope, d_scope, n, d = args
    # Strip the shared factor from both sides of the fraction.
    nom = remove_from_mult_scope(n_scope, n)
    d_scope.remove(d)
    if not len(d_scope):
        # The denominator is exhausted: the fraction collapses entirely.
        return negate(nom, root.negated)
    return negate(nom / d_scope.as_nary_node(), root.negated)


MESSAGES[divide_fraction_by_term] = \
    _('Divide nominator and denominator of {0} by {2}.')
def match_division_in_denominator(node):
    """
    a / (b / c + d) -> (ca) / (c(b / c + d))
    """
    assert node.is_op(OP_DIV)
    denom = node[1]
    if not denom.is_op(OP_ADD):
        return []
    # One possibility per nested fraction: multiply through by its
    # denominator n[1] to clear it.
    return [P(node, multiply_with_term, (n[1],))
            for n in Scope(denom) if n.is_op(OP_DIV)]
def multiply_with_term(root, args):
    """
    a / (b / c + d) -> (ca) / (c(b / c + d))
    """
    # c is the denominator of the nested fraction being cleared.
    c = args[0]
    nom, denom = root
    return negate(c * nom / (c * denom), root.negated)


MESSAGES[multiply_with_term] = \
    _('Multiply nominator and denominator of {0} with {1}.')
def match_combine_fractions(node):
    """
    a/b + c/d -> ad/(bd) + bc/(bd)  # -> (ad + bc)/(bd)
    """
    assert node.is_op(OP_ADD)
    scope = Scope(node)
    # Every unordered pair of fractions in the addition is a candidate.
    fractions = [n for n in scope if n.is_op(OP_DIV)]
    return [P(node, combine_fractions, (scope, left, right))
            for left, right in combinations(fractions, 2)]
def combine_fractions(root, args):
    """
    a/b + c/d -> ad/(bd) + bc/(bd)
    """
    scope, ab, cd = args
    (a, b), (c, d) = ab, cd
    # Transfer each fraction's negation onto one of its operands before
    # cross-multiplying to the common denominator bd.
    a = negate(a, ab.negated)
    d = negate(d, cd.negated)
    scope.replace(ab, a * d / (b * d) + b * c / (b * d))
    scope.remove(cd)
    return scope.as_nary_node()


MESSAGES[combine_fractions] = _('Combine fraction {2} and {3}.')
def match_remove_division_negation(node):
    """
    -a / (-b + c) -> a / (--b - c)
    """
    assert node.is_op(OP_DIV)
    nom, denom = node
    if node.negated:
        # Prefer cancelling the fraction's negation against a negated term
        # in the nominator; fall back to the denominator.
        if nom.is_op(OP_ADD) and any([n.negated for n in Scope(nom)]):
            return [P(node, remove_division_negation, (True, nom))]
        if denom.is_op(OP_ADD) and any([n.negated for n in Scope(denom)]):
            return [P(node, remove_division_negation, (False, denom))]
    return []
def remove_division_negation(root, args):
    """
    -a / (-b + c) -> a / (--b - c)
    """
    nom, denom = root
    # args[0] tells which side absorbs the negation: True -> nominator.
    if args[0]:
        nom = negate_polynome(nom, ())
    else:
        denom = negate_polynome(denom, ())
    # One negation moved into the polynome, so drop one from the fraction.
    return negate(nom / denom, root.negated - 1)


MESSAGES[remove_division_negation] = \
    _('Move negation from fraction {0} to polynome {2}.')
def match_fraction_in_division(node):
    """
    (1 / a * b) / c -> b / (ac)
    c / (1 / a * b) -> (ac) / b
    """
    assert node.is_op(OP_DIV)
    nom, denom = node
    p = []
    # Look for a unit fraction (1 / a) among the nominator's factors...
    if nom.is_op(OP_MUL):
        scope = Scope(nom)
        for n in scope:
            if n.is_op(OP_DIV) and n[0] == 1:
                p.append(P(node, fraction_in_division, (True, scope, n)))
    # ... and among the denominator's factors.
    if denom.is_op(OP_MUL):
        scope = Scope(denom)
        for n in scope:
            if n.is_op(OP_DIV) and n[0] == 1:
                p.append(P(node, fraction_in_division, (False, scope, n)))
    return p
def fraction_in_division(root, args):
    """
    (1 / a * b) / c -> b / (ac)
    c / (1 / a * b) -> (ac) / b
    """
    is_nominator, scope, fraction = args
    nom, denom = root
    if fraction.negated or fraction[0].negated:
        # Keep the negation behind by replacing 1 / a with its (negated)
        # nominator instead of removing the factor entirely.
        scope.replace(fraction, fraction[0].negate(fraction.negated))
    else:
        scope.remove(fraction)
    # Move the unit fraction's denominator a to the other side.
    if is_nominator:
        nom = scope.as_nary_node()
        denom = fraction[1] * denom
    else:
        nom = fraction[1] * nom
        denom = scope.as_nary_node()
    return negate(nom / denom, root.negated)


MESSAGES[fraction_in_division] = \
    _('Multiply both sides of fraction {0} with {3[1]}.')
| agpl-3.0 |
zhangtuoparis13/Vintageous | tests/commands/test__vi_big_e.py | 9 | 1113 | from collections import namedtuple
from Vintageous.tests import ViewTest
from Vintageous.vi.utils import modes
# Each case bundles: the buffer text, the initial selection region, the mode
# to run the motion in, the region expected afterwards, and a label used in
# failure messages.
test_data = namedtuple('test_data', 'text startRegion mode expectedRegion msg')

ALL_CASES = (
    test_data('01. 4', (1, 1), modes.NORMAL, (2, 2), 'Normal'),
    test_data('012 4', (1, 1), modes.INTERNAL_NORMAL, (1, 3), 'Internal Normal'),
    test_data('0ab3 5', (1, 3), modes.VISUAL, (1, 4), 'Visual Forward'),
    test_data('0b2 a5', (5, 1), modes.VISUAL, (5, 2), 'Visual Reverse no crossover'),
    test_data('0ba3 5', (3, 1), modes.VISUAL, (2, 4), 'Visual Reverse crossover'),
)
class Test_vi_big_e(ViewTest):
    """Tests for the `_vi_big_e` command (Vim's big-word-end motion, `E`)."""

    def runTests(self, data):
        """Run every case against a fresh buffer with a single selection."""
        # NOTE(review): the loop variable shadows the `data` parameter; each
        # iteration rebinds it to one test_data case.
        for (i, data) in enumerate(data):
            self.write(data.text)
            self.clear_sel()
            self.add_sel(self.R(*data.startRegion))
            self.view.run_command('_vi_big_e', {'mode': data.mode, 'count': 1})
            self.assert_equal_regions(self.R(*data.expectedRegion), self.first_sel(),
                                      "Failed on index {} {} : Text:\"{}\" Region:{}"
                                      .format(i, data.msg, data.text, data.startRegion))

    def testAllCases(self):
        self.runTests(ALL_CASES)
| mit |
nerzhul/ansible | lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py | 50 | 11254 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity/ownership metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup_facts
version_added: "2.1"
short_description: Get security group facts.
description:
- Get facts for a specific security group or all security groups within a resource group.
options:
name:
description:
- Only show results for a specific security group.
required: false
default: null
resource_group:
description:
- Name of the resource group to use.
required: true
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one security group
azure_rm_securitygroup_facts:
resource_group: Testing
name: secgroup001
- name: Get facts for all security groups
azure_rm_securitygroup_facts:
resource_group: Testing
'''
RETURN = '''
azure_securitygroups:
description: List containing security group dicts.
returned: always
type: list
example: [{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001",
"location": "eastus2",
"name": "secgroup001",
"properties": {
"defaultSecurityRules": [
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"properties": {
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destinationAddressPrefix": "VirtualNetwork",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65000,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "VirtualNetwork",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"properties": {
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65001,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "AzureLoadBalancer",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"properties": {
"access": "Deny",
"description": "Deny all inbound traffic",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65500,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"properties": {
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destinationAddressPrefix": "VirtualNetwork",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65000,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "VirtualNetwork",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"properties": {
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destinationAddressPrefix": "Internet",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65001,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"properties": {
"access": "Deny",
"description": "Deny all outbound traffic",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65500,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
}
],
"networkInterfaces": [
{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic004"
}
],
"provisioningState": "Succeeded",
"resourceGuid": "ebd00afa-5dc8-446f-810a-50dd6f671588",
"securityRules": []
},
"tags": {},
"type": "Microsoft.Network/networkSecurityGroups"
}]
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
    from msrestazure.azure_exceptions import CloudError
    from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
    # A missing Azure SDK is reported with a friendly error message in
    # azure_rm_common.  Catch only ImportError: a bare except would also
    # swallow SystemExit/KeyboardInterrupt and real errors raised while
    # the SDK modules initialise.
    pass

# Type name passed to serialize_obj() for security group objects.
AZURE_OBJECT_CLASS = 'NetworkSecurityGroup'
class AzureRMSecurityGroupFacts(AzureRMModuleBase):
    """Facts module: gather one or all network security groups in a resource group."""

    def __init__(self):
        # 'name' is optional: with it a single group is fetched, without it
        # all groups in the resource group are listed.
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(required=True, type='str'),
            tags=dict(type='list'),
        )
        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_securitygroups=[])
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMSecurityGroupFacts, self).__init__(self.module_arg_spec,
                                                        supports_tags=False,
                                                        facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point invoked by the base class with validated parameters."""
        # Copy each declared parameter onto the instance.
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.name is not None:
            self.results['ansible_facts']['azure_securitygroups'] = self.get_item()
        else:
            self.results['ansible_facts']['azure_securitygroups'] = self.list_items()
        return self.results

    def get_item(self):
        """Fetch one security group by name; return [] if absent or tags mismatch."""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        result = []
        try:
            item = self.network_client.network_security_groups.get(self.resource_group, self.name)
        except CloudError:
            # A missing group is not an error for a facts module.
            pass
        if item and self.has_tags(item.tags, self.tags):
            grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
            grp['name'] = item.name
            result = [grp]
        return result

    def list_items(self):
        """List every security group in the resource group, filtered by tags."""
        self.log('List all items')
        try:
            response = self.network_client.network_security_groups.list(self.resource_group)
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
                grp['name'] = item.name
                results.append(grp)
        return results
def main():
    """Module entry point: constructing the facts class runs the module."""
    AzureRMSecurityGroupFacts()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ParanoidAndroid/android_external_chromium | testing/gtest/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files (paths relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (paths relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """

  if not os.path.isfile(os.path.join(directory, relative_path)):
    # print() calls work on both Python 2 and 3; the original bare print
    # statements are syntax errors under Python 3.
    print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory))
    print('Please either specify a valid project root directory '
          'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Aborts the program unless gtest_root is a valid gtest root directory."""

  # Check the header seed first, then the source seed (same order as before).
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.  Prompts the
  user before overwriting an existing file (aborts on anything but
  y/Y), and creates any missing ancestor directories of the output file.
  """

  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    # print() works on both Python 2 and 3 (the original used a
    # Python-2-only print statement for the ABORTED message below).
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Aborts the program unless output_dir can receive both fused files."""

  # Validate gtest.h first, then gtest-all.cc (same order as before).
  for target in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, target)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""

  # open()/set() replace the file() builtin and the sets.Set class, both of
  # which were removed in Python 3 (sets was deprecated since Python 2.6).
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""

    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return
    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""

  # set() and open() replace sets.Set and the file() builtin, both removed
  # in Python 3 (sets was deprecated since Python 2.6).
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""

    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return
    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.
          # There is no need to #include "gtest/gtest.h" more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""

  # ``with open(...)`` replaces the file() builtin (removed in Python 3)
  # and guarantees the output file is closed even if fusing raises.
  with open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') as output_file:
    FuseGTestAllCcToFile(gtest_root, output_file)
def FuseGTest(gtest_root, output_dir):
  """Validates both directories, then fuses gtest.h and gtest-all.cc."""

  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  # Generate the header first, then the fused source (same order as before).
  for fuse_step in (FuseGTestH, FuseGTestAllCc):
    fuse_step(gtest_root, output_dir)
def main():
  """Parses the command line and kicks off the fusing.

  Accepts either OUTPUT_DIR alone (the gtest root then defaults to the
  parent of the scripts/ directory) or GTEST_ROOT_DIR OUTPUT_DIR; any
  other usage prints the module docstring and exits with status 1.
  """

  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # print() works on both Python 2 and 3; the original ``print __doc__``
    # statement is a syntax error under Python 3.
    print(__doc__)
    sys.exit(1)


if __name__ == '__main__':
  main()
| bsd-3-clause |
dsysoev/fun-with-algorithms | queue/maxheap.py | 1 | 3185 |
"""
Max heap implementation
https://en.wikipedia.org/wiki/Min-max_heap
Algorithm Average
Build heap O(n)
"""
from __future__ import print_function
from math import log, ceil
class MaxHeap(object):
    """Array-backed binary max-heap.

    Invariant: every node is >= both of its children, so index 0 always
    holds the maximum.  Building from a list is O(n); extraction is
    O(log n).
    """

    def __init__(self):
        # Heap storage; index 0 is the maximum element.
        self.__data = []

    def max_heapify(self, start):
        """Sift the element at ``start`` down until the heap property holds.

        Assumes both subtrees of ``start`` already satisfy the max-heap
        property (standard CLRS MAX-HEAPIFY).
        """
        left = self.left_child(start)
        right = self.right_child(start)
        size = self.heap_size()
        # Pick the largest of the node and its (up to two) children.
        largest = start
        if left < size and self.__data[left] > self.__data[largest]:
            largest = left
        if right < size and self.__data[right] > self.__data[largest]:
            largest = right
        if largest != start:
            self.__data[start], self.__data[largest] = \
                self.__data[largest], self.__data[start]
            self.max_heapify(largest)

    def add_list(self, lst):
        """Add a list of elements into the heap (bottom-up build, O(n))."""
        self.__data += lst
        for index in range(self.parent(self.heap_size() - 1), -1, -1):
            self.max_heapify(index)

    def add(self, value):
        """Add one element into the heap."""
        self.add_list([value])

    def extract_max(self):
        """Remove and return the maximum element in O(log n).

        Raises IndexError when the heap is empty (same exception type the
        previous implementation raised implicitly).
        """
        if not self.__data:
            raise IndexError('extract_max from an empty heap')
        value = self.__data[0]
        # Move the last leaf to the root and sift it down.  This replaces
        # the original ``del self.__data[0]`` followed by a full O(n)
        # re-heapify of the whole array.
        last = self.__data.pop()
        if self.__data:
            self.__data[0] = last
            self.max_heapify(0)
        return value

    def heap_size(self):
        """Return number of elements in the heap."""
        return len(self.__data)

    def parent(self, index):
        """Return parent index (equivalent to (index - 1) // 2)."""
        return (index + 1) // 2 - 1

    def left_child(self, index):
        """Return index of left child."""
        return 2 * index + 1

    def right_child(self, index):
        """Return index of right child."""
        return 2 * index + 2

    def __str__(self):
        """Render the heap level by level; '[]' for an empty heap."""
        if not self.__data:
            # log(0) below would raise ValueError; print '[]' instead.
            return '[]\n'
        # string length used to center each level under the root
        strlen = 2 * 2 ** ceil(log(self.heap_size(), 2))
        maxlevel = int(log(self.heap_size(), 2)) + 1
        # add root element to string
        string = str([self.__data[0]]).center(strlen) + '\n'
        for index in range(1, maxlevel):
            # slice out the elements that form the current level
            lst = self.__data[2 ** index - 1:2 ** (index + 1) - 1]
            if index == maxlevel - 1:
                # without centering for the last line
                string += str(lst) + '\n'
            else:
                string += str(lst).center(strlen) + '\n'
        return string
# Demo driver.  NOTE: the original guard was ``if __name__ in "__main__":``,
# a substring test that would also match e.g. "__main" — use equality.
if __name__ == "__main__":
    HEAP = MaxHeap()
    LIST = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]
    print("Build heap from list: {}".format(LIST))
    HEAP.add_list(LIST)
    print("Show heap:\n{}".format(HEAP))
    for VALUE in [100]:
        print("Add new element {}".format(VALUE))
        HEAP.add(VALUE)
        print("Show heap:\n{}".format(HEAP))
    for _ in range(2):
        MAX = HEAP.extract_max()
        print("Extract max element: {}".format(MAX))
        print("Show heap:\n{}".format(HEAP))
| mit |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/billing_setup_error.py | 1 | 1902 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Module-level protobuf registration: declares the proto package, the
# marshal namespace, and the message names (manifest) this module defines.
# This file is auto-generated; do not edit by hand.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.errors',
    marshal='google.ads.googleads.v7',
    manifest={
        'BillingSetupErrorEnum',
    },
)
class BillingSetupErrorEnum(proto.Message):
    r"""Container for enum describing possible billing setup errors. """

    class BillingSetupError(proto.Enum):
        r"""Enum describing possible billing setup errors."""
        # Auto-generated from the Google Ads API v7 protobuf definition.
        # The numbers are wire values and must never be changed or reused.
        UNSPECIFIED = 0
        UNKNOWN = 1
        CANNOT_USE_EXISTING_AND_NEW_ACCOUNT = 2
        CANNOT_REMOVE_STARTED_BILLING_SETUP = 3
        CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT = 4
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS = 5
        INVALID_PAYMENTS_ACCOUNT = 6
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY = 7
        INVALID_START_TIME_TYPE = 8
        THIRD_PARTY_ALREADY_HAS_BILLING = 9
        BILLING_SETUP_IN_PROGRESS = 10
        NO_SIGNUP_PERMISSION = 11
        CHANGE_OF_BILL_TO_IN_PROGRESS = 12
        PAYMENTS_PROFILE_NOT_FOUND = 13
        PAYMENTS_ACCOUNT_NOT_FOUND = 14
        PAYMENTS_PROFILE_INELIGIBLE = 15
        PAYMENTS_ACCOUNT_INELIGIBLE = 16
        CUSTOMER_NEEDS_INTERNAL_APPROVAL = 17
        # NOTE(review): value 18 is absent here — presumably retired in the
        # upstream proto; verify against the API definition before reuse.
        PAYMENTS_ACCOUNT_INELIGIBLE_CURRENCY_CODE_MISMATCH = 19


__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
arcz/ansible-modules-core | cloud/google/gce_lb.py | 130 | 12230 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
version_added: "1.5"
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
- the port (range) to forward, e.g. 80 or 8000-8888 defaults to all ports
required: false
default: null
members:
description:
- a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
    """Entry point for the gce_lb Ansible module.

    Depending on ``state``, creates or deletes a GCE HTTP health check
    and/or a load balancer (forwarding rule + target pool via libcloud),
    then exits through module.exit_json()/fail_json().

    The only code change from the original is converting the five
    Python-2-only ``except Exception, e:`` clauses to
    ``except Exception as e:`` (valid on Python 2.6+ and Python 3).
    """
    module = AnsibleModule(
        argument_spec = dict(
            httphealthcheck_name = dict(),
            httphealthcheck_port = dict(default=80),
            httphealthcheck_path = dict(default='/'),
            httphealthcheck_interval = dict(default=5),
            httphealthcheck_timeout = dict(default=5),
            httphealthcheck_unhealthy_count = dict(default=2),
            httphealthcheck_healthy_count = dict(default=2),
            httphealthcheck_host = dict(),
            name = dict(),
            protocol = dict(default='tcp'),
            region = dict(),
            external_ip = dict(),
            port_range = dict(),
            members = dict(type='list'),
            state = dict(default='present'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')

    gce = gce_connect(module)

    # Pull all validated parameters into locals.
    httphealthcheck_name = module.params.get('httphealthcheck_name')
    httphealthcheck_port = module.params.get('httphealthcheck_port')
    httphealthcheck_path = module.params.get('httphealthcheck_path')
    httphealthcheck_interval = module.params.get('httphealthcheck_interval')
    httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
    httphealthcheck_unhealthy_count = \
        module.params.get('httphealthcheck_unhealthy_count')
    httphealthcheck_healthy_count = \
        module.params.get('httphealthcheck_healthy_count')
    httphealthcheck_host = module.params.get('httphealthcheck_host')
    name = module.params.get('name')
    protocol = module.params.get('protocol')
    region = module.params.get('region')
    external_ip = module.params.get('external_ip')
    port_range = module.params.get('port_range')
    members = module.params.get('members')
    state = module.params.get('state')

    try:
        gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
        gcelb.connection.user_agent_append("%s/%s" % (
            USER_AGENT_PRODUCT, USER_AGENT_VERSION))
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    changed = False
    json_output = {'name': name, 'state': state}

    if not name and not httphealthcheck_name:
        module.fail_json(msg='Nothing to do, please specify a "name" ' + \
                'or "httphealthcheck_name" parameter', changed=False)

    if state in ['active', 'present']:
        # first, create the httphealthcheck if requested
        hc = None
        if httphealthcheck_name:
            json_output['httphealthcheck_name'] = httphealthcheck_name
            try:
                hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
                    host=httphealthcheck_host, path=httphealthcheck_path,
                    port=httphealthcheck_port,
                    interval=httphealthcheck_interval,
                    timeout=httphealthcheck_timeout,
                    unhealthy_threshold=httphealthcheck_unhealthy_count,
                    healthy_threshold=httphealthcheck_healthy_count)
                changed = True
            except ResourceExistsError:
                # Idempotency: reuse the existing health check.
                hc = gce.ex_get_healthcheck(httphealthcheck_name)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            if hc is not None:
                json_output['httphealthcheck_host'] = hc.extra['host']
                json_output['httphealthcheck_path'] = hc.path
                json_output['httphealthcheck_port'] = hc.port
                json_output['httphealthcheck_interval'] = hc.interval
                json_output['httphealthcheck_timeout'] = hc.timeout
                json_output['httphealthcheck_unhealthy_count'] = \
                        hc.unhealthy_threshold
                json_output['httphealthcheck_healthy_count'] = \
                        hc.healthy_threshold

        # create the forwarding rule (and target pool under the hood)
        lb = None
        if name:
            if not region:
                module.fail_json(msg='Missing required region name',
                        changed=False)
            nodes = []
            output_nodes = []
            json_output['name'] = name
            # members is a python list of 'zone/inst' strings
            if members:
                for node in members:
                    try:
                        zone, node_name = node.split('/')
                        nodes.append(gce.ex_get_node(node_name, zone))
                        output_nodes.append(node)
                    except:
                        # skip nodes that are badly formatted or don't exist
                        pass
            try:
                if hc is not None:
                    lb = gcelb.create_balancer(name, port_range, protocol,
                        None, nodes, ex_region=region, ex_healthchecks=[hc],
                        ex_address=external_ip)
                else:
                    lb = gcelb.create_balancer(name, port_range, protocol,
                        None, nodes, ex_region=region, ex_address=external_ip)
                changed = True
            except ResourceExistsError:
                # Idempotency: reuse the existing balancer.
                lb = gcelb.get_balancer(name)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

            if lb is not None:
                json_output['members'] = output_nodes
                json_output['protocol'] = protocol
                json_output['region'] = region
                json_output['external_ip'] = lb.ip
                json_output['port_range'] = lb.port
                hc_names = []
                if 'healthchecks' in lb.extra:
                    for hc in lb.extra['healthchecks']:
                        hc_names.append(hc.name)
                json_output['httphealthchecks'] = hc_names

    if state in ['absent', 'deleted']:
        # first, delete the load balancer (forwarding rule and target pool)
        # if specified.
        if name:
            json_output['name'] = name
            try:
                lb = gcelb.get_balancer(name)
                gcelb.destroy_balancer(lb)
                changed = True
            except ResourceNotFoundError:
                # Already gone - nothing to do.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

        # destroy the health check if specified
        if httphealthcheck_name:
            json_output['httphealthcheck_name'] = httphealthcheck_name
            try:
                hc = gce.ex_get_healthcheck(httphealthcheck_name)
                gce.ex_destroy_healthcheck(hc)
                changed = True
            except ResourceNotFoundError:
                # Already gone - nothing to do.
                pass
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)

    json_output['changed'] = changed
    module.exit_json(**json_output)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
ojii/django-nani | hvad/tests/forms.py | 1 | 5295 | # -*- coding: utf-8 -*-
from django.core.exceptions import FieldError
from hvad.forms import TranslatableModelForm, TranslatableModelFormMetaclass
from hvad.test_utils.context_managers import LanguageOverride
from hvad.test_utils.testcase import NaniTestCase
from testproject.app.models import Normal
from django.db import models
class NormalForm(TranslatableModelForm):
    # Form over Normal exposing both the shared and the translated field.
    class Meta:
        model = Normal
        fields = ['shared_field', 'translated_field']
class NormalMediaForm(TranslatableModelForm):
    # Same model, but with a Media inner class to test asset handling.
    class Meta:
        model = Normal

    class Media:
        css = {
            'all': ('layout.css',)
        }
class NormalFormExclude(TranslatableModelForm):
    # Variant that excludes the shared field, to test Meta.exclude support.
    class Meta:
        model = Normal
        exclude = ['shared_field']
class FormTests(NaniTestCase):
    """Tests for TranslatableModelForm creation, validation and saving."""

    def test_nontranslatablemodelform(self):
        """The metaclass must reject models that are not translatable."""
        # Make sure that TranslatableModelForm won't accept a regular model
        # "Fake" model to use for the TranslatableModelForm
        class NonTranslatableModel(models.Model):
            field = models.CharField(max_length=128)
        # Meta class for use below
        class Meta:
            model = NonTranslatableModel
        # Make sure we do indeed get an exception, if we try to initialise it
        self.assertRaises(TypeError,
                          TranslatableModelFormMetaclass,
                          'NonTranslatableModelForm', (TranslatableModelForm,),
                          {'Meta': Meta}
                          )

    def test_normal_model_form_instantiation(self):
        """Unbound forms expose both shared and translated fields."""
        # Basic example and checking it gives us all the fields needed
        form = NormalForm()
        self.assertTrue("translated_field" in form.fields)
        self.assertTrue("shared_field" in form.fields)
        self.assertTrue("translated_field" in form.base_fields)
        self.assertTrue("shared_field" in form.base_fields)
        self.assertFalse(form.is_valid())

        # Check if it works with media argument too
        form = NormalMediaForm()
        self.assertFalse(form.is_valid())
        self.assertTrue("layout.css" in str(form.media))

        # Check if it works with an instance of Normal
        form = NormalForm(instance=Normal())
        self.assertFalse(form.is_valid())

    def test_normal_model_form_valid(self):
        """A complete data dict validates and round-trips through clean()."""
        SHARED = 'Shared'
        TRANSLATED = 'English'
        data = {
            'shared_field': SHARED,
            'translated_field': TRANSLATED,
            'language_code': 'en'
        }
        form = NormalForm(data)
        self.assertTrue(form.is_valid(), form.errors.as_text())
        self.assertTrue("translated_field" in form.fields)
        self.assertTrue("shared_field" in form.fields)
        self.assertTrue(TRANSLATED in form.clean()["translated_field"])
        self.assertTrue(SHARED in form.clean()["shared_field"])

    def test_normal_model_form_initaldata_instance(self):
        """Initial data together with an instance is accepted and validates."""
        # Check if it accepts inital data and instance
        SHARED = 'Shared'
        TRANSLATED = 'English'
        data = {
            'shared_field': SHARED,
            'translated_field': TRANSLATED,
            'language_code': 'en'
        }
        form = NormalForm(data, instance=Normal(), initial=data)
        self.assertTrue(form.is_valid(), form.errors.as_text())

    def test_normal_model_form_existing_instance(self):
        """Rendering a form for a saved instance shows its field values."""
        # Check if it works with an existing instance of Normal
        SHARED = 'Shared'
        TRANSLATED = 'English'
        instance = Normal.objects.language("en").create(shared_field=SHARED, translated_field=TRANSLATED)
        form = NormalForm(instance=instance)
        self.assertFalse(form.is_valid())
        self.assertTrue(SHARED in form.as_p())
        self.assertTrue(TRANSLATED in form.as_p())

    def test_normal_model_form_save(self):
        """save() persists shared and translated fields in two queries."""
        with LanguageOverride('en'):
            SHARED = 'Shared'
            TRANSLATED = 'English'
            data = {
                'shared_field': SHARED,
                'translated_field': TRANSLATED,
                'language_code': 'en'
            }
            form = NormalForm(data)
            # tested a non-translated ModelForm, and that takes 7 queries.
            with self.assertNumQueries(2):
                obj = form.save()
            with self.assertNumQueries(0):
                self.assertEqual(obj.shared_field, SHARED)
                self.assertEqual(obj.translated_field, TRANSLATED)
                self.assertNotEqual(obj.pk, None)

    def test_no_language_code_in_fields(self):
        """language_code is handled internally, never exposed as a field."""
        # dict.has_key() was removed in Python 3; use the ``in`` operator.
        with LanguageOverride("en"):
            form = NormalForm()
            self.assertFalse("language_code" in form.fields)

            form = NormalMediaForm()
            self.assertFalse("language_code" in form.fields)

            form = NormalFormExclude()
            self.assertFalse("language_code" in form.fields)

    def test_form_wrong_field_in_class(self):
        """Referencing a nonexistent model field raises FieldError."""
        with LanguageOverride("en"):
            def create_wrong_form():
                class WrongForm(TranslatableModelForm):
                    class Meta:
                        model = Normal
                        fields = ['a_field_that_doesnt_exist']

                # Instantiation triggers field resolution and must fail.
                form = WrongForm()
            self.assertRaises(FieldError, create_wrong_form)
| bsd-3-clause |
pravsripad/mne-python | mne/io/constants.py | 4 | 42745 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from ..utils._bunch import BunchConstNamed
# Single shared namespace holding all of the FIFF constants defined below.
FIFF = BunchConstNamed()
#
# FIFF version number in use
#
FIFF.FIFFC_MAJOR_VERSION = 1
FIFF.FIFFC_MINOR_VERSION = 4
# Packed version: major in the high 16 bits, minor in the low 16 bits.
FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION
#
# Blocks
#
FIFF.FIFFB_ROOT = 999
FIFF.FIFFB_MEAS = 100
FIFF.FIFFB_MEAS_INFO = 101
FIFF.FIFFB_RAW_DATA = 102
FIFF.FIFFB_PROCESSED_DATA = 103
FIFF.FIFFB_EVOKED = 104
FIFF.FIFFB_ASPECT = 105
FIFF.FIFFB_SUBJECT = 106
FIFF.FIFFB_ISOTRAK = 107
FIFF.FIFFB_HPI_MEAS = 108 # HPI measurement
FIFF.FIFFB_HPI_RESULT = 109 # Result of a HPI fitting procedure
FIFF.FIFFB_HPI_COIL = 110 # Data acquired from one HPI coil
FIFF.FIFFB_PROJECT = 111
FIFF.FIFFB_CONTINUOUS_DATA = 112
FIFF.FIFFB_CH_INFO = 113 # Extra channel information
FIFF.FIFFB_VOID = 114
FIFF.FIFFB_EVENTS = 115
FIFF.FIFFB_INDEX = 116
FIFF.FIFFB_DACQ_PARS = 117
FIFF.FIFFB_REF = 118
FIFF.FIFFB_IAS_RAW_DATA = 119
FIFF.FIFFB_IAS_ASPECT = 120
FIFF.FIFFB_HPI_SUBSYSTEM = 121
# FIFF.FIFFB_PHANTOM_SUBSYSTEM = 122
# FIFF.FIFFB_STATUS_SUBSYSTEM = 123
FIFF.FIFFB_DEVICE = 124
FIFF.FIFFB_HELIUM = 125
FIFF.FIFFB_CHANNEL_INFO = 126
FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related
FIFF.FIFFB_BEM = 310 # Boundary-element method
FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces
FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition
FIFF.FIFFB_PROJ = 313
FIFF.FIFFB_PROJ_ITEM = 314
FIFF.FIFFB_MRI = 200
FIFF.FIFFB_MRI_SET = 201
FIFF.FIFFB_MRI_SLICE = 202
FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices'
FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes...
FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data
FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region
FIFF.FIFFB_PROCESSING_HISTORY = 900
FIFF.FIFFB_PROCESSING_RECORD = 901
FIFF.FIFFB_DATA_CORRECTION = 500
FIFF.FIFFB_CHANNEL_DECOUPLER = 501
FIFF.FIFFB_SSS_INFO = 502
FIFF.FIFFB_SSS_CAL = 503
FIFF.FIFFB_SSS_ST_INFO = 504
FIFF.FIFFB_SSS_BASES = 505
FIFF.FIFFB_IAS = 510
#
# Of general interest
#
FIFF.FIFF_FILE_ID = 100
FIFF.FIFF_DIR_POINTER = 101
FIFF.FIFF_BLOCK_ID = 103
FIFF.FIFF_BLOCK_START = 104
FIFF.FIFF_BLOCK_END = 105
FIFF.FIFF_FREE_LIST = 106
FIFF.FIFF_FREE_BLOCK = 107
FIFF.FIFF_NOP = 108
FIFF.FIFF_PARENT_FILE_ID = 109
FIFF.FIFF_PARENT_BLOCK_ID = 110
FIFF.FIFF_BLOCK_NAME = 111
FIFF.FIFF_BLOCK_VERSION = 112
FIFF.FIFF_CREATOR = 113 # Program that created the file (string)
FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string)
FIFF.FIFF_REF_ROLE = 115
FIFF.FIFF_REF_FILE_ID = 116
FIFF.FIFF_REF_FILE_NUM = 117
FIFF.FIFF_REF_FILE_NAME = 118
#
# Megacq saves the parameters in these tags
#
FIFF.FIFF_DACQ_PARS = 150
FIFF.FIFF_DACQ_STIM = 151
FIFF.FIFF_DEVICE_TYPE = 152
FIFF.FIFF_DEVICE_MODEL = 153
FIFF.FIFF_DEVICE_SERIAL = 154
FIFF.FIFF_DEVICE_SITE = 155
FIFF.FIFF_HE_LEVEL_RAW = 156
FIFF.FIFF_HELIUM_LEVEL = 157
FIFF.FIFF_ORIG_FILE_GUID = 158
FIFF.FIFF_UTC_OFFSET = 159
FIFF.FIFF_NCHAN = 200
FIFF.FIFF_SFREQ = 201
FIFF.FIFF_DATA_PACK = 202
FIFF.FIFF_CH_INFO = 203
FIFF.FIFF_MEAS_DATE = 204
FIFF.FIFF_SUBJECT = 205
FIFF.FIFF_COMMENT = 206
FIFF.FIFF_NAVE = 207
FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch
FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch
FIFF.FIFF_ASPECT_KIND = 210
FIFF.FIFF_REF_EVENT = 211
FIFF.FIFF_EXPERIMENTER = 212
FIFF.FIFF_DIG_POINT = 213
FIFF.FIFF_CH_POS = 214
FIFF.FIFF_HPI_SLOPES = 215 # HPI data
FIFF.FIFF_HPI_NCOIL = 216
FIFF.FIFF_REQ_EVENT = 217
FIFF.FIFF_REQ_LIMIT = 218
FIFF.FIFF_LOWPASS = 219
FIFF.FIFF_BAD_CHS = 220
FIFF.FIFF_ARTEF_REMOVAL = 221
FIFF.FIFF_COORD_TRANS = 222
FIFF.FIFF_HIGHPASS = 223
FIFF.FIFF_CH_CALS = 224 # This will not occur in new files
FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi
FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations
FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging
FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch
FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum
FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage
FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage
FIFF.FIFF_NAME = 233 # Intended to be a short name.
FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object
FIFF.FIFF_DIG_STRING = 234 # String of digitized points
FIFF.FIFF_LINE_FREQ = 235 # Line frequency
FIFF.FIFF_GANTRY_ANGLE = 282 # Tilt angle of the gantry in degrees.
#
# HPI fitting program tags
#
FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency
FIFF.FIFF_HPI_COIL_MOMENTS = 240 # Estimated moment vectors for the HPI coil magnetic dipoles
FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit
FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below)
FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit
FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference
FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement
FIFF.FIFF_HPI_COILS_USED = 246 # List of coils finally used when the transformation was computed
FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247 # Which Isotrak digitization point corresponds to each of the coils energized
#
# Tags used for storing channel info
#
FIFF.FIFF_CH_SCAN_NO = 250 # Channel scan number. Corresponds to fiffChInfoRec.scanNo field
FIFF.FIFF_CH_LOGICAL_NO = 251 # Channel logical number. Corresponds to fiffChInfoRec.logNo field
FIFF.FIFF_CH_KIND = 252 # Channel type. Corresponds to fiffChInfoRec.kind field"
FIFF.FIFF_CH_RANGE = 253 # Conversion from recorded number to (possibly virtual) voltage at the output"
FIFF.FIFF_CH_CAL = 254 # Calibration coefficient from output voltage to some real units
FIFF.FIFF_CH_LOC = 255 # Channel loc
FIFF.FIFF_CH_UNIT = 256 # Unit of the data
FIFF.FIFF_CH_UNIT_MUL = 257 # Unit multiplier exponent
FIFF.FIFF_CH_DACQ_NAME = 258 # Name of the channel in the data acquisition system. Corresponds to fiffChInfoRec.name.
FIFF.FIFF_CH_COIL_TYPE = 350 # Coil type in coil_def.dat
FIFF.FIFF_CH_COORD_FRAME = 351 # Coordinate frame (integer)
#
# Pointers
#
FIFF.FIFFV_NEXT_SEQ = 0
FIFF.FIFFV_NEXT_NONE = -1
#
# Channel types
#
FIFF.FIFFV_BIO_CH = 102
FIFF.FIFFV_MEG_CH = 1
FIFF.FIFFV_REF_MEG_CH = 301
FIFF.FIFFV_EEG_CH = 2
FIFF.FIFFV_MCG_CH = 201
FIFF.FIFFV_STIM_CH = 3
FIFF.FIFFV_EOG_CH = 202
FIFF.FIFFV_EMG_CH = 302
FIFF.FIFFV_ECG_CH = 402
FIFF.FIFFV_MISC_CH = 502
FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring
FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG
FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation
FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only)
FIFF.FIFFV_ECOG_CH = 902
FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only)
FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel
FIFF.FIFFV_DIPOLE_WAVE = 1000 # Dipole time curve (xplotter/xfit)
FIFF.FIFFV_GOODNESS_FIT = 1001 # Goodness of fit (xplotter/xfit)
FIFF.FIFFV_FNIRS_CH = 1100 # Functional near-infrared spectroscopy
# Identity mapping over the channel-kind constants defined above.
_ch_kind_named = {key: key for key in (
    FIFF.FIFFV_BIO_CH,
    FIFF.FIFFV_MEG_CH,
    FIFF.FIFFV_REF_MEG_CH,
    FIFF.FIFFV_EEG_CH,
    FIFF.FIFFV_MCG_CH,
    FIFF.FIFFV_STIM_CH,
    FIFF.FIFFV_EOG_CH,
    FIFF.FIFFV_EMG_CH,
    FIFF.FIFFV_ECG_CH,
    FIFF.FIFFV_MISC_CH,
    FIFF.FIFFV_RESP_CH,
    FIFF.FIFFV_SEEG_CH,
    FIFF.FIFFV_DBS_CH,
    FIFF.FIFFV_SYST_CH,
    FIFF.FIFFV_ECOG_CH,
    FIFF.FIFFV_IAS_CH,
    FIFF.FIFFV_EXCI_CH,
    FIFF.FIFFV_DIPOLE_WAVE,
    FIFF.FIFFV_GOODNESS_FIT,
    FIFF.FIFFV_FNIRS_CH,
)}
#
# Quaternion channels for head position monitoring
#
FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion
FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation
FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation
FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation
FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation
FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation
FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation
FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi
FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi
FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi
#
# Coordinate frames
#
FIFF.FIFFV_COORD_UNKNOWN = 0
FIFF.FIFFV_COORD_DEVICE = 1
FIFF.FIFFV_COORD_ISOTRAK = 2
FIFF.FIFFV_COORD_HPI = 3
FIFF.FIFFV_COORD_HEAD = 4
FIFF.FIFFV_COORD_MRI = 5
FIFF.FIFFV_COORD_MRI_SLICE = 6
FIFF.FIFFV_COORD_MRI_DISPLAY = 7
FIFF.FIFFV_COORD_DICOM_DEVICE = 8
FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
# Identity mapping over the coordinate-frame constants defined above.
_coord_frame_named = {key: key for key in (
    FIFF.FIFFV_COORD_UNKNOWN,
    FIFF.FIFFV_COORD_DEVICE,
    FIFF.FIFFV_COORD_ISOTRAK,
    FIFF.FIFFV_COORD_HPI,
    FIFF.FIFFV_COORD_HEAD,
    FIFF.FIFFV_COORD_MRI,
    FIFF.FIFFV_COORD_MRI_SLICE,
    FIFF.FIFFV_COORD_MRI_DISPLAY,
    FIFF.FIFFV_COORD_DICOM_DEVICE,
    FIFF.FIFFV_COORD_IMAGING_DEVICE,
)}
#
# Needed for raw and evoked-response data
#
FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data
FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers
FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel
FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples
#
# Info on subject
#
FIFF.FIFF_SUBJ_ID = 400 # Subject ID
FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject
FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject
FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject
FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject
FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject
FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject
FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject in kg
FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject in m
FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject
FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System
FIFF.FIFFV_SUBJ_HAND_RIGHT = 1 # Righthanded
FIFF.FIFFV_SUBJ_HAND_LEFT = 2 # Lefthanded
FIFF.FIFFV_SUBJ_HAND_AMBI = 3 # Ambidextrous
FIFF.FIFFV_SUBJ_SEX_UNKNOWN = 0 # Unknown gender
FIFF.FIFFV_SUBJ_SEX_MALE = 1 # Male
FIFF.FIFFV_SUBJ_SEX_FEMALE = 2 # Female
FIFF.FIFF_PROJ_ID = 500
FIFF.FIFF_PROJ_NAME = 501
FIFF.FIFF_PROJ_AIM = 502
FIFF.FIFF_PROJ_PERSONS = 503
FIFF.FIFF_PROJ_COMMENT = 504
FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers
FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: <sample before after>
FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel
FIFF.FIFF_EVENT_BITS = 603 # Event bits array
#
# Tags used in saving SQUID characteristics etc.
#
FIFF.FIFF_SQUID_BIAS = 701
FIFF.FIFF_SQUID_OFFSET = 702
FIFF.FIFF_SQUID_GATE = 703
#
# Aspect values used to save characteristic curves of SQUIDs. (mjk)
#
FIFF.FIFFV_ASPECT_IFII_LOW = 1100
FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
FIFF.FIFFV_ASPECT_GATE = 1102
#
# Values for file references
#
FIFF.FIFFV_ROLE_PREV_FILE = 1
FIFF.FIFFV_ROLE_NEXT_FILE = 2
#
# References
#
FIFF.FIFF_REF_PATH = 1101
#
# Different aspects of data
#
FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean
FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data
FIFF.FIFFV_ASPECT_SUBAVERAGE = 103 # Partial average (subaverage)
FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage
FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph
FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum
FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve
#
# BEM surface IDs
#
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1
FIFF.FIFFV_BEM_SURF_ID_NOT_KNOWN = 0
FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1
FIFF.FIFFV_BEM_SURF_ID_CSF = 2
FIFF.FIFFV_BEM_SURF_ID_SKULL = 3
FIFF.FIFFV_BEM_SURF_ID_HEAD = 4
FIFF.FIFF_SPHERE_ORIGIN = 3001
FIFF.FIFF_SPHERE_RADIUS = 3002
FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number
FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name
FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface
FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface
FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3)
FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3)
FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors
FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix
FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below
FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model
FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment
FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach
FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach
#
# More of those defined in MNE
#
FIFF.FIFFV_MNE_SURF_UNKNOWN = -1
FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102
FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system
#
# These relate to the Isotrak data (enum(point))
#
FIFF.FIFFV_POINT_CARDINAL = 1
FIFF.FIFFV_POINT_HPI = 2
FIFF.FIFFV_POINT_EEG = 3
FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG # ECG shares the EEG point kind
FIFF.FIFFV_POINT_EXTRA = 4
FIFF.FIFFV_POINT_HEAD = 5 # Point on the surface of the head
# Identity mapping over the digitizer point kinds defined above; note that
# FIFFV_POINT_ECG aliases FIFFV_POINT_EEG and is therefore already covered.
# (Fixes PEP8 E275: missing whitespace after the ``in`` keyword, matching the
# formatting of the other ``*_named`` dicts in this module.)
_dig_kind_named = {key: key for key in (
    FIFF.FIFFV_POINT_CARDINAL,
    FIFF.FIFFV_POINT_HPI,
    FIFF.FIFFV_POINT_EEG,
    FIFF.FIFFV_POINT_EXTRA,
    FIFF.FIFFV_POINT_HEAD,
)}
#
# Cardinal point types (enum(cardinal_point))
#
FIFF.FIFFV_POINT_LPA = 1
FIFF.FIFFV_POINT_NASION = 2
FIFF.FIFFV_POINT_RPA = 3
FIFF.FIFFV_POINT_INION = 4
# Identity mapping over the cardinal point constants defined above.
_dig_cardinal_named = {key: key for key in (
    FIFF.FIFFV_POINT_LPA,
    FIFF.FIFFV_POINT_NASION,
    FIFF.FIFFV_POINT_RPA,
    FIFF.FIFFV_POINT_INION,
)}
#
# SSP
#
FIFF.FIFF_PROJ_ITEM_KIND = 3411
FIFF.FIFF_PROJ_ITEM_TIME = 3412
FIFF.FIFF_PROJ_ITEM_NVEC = 3414
FIFF.FIFF_PROJ_ITEM_VECTORS = 3415
FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416
FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417
# XPlotter
FIFF.FIFF_XPLOTTER_LAYOUT = 3501 # string - "Xplotter layout tag"
#
# MRIs
#
FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH
FIFF.FIFF_MRI_SOURCE_FORMAT = 2002
FIFF.FIFF_MRI_PIXEL_ENCODING = 2003
FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004
FIFF.FIFF_MRI_PIXEL_SCALE = 2005
FIFF.FIFF_MRI_PIXEL_DATA = 2006
FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007
FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008
FIFF.FIFF_MRI_BOUNDING_BOX = 2009
FIFF.FIFF_MRI_WIDTH = 2010
FIFF.FIFF_MRI_WIDTH_M = 2011
FIFF.FIFF_MRI_HEIGHT = 2012
FIFF.FIFF_MRI_HEIGHT_M = 2013
FIFF.FIFF_MRI_DEPTH = 2014
FIFF.FIFF_MRI_DEPTH_M = 2015
FIFF.FIFF_MRI_THICKNESS = 2016
FIFF.FIFF_MRI_SCENE_AIM = 2017
FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020
FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 2021
FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022
FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023
FIFF.FIFF_MRI_VOXEL_DATA = 2030
FIFF.FIFF_MRI_VOXEL_ENCODING = 2031
FIFF.FIFF_MRI_MRILAB_SETUP = 2100
FIFF.FIFF_MRI_SEG_REGION_ID = 2200
#
# MRI pixel encodings
#
FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0
FIFF.FIFFV_MRI_PIXEL_BYTE = 1
FIFF.FIFFV_MRI_PIXEL_WORD = 2
FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3
FIFF.FIFFV_MRI_PIXEL_FLOAT = 4
FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5
FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6
FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7
FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8
#
# These are the MNE fiff definitions (range 350-390 reserved for MNE)
#
FIFF.FIFFB_MNE = 350
FIFF.FIFFB_MNE_SOURCE_SPACE = 351
FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352
FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353
FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354
FIFF.FIFFB_MNE_COV = 355
FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356
FIFF.FIFFB_MNE_NAMED_MATRIX = 357
FIFF.FIFFB_MNE_ENV = 358
FIFF.FIFFB_MNE_BAD_CHANNELS = 359
FIFF.FIFFB_MNE_VERTEX_MAP = 360
FIFF.FIFFB_MNE_EVENTS = 361
FIFF.FIFFB_MNE_MORPH_MAP = 362
FIFF.FIFFB_MNE_SURFACE_MAP = 363
FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364
#
# CTF compensation data
#
FIFF.FIFFB_MNE_CTF_COMP = 370
FIFF.FIFFB_MNE_CTF_COMP_DATA = 371
FIFF.FIFFB_MNE_DERIVATIONS = 372
FIFF.FIFFB_MNE_EPOCHS = 373
FIFF.FIFFB_MNE_ICA = 374
#
# Fiff tags associated with MNE computations (3500...)
#
#
# 3500... Bookkeeping
#
FIFF.FIFF_MNE_ROW_NAMES = 3502
FIFF.FIFF_MNE_COL_NAMES = 3503
FIFF.FIFF_MNE_NROW = 3504
FIFF.FIFF_MNE_NCOL = 3505
FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults:
# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI
# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD
# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD
FIFF.FIFF_MNE_CH_NAME_LIST = 3507
FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501)
#
# 3510... 3590... Source space or surface
#
FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices
FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier
FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume
FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based)
FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = 3596 # Voxel space dimensions in a volume source space
FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = 3597 # Matrix to interpolate a volume source space into a mri volume
FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation
FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles
FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use
FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = 3595 # Neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST = 3599 # Distances between vertices in use (along the surface)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = 3600 # If distance is above this limit (in the volume) it has not been calculated
FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data
FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map
#
# 3520... Forward solution
#
FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520
FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free
FIFF.FIFF_MNE_INCLUDED_METHODS = 3522
FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523
#
# 3530... Covariance matrix
#
FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix
FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension
FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle)
FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix
FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above
FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535
FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom
FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used
FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood
#
# 3540... Inverse operator
#
# We store the inverse operator as the eigenleads, eigenfields,
# and weights
#
FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads
FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = 3546 # The eigenleads (already weighted with R^0.5)
FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields
FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values
FIFF.FIFF_MNE_PRIORS_USED = 3543 # Which kind of priors have been used for the source covariance matrix
FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix
# This matrix includes the whitening operator as well
# The regularization is applied
FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545 # Contains the orientation of one source per row
# The source orientations must be expressed in the coordinate system
# given by FIFF_MNE_COORD_FRAME
FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ?
#
# 3550... Saved environment info
#
FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created
FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file
FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = 3552 # Reference to an external binary file (big-endian) */
FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553 # Reference to an external binary file (little-endian) */
#
# 3560... Miscellaneous
#
FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active?
FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI101 / STI 014)
FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes
FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data
FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes
FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values
FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string
FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data
FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning
FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end
#
# 3570... Morphing maps
#
FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere
FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from
FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to
#
# 3580... CTF compensation data
#
FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation
FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself
FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated?
FIFF.FIFF_MNE_DERIVATION_DATA = 3585 # Used to store information about EEG and other derivations
#
# 3601... values associated with ICA decomposition
#
FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters
FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names
FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener
FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components
FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance
FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean
FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix
FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources
FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params
#
# Miscellaneous
#
FIFF.FIFF_MNE_KIT_SYSTEM_ID = 3612 # Unique ID assigned to KIT systems
#
# Maxfilter tags
#
FIFF.FIFF_SSS_FRAME = 263
FIFF.FIFF_SSS_JOB = 264
FIFF.FIFF_SSS_ORIGIN = 265
FIFF.FIFF_SSS_ORD_IN = 266
FIFF.FIFF_SSS_ORD_OUT = 267
FIFF.FIFF_SSS_NMAG = 268
FIFF.FIFF_SSS_COMPONENTS = 269
FIFF.FIFF_SSS_CAL_CHANS = 270
FIFF.FIFF_SSS_CAL_CORRS = 271
FIFF.FIFF_SSS_ST_CORR = 272
FIFF.FIFF_SSS_NFREE = 278
FIFF.FIFF_SSS_ST_LENGTH = 279
FIFF.FIFF_DECOUPLER_MATRIX = 800
#
# Fiff values associated with MNE computations
#
FIFF.FIFFV_MNE_UNKNOWN_ORI = 0
FIFF.FIFFV_MNE_FIXED_ORI = 1
FIFF.FIFFV_MNE_FREE_ORI = 2
FIFF.FIFFV_MNE_MEG = 1
FIFF.FIFFV_MNE_EEG = 2
FIFF.FIFFV_MNE_MEG_EEG = 3
FIFF.FIFFV_MNE_PRIORS_NONE = 0
FIFF.FIFFV_MNE_PRIORS_DEPTH = 1
FIFF.FIFFV_MNE_PRIORS_LORETA = 2
FIFF.FIFFV_MNE_PRIORS_SULCI = 3
FIFF.FIFFV_MNE_UNKNOWN_COV = 0
FIFF.FIFFV_MNE_SENSOR_COV = 1
FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called
FIFF.FIFFV_MNE_SOURCE_COV = 2
FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3
FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers
FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior
FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior
#
# Output map types
#
FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size
FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic
FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic
FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = 9 # Square root of the (approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar)
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector)
#
# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE)
#
FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1
FIFF.FIFFV_MNE_SPACE_SURFACE = 1
FIFF.FIFFV_MNE_SPACE_VOLUME = 2
FIFF.FIFFV_MNE_SPACE_DISCRETE = 3
#
# Covariance matrix channel classification
#
FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea
FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T]
FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m]
FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V]
#
# Projection item kinds
#
FIFF.FIFFV_PROJ_ITEM_NONE = 0
FIFF.FIFFV_PROJ_ITEM_FIELD = 1
FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2
FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3
FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4
FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5
FIFF.FIFFV_PROJ_ITEM_EEG_AVREF = 10 # Linear projection related to EEG average reference
FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = FIFF.FIFFV_PROJ_ITEM_EEG_AVREF # backward compat alias
#
# Custom EEG references
#
FIFF.FIFFV_MNE_CUSTOM_REF_OFF = 0
FIFF.FIFFV_MNE_CUSTOM_REF_ON = 1
FIFF.FIFFV_MNE_CUSTOM_REF_CSD = 2
#
# SSS job options
#
FIFF.FIFFV_SSS_JOB_NOTHING = 0 # No SSS, just copy input to output
FIFF.FIFFV_SSS_JOB_CTC = 1 # No SSS, only cross-talk correction
FIFF.FIFFV_SSS_JOB_FILTER = 2 # Spatial maxwell filtering
FIFF.FIFFV_SSS_JOB_VIRT = 3 # Transform data to another sensor array
FIFF.FIFFV_SSS_JOB_HEAD_POS = 4 # Estimate head positions, no SSS
FIFF.FIFFV_SSS_JOB_MOVEC_FIT = 5 # Estimate and compensate head movement
FIFF.FIFFV_SSS_JOB_MOVEC_QUA = 6 # Compensate head movement from previously estimated head positions
FIFF.FIFFV_SSS_JOB_REC_ALL = 7 # Reconstruct inside and outside signals
FIFF.FIFFV_SSS_JOB_REC_IN = 8 # Reconstruct inside signals
FIFF.FIFFV_SSS_JOB_REC_OUT = 9 # Reconstruct outside signals
FIFF.FIFFV_SSS_JOB_ST = 10 # Spatio-temporal maxwell filtering
FIFF.FIFFV_SSS_JOB_TPROJ = 11 # Temporal projection, no SSS
FIFF.FIFFV_SSS_JOB_XSSS = 12 # Cross-validation SSS
FIFF.FIFFV_SSS_JOB_XSUB = 13 # Cross-validation subtraction, no SSS
FIFF.FIFFV_SSS_JOB_XWAV = 14 # Cross-validation noise waveforms
FIFF.FIFFV_SSS_JOB_NCOV = 15 # Noise covariance estimation
FIFF.FIFFV_SSS_JOB_SCOV = 16 # SSS sample covariance estimation
#
# Additional coordinate frames
#
FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data
FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates
FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates
FIFF.FIFFV_MNE_COORD_DIGITIZER = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates
FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates
FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates
FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin
FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates
FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0)
FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0)
FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates
#
# 4D and KIT use the same head coordinate system definition as CTF
#
FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
#
# FWD Types
#
# Separate namespace for forward-modeling constants (coil classes,
# integration accuracies, and BEM solution options).
FWD = BunchConstNamed()
FWD.COIL_UNKNOWN = 0
FWD.COILC_UNKNOWN = 0
FWD.COILC_EEG = 1000
FWD.COILC_MAG = 1
FWD.COILC_AXIAL_GRAD = 2
FWD.COILC_PLANAR_GRAD = 3
FWD.COILC_AXIAL_GRAD2 = 4
FWD.COIL_ACCURACY_POINT = 0
FWD.COIL_ACCURACY_NORMAL = 1
FWD.COIL_ACCURACY_ACCURATE = 2
FWD.BEM_UNKNOWN = -1
FWD.BEM_CONSTANT_COLL = 1
FWD.BEM_LINEAR_COLL = 2
FWD.BEM_IP_APPROACH_LIMIT = 0.1
FWD.BEM_LIN_FIELD_SIMPLE = 1
FWD.BEM_LIN_FIELD_FERGUSON = 2
FWD.BEM_LIN_FIELD_URANKAR = 3
#
# Data types
#
FIFF.FIFFT_VOID = 0
FIFF.FIFFT_BYTE = 1
FIFF.FIFFT_SHORT = 2
FIFF.FIFFT_INT = 3
FIFF.FIFFT_FLOAT = 4
FIFF.FIFFT_DOUBLE = 5
FIFF.FIFFT_JULIAN = 6
FIFF.FIFFT_USHORT = 7
FIFF.FIFFT_UINT = 8
FIFF.FIFFT_ULONG = 9
FIFF.FIFFT_STRING = 10
FIFF.FIFFT_LONG = 11
FIFF.FIFFT_DAU_PACK13 = 13
FIFF.FIFFT_DAU_PACK14 = 14
FIFF.FIFFT_DAU_PACK16 = 16
FIFF.FIFFT_COMPLEX_FLOAT = 20
FIFF.FIFFT_COMPLEX_DOUBLE = 21
FIFF.FIFFT_OLD_PACK = 23
FIFF.FIFFT_CH_INFO_STRUCT = 30
FIFF.FIFFT_ID_STRUCT = 31
FIFF.FIFFT_DIR_ENTRY_STRUCT = 32
FIFF.FIFFT_DIG_POINT_STRUCT = 33
FIFF.FIFFT_CH_POS_STRUCT = 34
FIFF.FIFFT_COORD_TRANS_STRUCT = 35
FIFF.FIFFT_DIG_STRING_STRUCT = 36
FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37
#
# Units of measurement
#
FIFF.FIFF_UNIT_NONE = -1
#
# SI base units
#
FIFF.FIFF_UNIT_UNITLESS = 0
FIFF.FIFF_UNIT_M = 1 # meter
FIFF.FIFF_UNIT_KG = 2 # kilogram
FIFF.FIFF_UNIT_SEC = 3 # second
FIFF.FIFF_UNIT_A = 4 # ampere
FIFF.FIFF_UNIT_K = 5 # Kelvin
FIFF.FIFF_UNIT_MOL = 6 # mole
#
# SI Supplementary units
#
FIFF.FIFF_UNIT_RAD = 7 # radian
FIFF.FIFF_UNIT_SR = 8 # steradian
#
# SI base candela
#
FIFF.FIFF_UNIT_CD = 9 # candela
#
# SI derived units
#
FIFF.FIFF_UNIT_MOL_M3 = 10 # mol/m^3
FIFF.FIFF_UNIT_HZ = 101 # hertz
FIFF.FIFF_UNIT_N = 102 # Newton
FIFF.FIFF_UNIT_PA = 103 # pascal
FIFF.FIFF_UNIT_J = 104 # joule
FIFF.FIFF_UNIT_W = 105 # watt
FIFF.FIFF_UNIT_C = 106 # coulomb
FIFF.FIFF_UNIT_V = 107 # volt
FIFF.FIFF_UNIT_F = 108 # farad
FIFF.FIFF_UNIT_OHM = 109 # ohm
FIFF.FIFF_UNIT_MHO = 110 # one per ohm
FIFF.FIFF_UNIT_WB = 111 # weber
FIFF.FIFF_UNIT_T = 112 # tesla
FIFF.FIFF_UNIT_H = 113 # Henry
FIFF.FIFF_UNIT_CEL = 114 # celsius
FIFF.FIFF_UNIT_LM = 115 # lumen
FIFF.FIFF_UNIT_LX = 116 # lux
FIFF.FIFF_UNIT_V_M2 = 117 # V/m^2
#
# Others we need
#
FIFF.FIFF_UNIT_T_M = 201 # T/m
FIFF.FIFF_UNIT_AM = 202 # Am
FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2
FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3
# Identity mapping over the channel-unit constants defined above.
# (Fixes PEP8 E275: missing whitespace after the ``in`` keyword, matching the
# formatting of the other ``*_named`` dicts in this module.)
_ch_unit_named = {key: key for key in (
    FIFF.FIFF_UNIT_NONE, FIFF.FIFF_UNIT_UNITLESS, FIFF.FIFF_UNIT_M,
    FIFF.FIFF_UNIT_KG, FIFF.FIFF_UNIT_SEC, FIFF.FIFF_UNIT_A, FIFF.FIFF_UNIT_K,
    FIFF.FIFF_UNIT_MOL, FIFF.FIFF_UNIT_RAD, FIFF.FIFF_UNIT_SR,
    FIFF.FIFF_UNIT_CD, FIFF.FIFF_UNIT_MOL_M3, FIFF.FIFF_UNIT_HZ,
    FIFF.FIFF_UNIT_N, FIFF.FIFF_UNIT_PA, FIFF.FIFF_UNIT_J, FIFF.FIFF_UNIT_W,
    FIFF.FIFF_UNIT_C, FIFF.FIFF_UNIT_V, FIFF.FIFF_UNIT_F, FIFF.FIFF_UNIT_OHM,
    FIFF.FIFF_UNIT_MHO, FIFF.FIFF_UNIT_WB, FIFF.FIFF_UNIT_T, FIFF.FIFF_UNIT_H,
    FIFF.FIFF_UNIT_CEL, FIFF.FIFF_UNIT_LM, FIFF.FIFF_UNIT_LX,
    FIFF.FIFF_UNIT_V_M2, FIFF.FIFF_UNIT_T_M, FIFF.FIFF_UNIT_AM,
    FIFF.FIFF_UNIT_AM_M2, FIFF.FIFF_UNIT_AM_M3,
)}
#
# Multipliers
#
FIFF.FIFF_UNITM_E = 18
FIFF.FIFF_UNITM_PET = 15
FIFF.FIFF_UNITM_T = 12
FIFF.FIFF_UNITM_GIG = 9
FIFF.FIFF_UNITM_MEG = 6
FIFF.FIFF_UNITM_K = 3
FIFF.FIFF_UNITM_H = 2
FIFF.FIFF_UNITM_DA = 1
FIFF.FIFF_UNITM_NONE = 0
FIFF.FIFF_UNITM_D = -1
FIFF.FIFF_UNITM_C = -2
FIFF.FIFF_UNITM_M = -3
FIFF.FIFF_UNITM_MU = -6
FIFF.FIFF_UNITM_N = -9
FIFF.FIFF_UNITM_P = -12
FIFF.FIFF_UNITM_F = -15
FIFF.FIFF_UNITM_A = -18
# Identity mapping over the unit-multiplier exponents defined above.
_ch_unit_mul_named = {key: key for key in (
    FIFF.FIFF_UNITM_E, FIFF.FIFF_UNITM_PET, FIFF.FIFF_UNITM_T,
    FIFF.FIFF_UNITM_GIG, FIFF.FIFF_UNITM_MEG, FIFF.FIFF_UNITM_K,
    FIFF.FIFF_UNITM_H, FIFF.FIFF_UNITM_DA, FIFF.FIFF_UNITM_NONE,
    FIFF.FIFF_UNITM_D, FIFF.FIFF_UNITM_C, FIFF.FIFF_UNITM_M,
    FIFF.FIFF_UNITM_MU, FIFF.FIFF_UNITM_N, FIFF.FIFF_UNITM_P,
    FIFF.FIFF_UNITM_F, FIFF.FIFF_UNITM_A,
)}
#
# Coil types
#
FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data
FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0
FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils
FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT
FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system
FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead
FIFF.FIFFV_COIL_EEG_CSD = 6 # CSD-transformed EEG lead
FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition
# The coil info contains dipole location (r0) and
# direction (ex)
FIFF.FIFFV_COIL_FNIRS_HBO = 300 # fNIRS oxyhemoglobin
FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin
FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE = 302 # fNIRS continuous wave amplitude
FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density
FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304 # fNIRS frequency domain AC amplitude
FIFF.FIFFV_COIL_FNIRS_FD_PHASE = 305 # fNIRS frequency domain phase
FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE # old alias
FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software
FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer
FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor
FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T4 = 3015 # Vectorview planar gradiometer (MEG-MRI)
FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer
FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer
FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer
FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer
FIFF.FIFFV_COIL_VV_MAG_T4 = 3025 # Vectorview magnetometer (MEG-MRI)
FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer
FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer
#
# Magnes reference sensors
#
FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003
FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004
FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
# Short aliases for the Magnes reference sensor constants above.
FIFF.FIFFV_COIL_MAGNES_R_MAG = FIFF.FIFFV_COIL_MAGNES_REF_MAG
FIFF.FIFFV_COIL_MAGNES_R_GRAD = FIFF.FIFFV_COIL_MAGNES_REF_GRAD
FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD
#
# CTF coil and channel types
#
FIFF.FIFFV_COIL_CTF_GRAD = 5001
FIFF.FIFFV_COIL_CTF_REF_MAG = 5002
FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004
#
# KIT system coil types
#
FIFF.FIFFV_COIL_KIT_GRAD = 6001
FIFF.FIFFV_COIL_KIT_REF_MAG = 6002
#
# BabySQUID sensors
#
FIFF.FIFFV_COIL_BABY_GRAD = 7001
#
# BabyMEG sensors
#
FIFF.FIFFV_COIL_BABY_MAG = 7002
FIFF.FIFFV_COIL_BABY_REF_MAG = 7003
FIFF.FIFFV_COIL_BABY_REF_MAG2 = 7004
#
# Artemis123 sensors
#
FIFF.FIFFV_COIL_ARTEMIS123_GRAD = 7501
FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG = 7502
FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD = 7503
#
# QuSpin sensors
#
FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG = 8001
FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2 = 8002
#
# KRISS sensors
#
FIFF.FIFFV_COIL_KRISS_GRAD = 9001
#
# Compumedics adult/pediatric gradiometer
#
FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD = 9101
FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD = 9102
_ch_coil_type_named = {key: key for key in (
FIFF.FIFFV_COIL_NONE, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_COIL_NM_122,
FIFF.FIFFV_COIL_NM_24, FIFF.FIFFV_COIL_NM_MCG_AXIAL,
FIFF.FIFFV_COIL_EEG_BIPOLAR, FIFF.FIFFV_COIL_EEG_CSD,
FIFF.FIFFV_COIL_DIPOLE, FIFF.FIFFV_COIL_FNIRS_HBO,
FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFFV_COIL_FNIRS_RAW,
FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE,
FIFF.FIFFV_COIL_FNIRS_FD_PHASE, FIFF.FIFFV_COIL_MCG_42,
FIFF.FIFFV_COIL_POINT_MAGNETOMETER, FIFF.FIFFV_COIL_AXIAL_GRAD_5CM,
FIFF.FIFFV_COIL_VV_PLANAR_W, FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3,
FIFF.FIFFV_COIL_VV_PLANAR_T4, FIFF.FIFFV_COIL_VV_MAG_W,
FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFFV_COIL_VV_MAG_T4,
FIFF.FIFFV_COIL_MAGNES_MAG, FIFF.FIFFV_COIL_MAGNES_GRAD,
FIFF.FIFFV_COIL_MAGNES_REF_MAG, FIFF.FIFFV_COIL_MAGNES_REF_GRAD,
FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_CTF_GRAD,
FIFF.FIFFV_COIL_CTF_REF_MAG, FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_KIT_GRAD,
FIFF.FIFFV_COIL_KIT_REF_MAG, FIFF.FIFFV_COIL_BABY_GRAD,
FIFF.FIFFV_COIL_BABY_MAG, FIFF.FIFFV_COIL_BABY_REF_MAG,
FIFF.FIFFV_COIL_BABY_REF_MAG2, FIFF.FIFFV_COIL_ARTEMIS123_GRAD,
FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD,
FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG, FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2,
FIFF.FIFFV_COIL_KRISS_GRAD, FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD,
FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD,
)}
# MNE RealTime
# Tag IDs used by the MNE real-time tools.
FIFF.FIFF_MNE_RT_COMMAND = 3700  # realtime command
FIFF.FIFF_MNE_RT_CLIENT_ID = 3701  # realtime client
# MNE epochs bookkeeping
FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800  # the epochs selection
FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801  # the drop log
FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT = 3802  # rejection and flat params
# MNE annotations
FIFF.FIFFB_MNE_ANNOTATIONS = 3810  # annotations block
# MNE Metadata Dataframes
FIFF.FIFFB_MNE_METADATA = 3811  # metadata dataframes block
# Table to match unrecognized channel location names to their known aliases
CHANNEL_LOC_ALIASES = {
    # this set of aliases are published in doi:10.1097/WNP.0000000000000316 and
    # doi:10.1016/S1388-2457(00)00527-7.
    'Cb1': 'POO7',
    'Cb2': 'POO8',
    'CB1': 'POO7',
    'CB2': 'POO8',
    # Old 10-20 temporal names mapped to modern 10-10 equivalents.
    # NOTE(review): 'T5'/'T6' map to T9/T10 here, identically to 'T1'/'T2';
    # many references map T5/T6 to P7/P8 instead -- confirm against the
    # cited sources before relying on these two entries.
    'T1': 'T9',
    'T2': 'T10',
    'T3': 'T7',
    'T4': 'T8',
    'T5': 'T9',
    'T6': 'T10',
    'M1': 'TP9',
    'M2': 'TP10'
    # add a comment here (with doi of a published source) above any new
    # aliases, as they are added
}
| bsd-3-clause |
jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/tag_utils.py | 1 | 5013 | from __future__ import unicode_literals
import pmt
from . import gr_python as gr
class PythonTag(object):
    """Plain Python container for stream tags (see tag_to_python())."""
    def __init__(self):
        # Absolute item offset of the tag within the stream.
        self.offset = None
        # Tag key and value as plain Python objects (converted from PMTs).
        self.key = None
        self.value = None
        # Source block ID.  NOTE(review): initialised to False rather than
        # None like the other fields -- this mirrors the pmt.PMT_F default
        # that python_to_tag() uses; confirm before "fixing".
        self.srcid = False
def tag_to_python(tag):
    """Translate a gr.tag_t stream tag into a plain PythonTag container."""
    py_tag = PythonTag()
    py_tag.offset = tag.offset
    # Each PMT field is converted to its plain Python equivalent.
    py_tag.key = pmt.to_python(tag.key)
    py_tag.value = pmt.to_python(tag.value)
    py_tag.srcid = pmt.to_python(tag.srcid)
    return py_tag
def tag_to_pmt(tag):
    """Convert a Python-readable tag (e.g. a PythonTag) to a stream tag.

    All three PMT fields (key, value, srcid) are converted from plain
    Python values into PMTs via pmt.from_python.
    """
    newtag = gr.tag_t()
    newtag.offset = tag.offset
    # Bug fix: the key must be converted *to* a PMT, like value and srcid
    # below.  The previous code called pmt.to_python() here, which is the
    # inverse conversion and left the key as a plain Python object.
    newtag.key = pmt.from_python(tag.key)
    newtag.value = pmt.from_python(tag.value)
    newtag.srcid = pmt.from_python(tag.srcid)
    return newtag
def python_to_tag(tag_struct):
    """
    Convert a Python list/tuple/dictionary to a stream tag.
    When using a list or tuple format, this function expects the format:
      tag_struct[0] --> tag's offset (as an integer)
      tag_struct[1] --> tag's key (as a PMT)
      tag_struct[2] --> tag's value (as a PMT)
      tag_struct[3] --> tag's srcid (as a PMT)
    When using a dictionary, we specify the dictionary keys using:
      tag_struct['offset'] --> tag's offset (as an integer)
      tag_struct['key'] --> tag's key (as a PMT)
      tag_struct['value'] --> tag's value (as a PMT)
      tag_struct['srcid'] --> tag's srcid (as a PMT)
    If the function can take the Python object and successfully
    construct a tag, it will return the tag. Otherwise, it will return
    None.
    Note: a tag is returned as soon as *any* one field validates (the
    'good' flag is never reset), so partially valid input still yields a
    tag with defaults in the remaining fields.
    """
    good = False
    tag = gr.tag_t()
    if(type(tag_struct) == dict):
        # Dictionary form: each recognised key is validated independently.
        if('offset' in tag_struct):
            if(isinstance(tag_struct['offset'], int)):
                tag.offset = tag_struct['offset']
                good = True
        if('key' in tag_struct):
            if(isinstance(tag_struct['key'], pmt.pmt_base)):
                tag.key = tag_struct['key']
                good = True
        if('value' in tag_struct):
            if(isinstance(tag_struct['value'], pmt.pmt_base)):
                tag.value = tag_struct['value']
                good = True
        if('srcid' in tag_struct):
            if(isinstance(tag_struct['srcid'], pmt.pmt_base)):
                tag.srcid = tag_struct['srcid']
                good = True
    elif(type(tag_struct) == list or type(tag_struct) == tuple):
        # Sequence form: 4 elements = offset, key, value, srcid.
        if(len(tag_struct) == 4):
            if(isinstance(tag_struct[0], int)):
                tag.offset = tag_struct[0]
                good = True
            if(isinstance(tag_struct[1], pmt.pmt_base)):
                tag.key = tag_struct[1]
                good = True
            if(isinstance(tag_struct[2], pmt.pmt_base)):
                tag.value = tag_struct[2]
                good = True
            if(isinstance(tag_struct[3], pmt.pmt_base)):
                tag.srcid = tag_struct[3]
                good = True
        # 3 elements = offset, key, value; srcid defaults to PMT false.
        elif(len(tag_struct) == 3):
            if(isinstance(tag_struct[0], int)):
                tag.offset = tag_struct[0]
                good = True
            if(isinstance(tag_struct[1], pmt.pmt_base)):
                tag.key = tag_struct[1]
                good = True
            if(isinstance(tag_struct[2], pmt.pmt_base)):
                tag.value = tag_struct[2]
                good = True
            tag.srcid = pmt.PMT_F
    if(good):
        return tag
    else:
        return None
def tag_t_offset_compare_key():
    """
    Convert a tag_t.offset_compare function into a key=function
    This method is modeled after functools.cmp_to_key(_func_).
    It can be used by functions that accept a key function, such as
    sorted(), min(), max(), etc. to compare tags by their offsets,
    e.g., sorted(tag_list, key=gr.tag_t.offset_compare_key()).
    """
    # gr.tag_t.offset_compare(a, b) behaves as a strict "a < b" predicate;
    # every rich comparison below is derived from it, exactly as
    # functools.cmp_to_key derives comparisons from a cmp function.
    class OffsetKey(object):
        def __init__(self, wrapped, *args):
            self.obj = wrapped
        def _lt(self, other):
            # x.offset < y.offset
            return gr.tag_t.offset_compare(self.obj, other.obj)
        def _gt(self, other):
            # y.offset < x.offset
            return gr.tag_t.offset_compare(other.obj, self.obj)
        def __lt__(self, other):
            return self._lt(other)
        def __gt__(self, other):
            return self._gt(other)
        def __eq__(self, other):
            # neither strictly less nor strictly greater
            return not self._lt(other) and not self._gt(other)
        def __le__(self, other):
            return not self._gt(other)
        def __ge__(self, other):
            return not self._lt(other)
        def __ne__(self, other):
            return self._lt(other) or self._gt(other)
    return OffsetKey
| gpl-3.0 |
ytjiang/django | django/contrib/gis/maps/google/__init__.py | 158 | 2762 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import GEvent, GIcon, GMarker, GPolygon, GPolyline
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
| bsd-3-clause |
mkaluza/external_chromium_org | chrome/common/extensions/docs/server2/test_servlet_test.py | 122 | 1476 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from empty_dir_file_system import EmptyDirFileSystem
from host_file_system_provider import HostFileSystemProvider
from servlet import Request
from test_branch_utility import TestBranchUtility
from fail_on_access_file_system import FailOnAccessFileSystem
from test_servlet import TestServlet
class _TestDelegate(object):
  """Delegate supplying canned branch data and empty/fail-fast file
  systems, so TestServlet needs no network or real file access."""
  def CreateBranchUtility(self, object_store_creator):
    # Canned data avoids network fetches during the test.
    return TestBranchUtility.CreateWithCannedData()
  def CreateAppSamplesFileSystem(self, object_store_creator):
    return EmptyDirFileSystem()
  def CreateHostFileSystemProvider(self, object_store_creator):
    # FailOnAccessFileSystem makes any unexpected file access an error.
    return HostFileSystemProvider.ForTest(
        FailOnAccessFileSystem(), object_store_creator)
# This test can't really be useful. The set of valid tests is changing and
# there is no reason to test the tests themselves, they are already tested in
# their respective modules. The only testable behavior TestServlet adds is
# returning a 404 if a test does not exist.
class TestServletTest(unittest.TestCase):
  def testTestServlet(self):
    # A URL naming no existing test must produce a 404 -- the one piece
    # of behavior TestServlet adds (see the comment above this class).
    request = Request('not_a_real_test_url', 'localhost', {})
    test_servlet = TestServlet(request, _TestDelegate())
    response = test_servlet.Get()
    self.assertEqual(404, response.status)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hyperized/ansible | lib/ansible/modules/network/netscaler/netscaler_gslb_site.py | 52 | 14162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_site
short_description: Manage gslb site entities in Netscaler.
description:
- Manage gslb site entities in Netscaler.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
sitename:
description:
- >-
Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the virtual server is created.
- "Minimum length = 1"
sitetype:
choices:
- 'REMOTE'
- 'LOCAL'
description:
- >-
Type of site to create. If the type is not specified, the appliance automatically detects and sets
the type on the basis of the IP address being assigned to the site. If the specified site IP address
is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site.
Otherwise, it is a remote site.
siteipaddress:
description:
- >-
IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB
sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or
MIP address, or the IP address of the ADNS service).
- "Minimum length = 1"
publicip:
description:
- >-
Public IP address for the local site. Required only if the appliance is deployed in a private address
space and the site has a public IP address hosted on an external firewall or a NAT device.
- "Minimum length = 1"
metricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The
appliances in the GSLB setup exchange health information once every second.
- >-
If you disable metrics exchange, you can use only static load balancing methods (such as round robin,
static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load
balancing method (such as least connection) is in operation, the appliance falls back to round robin.
Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB
services. Otherwise, the service is marked as DOWN.
nwmetricexchange:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from
communications with various local DNS (LDNS) servers used by clients. RTT information is used in the
dynamic RTT load balancing method, and is exchanged every 5 seconds.
sessionexchange:
choices:
- 'enabled'
- 'disabled'
description:
- "Exchange persistent session entries with other GSLB sites every five seconds."
triggermonitor:
choices:
- 'ALWAYS'
- 'MEPDOWN'
- 'MEPDOWN_SVCDOWN'
description:
- >-
Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound.
Available settings function as follows:
- "* C(ALWAYS) - Monitor the GSLB service at all times."
- >-
* C(MEPDOWN) - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange
Protocol (MEP) is disabled.
- "C(MEPDOWN_SVCDOWN) - Monitor the service in either of the following situations:"
- "* The exchange of metrics through MEP is disabled."
- >-
* The exchange of metrics through MEP is enabled but the status of the service, learned through
metrics exchange, is DOWN.
parentsite:
description:
- "Parent site of the GSLB site, in a parent-child topology."
clip:
description:
- >-
Cluster IP address. Specify this parameter to connect to the remote cluster site for GSLB auto-sync.
Note: The cluster IP address is defined when creating the cluster.
publicclip:
description:
- >-
IP address to be used to globally access the remote cluster when it is deployed behind a NAT. It can
be same as the normal cluster IP address.
naptrreplacementsuffix:
description:
- >-
The naptr replacement suffix configured here will be used to construct the naptr replacement field in
NAPTR record.
- "Minimum length = 1"
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup gslb site
delegate_to: localhost
netscaler_gslb_site:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
sitename: gslb-site-1
siteipaddress: 192.168.1.1
sitetype: LOCAL
publicip: 192.168.1.1
metricexchange: enabled
nwmetricexchange: enabled
sessionexchange: enabled
triggermonitor: ALWAYS
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite import gslbsite
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection,
)
def gslb_site_exists(client, module):
    """Return True if a GSLB site with the configured sitename exists.

    Args:
        client: logged-in NITRO service client.
        module: AnsibleModule whose params contain 'sitename'.
    """
    # count_filtered() returns the number of matching sites; comparing to
    # zero replaces the redundant if/else that returned True/False.
    return gslbsite.count_filtered(
        client, 'sitename:%s' % module.params['sitename']) > 0
def gslb_site_identical(client, module, gslb_site_proxy):
    """Return True if the existing GSLB site matches the desired config.

    Compares the first site returned for the configured sitename against
    the configuration held by gslb_site_proxy; an empty diff dict means
    no attribute differs.
    """
    gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
    # Empty dict is falsy: no differences found.
    return not gslb_site_proxy.diff_object(gslb_site_list[0])
def diff_list(client, module, gslb_site_proxy):
    """Return the dict of attribute differences between the configured
    site (first match for sitename) and the desired configuration."""
    gslb_site_list = gslbsite.get_filtered(client, 'sitename:%s' % module.params['sitename'])
    return gslb_site_proxy.diff_object(gslb_site_list[0])
def main():
    """Module entry point.

    Builds the argument spec, logs into the Netscaler, then ensures the
    GSLB site described by the parameters is 'present' or 'absent',
    reporting changed/diff information back to Ansible.
    """
    module_specific_arguments = dict(
        sitename=dict(type='str'),
        sitetype=dict(
            type='str',
            choices=[
                'REMOTE',
                'LOCAL',
            ]
        ),
        siteipaddress=dict(type='str'),
        publicip=dict(type='str'),
        metricexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        nwmetricexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        sessionexchange=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        triggermonitor=dict(
            type='str',
            choices=[
                'ALWAYS',
                'MEPDOWN',
                'MEPDOWN_SVCDOWN',
            ]
        ),
        parentsite=dict(type='str'),
        clip=dict(type='str'),
        publicclip=dict(type='str'),
        naptrreplacementsuffix=dict(type='str'),
    )
    hand_inserted_arguments = dict(
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # Requests transport errors are matched by class-name string so
        # this code does not need to import requests directly.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes the module may read and write on the gslbsite object.
    readwrite_attrs = [
        'sitename',
        'sitetype',
        'siteipaddress',
        'publicip',
        'metricexchange',
        'nwmetricexchange',
        'sessionexchange',
        'triggermonitor',
        'parentsite',
        'clip',
        'publicclip',
        'naptrreplacementsuffix',
    ]
    readonly_attrs = [
        'status',
        'persistencemepstatus',
        'version',
        '__count',
    ]
    # Attributes that cannot change after creation; attempting to update
    # them fails with a diff instead of silently being ignored.
    immutable_attrs = [
        'sitename',
        'sitetype',
        'siteipaddress',
        'publicip',
        'parentsite',
        'clip',
        'publicclip',
    ]
    # The NITRO API expects these values upper-cased (ENABLED/DISABLED).
    transforms = {
        'metricexchange': [lambda v: v.upper()],
        'nwmetricexchange': [lambda v: v.upper()],
        'sessionexchange': [lambda v: v.upper()],
    }
    # Instantiate config proxy
    gslb_site_proxy = ConfigProxy(
        actual=gslbsite(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        ensure_feature_is_enabled(client, 'GSLB')
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not gslb_site_exists(client, module):
                if not module.check_mode:
                    gslb_site_proxy.add()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not gslb_site_identical(client, module, gslb_site_proxy):
                # Check if we try to change value of immutable attributes
                immutables_changed = get_immutables_intersection(gslb_site_proxy, diff_list(client, module, gslb_site_proxy).keys())
                if immutables_changed != []:
                    module.fail_json(
                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
                        diff=diff_list(client, module, gslb_site_proxy),
                        **module_result
                    )
                if not module.check_mode:
                    gslb_site_proxy.update()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not gslb_site_exists(client, module):
                    module.fail_json(msg='GSLB site does not exist', **module_result)
                if not gslb_site_identical(client, module, gslb_site_proxy):
                    module.fail_json(msg='GSLB site differs from configured', diff=diff_list(client, module, gslb_site_proxy), **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if gslb_site_exists(client, module):
                if not module.check_mode:
                    gslb_site_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if gslb_site_exists(client, module):
                    module.fail_json(msg='GSLB site still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
lulandco/SickRage | lib/sqlalchemy/dialects/sybase/pyodbc.py | 79 | 2162 | # sybase/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]
:url: http://pypi.python.org/pypi/pyodbc/
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
NCHAR
NVARCHAR
TEXT
VARCHAR
Currently *not* supported are::
UNICHAR
UNITEXT
UNIVARCHAR
"""
from sqlalchemy.dialects.sybase.base import SybaseDialect,\
SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < -6 into floats.
    It's not yet known how to get decimals with many
    significant digits or very large adjusted() into Sybase
    via pyodbc.
    """
    def bind_processor(self, dialect):
        # Capture the base Numeric bind processor (may be None) so the
        # closure below can fall back to it.
        super_process = super(_SybNumeric_pyodbc, self).\
            bind_processor(dialect)
        def process(value):
            # Very small decimals (adjusted() < -6) are degraded to float
            # before binding, per the class docstring.
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):
                if value.adjusted() < -6:
                    return processors.to_float(value)
            # Otherwise defer to the base processor when one exists, else
            # pass the value through unchanged.
            if super_process:
                return super_process(value)
            else:
                return value
        return process
class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
    def set_ddl_autocommit(self, connection, value):
        """Toggle the DBAPI connection's autocommit flag for DDL."""
        # Single boolean assignment in place of the if/else pair; bool()
        # preserves the original truthiness semantics exactly.
        connection.autocommit = bool(value)
class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
    # Use the pyodbc-aware execution context so DDL autocommit works.
    execution_ctx_cls = SybaseExecutionContext_pyodbc
    colspecs = {
        # Route Numeric binds through the pyodbc decimal workaround above.
        sqltypes.Numeric: _SybNumeric_pyodbc,
    }
# Module-level 'dialect' is the conventional SQLAlchemy entry point for
# dialect lookup.
dialect = SybaseDialect_pyodbc
| gpl-3.0 |
Workday/OpenFrame | native_client_sdk/src/build_tools/build_sdk.py | 1 | 36370 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
NACLPORTS_URL = 'https://chromium.googlesource.com/external/naclports.git'
NACLPORTS_REV = '65c71c1524a74ff8415573e5e5ef7c59ce4ac437'
GYPBUILD_DIR = 'gypbuild'
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
}
def GetToolchainDirName(tcname):
  """Return the directory name for a given toolchain"""
  dir_template = TOOLCHAIN_PACKAGE_MAP[tcname][1]
  return dir_template % {'platform': getos.GetPlatform()}
def GetToolchainDir(pepperdir, tcname):
  """Return the full path to a given toolchain within a given sdk root"""
  dirname = GetToolchainDirName(tcname)
  return os.path.join(pepperdir, 'toolchain', dirname)
def GetToolchainLibc(tcname):
  """Return the libc flavor for a toolchain name.

  PNaCl always uses newlib; otherwise the flavor embedded in the name
  ('glibc', 'newlib' or 'host') is returned.  Returns None when nothing
  matches, exactly as the original fall-through did.
  """
  if tcname == 'pnacl':
    return 'newlib'
  return next((flavor for flavor in ('glibc', 'newlib', 'host')
               if flavor in tcname), None)
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
  """Return the toolchain's NaCl include directory for the given arch.

  When arch is None, the toolchain's default architecture from
  TOOLCHAIN_PACKAGE_MAP is used.  Exits via ErrorExit for unknown arches.
  """
  tcpath = GetToolchainDir(pepperdir, tcname)
  if arch is None:
    arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
  subdir_by_arch = {
      'x86': 'x86_64-nacl',
      'pnacl': 'le32-nacl',
      'arm': 'arm-nacl',
  }
  if arch not in subdir_by_arch:
    buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
  return os.path.join(tcpath, subdir_by_arch[arch], 'include')
def GetConfigDir(arch):
  """Return the build configuration directory name for |arch|."""
  # 64-bit Windows builds land in a separate Release_x64 directory.
  is_win64 = arch.endswith('x64') and getos.GetPlatform() == 'win'
  return 'Release_x64' if is_win64 else 'Release'
def GetNinjaOutDir(arch):
  """Return the ninja output directory for the gyp build of |arch|."""
  build_dir = '%s-%s' % (GYPBUILD_DIR, arch)
  return os.path.join(OUT_DIR, build_dir, GetConfigDir(arch))
def GetGypBuiltLib(tcname, arch):
  """Return the gyp-built library directory for a toolchain/arch pair."""
  # Suffix appended to 'lib' in the generated output tree; empty for
  # anything other than the three known architectures.
  suffixes = {'ia32': '32', 'x64': '64', 'arm': 'arm'}
  lib_suffix = suffixes.get(arch, '')
  tcdir = 'tc_' + GetToolchainLibc(tcname)
  if tcname == 'pnacl':
    if arch is None:
      # PNaCl with no explicit arch builds in the x64 output tree with an
      # unsuffixed pnacl-newlib lib directory.
      lib_suffix = ''
      tcdir = 'tc_pnacl_newlib'
      arch = 'x64'
    else:
      arch = 'clang-' + arch
  return os.path.join(GetNinjaOutDir(arch), 'gen', tcdir, 'lib' + lib_suffix)
def GetToolchainNaClLib(tcname, tcpath, arch):
  """Return the NaCl library directory inside a toolchain tree.

  Returns None for unrecognised tcname/arch combinations, matching the
  original fall-through behavior.
  """
  arch_dirs = {
      'ia32': ('x86_64-nacl', 'lib32'),
      'x64': ('x86_64-nacl', 'lib'),
      'arm': ('arm-nacl', 'lib'),
  }
  if arch in arch_dirs:
    subdir, libdir = arch_dirs[arch]
    return os.path.join(tcpath, subdir, libdir)
  if tcname == 'pnacl':
    return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
  """Return the NaCl lib directory of |tcname| inside the built SDK."""
  toolchain_path = os.path.join(pepperdir, 'toolchain',
                                GetToolchainDirName(tcname))
  return GetToolchainNaClLib(tcname, toolchain_path, arch)
def GetPNaClTranslatorLib(tcpath, arch):
  """Return the PNaCl translator lib directory for |arch|.

  Exits via ErrorExit for anything other than arm/x86-32/x86-64.
  """
  if arch not in ('arm', 'x86-32', 'x86-64'):
    buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
  return os.path.join(tcpath, 'translator', arch, 'lib')
def BuildStepDownloadToolchains(toolchains):
  """Sync and extract the NaCl toolchain packages via package_version.py.

  Note: |toolchains| is not referenced in this body; the package set is
  fixed by the 'nacl_core_sdk' mode.
  """
  buildbot_common.BuildStep('Running package_version.py')
  args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
  args.extend(['sync', '--extract'])
  buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
  """Remove stale SDK output directories and recreate an empty pepperdir."""
  buildbot_common.BuildStep('Clean Pepper Dirs')
  # arm_trusted is rebuilt alongside the SDK, so it is cleaned here too.
  dirs_to_remove = (
      pepperdir,
      pepperdir_old,
      os.path.join(OUT_DIR, 'arm_trusted')
  )
  for dirname in dirs_to_remove:
    if os.path.exists(dirname):
      buildbot_common.RemoveDir(dirname)
  buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
  """Create each requested subdirectory under the SDK root."""
  for name in subdirs:
    buildbot_common.MakeDir(os.path.join(pepperdir, name))
# Plain-text files copied verbatim from the SDK source tree into the
# bundle root (see BuildStepCopyTextFiles).
TEXT_FILES = [
    'AUTHORS',
    'COPYING',
    'LICENSE',
    'README.Makefiles',
    'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
                           nacl_revision):
  """Copy TEXT_FILES into the SDK and expand placeholders in README.

  Args:
    pepperdir: destination SDK root.
    pepper_ver: substituted for ${VERSION}.
    chrome_revision: substituted for ${CHROME_REVISION}.
    nacl_revision: substituted for ${NACL_REVISION}.
  ${CHROME_COMMIT_POSITION} and ${DATE} are filled in from
  build_version and the current local time respectively.
  """
  buildbot_common.BuildStep('Add Text Files')
  InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
  # Replace a few placeholders in README
  readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
  readme_text = readme_text.replace('${VERSION}', pepper_ver)
  readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
  readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
                                    build_version.ChromeCommitPosition())
  readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
  # Year/Month/Day Hour:Minute:Second
  time_format = '%Y/%m/%d %H:%M:%S'
  readme_text = readme_text.replace('${DATE}',
      datetime.datetime.now().strftime(time_format))
  open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
  """Extract the downloaded toolchain packages into pepperdir/toolchain.

  Reads tarballs from NACL_TOOLCHAINTARS_DIR (presumably populated by
  BuildStepDownloadToolchains — confirm ordering with callers), extracts
  them into a temp dir, then moves each package to its destination.
  """
  buildbot_common.BuildStep('Untar Toolchains')
  platform = getos.GetPlatform()
  build_platform = '%s_x86' % platform
  tmpdir = os.path.join(OUT_DIR, 'tc_temp')
  # Start from a clean scratch directory.
  buildbot_common.RemoveDir(tmpdir)
  buildbot_common.MakeDir(tmpdir)

  # Create a list of extract packages tuples, the first part should be
  # "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
  # directory relative to pepperdir/toolchain.
  extract_packages = []
  for toolchain in toolchains:
    toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
    if toolchain_map:
      package_name, tcdir, _ = toolchain_map
      package_tuple = (os.path.join(build_platform, package_name),
                       tcdir % {'platform': platform})
      extract_packages.append(package_tuple)

  # On linux we also want to extract the arm_trusted package which contains
  # the ARM libraries we ship in support of sel_ldr_arm.
  if platform == 'linux':
    extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
                             'arm_trusted'))

  if extract_packages:
    # Extract all of the packages into the temp directory.
    package_names = [package_tuple[0] for package_tuple in extract_packages]
    buildbot_common.Run([sys.executable, PKGVER,
                         '--packages', ','.join(package_names),
                         '--tar-dir', NACL_TOOLCHAINTARS_DIR,
                         '--dest-dir', tmpdir,
                         'extract'])

    # Move all the packages we extracted to the correct destination.
    for package_name, dest_dir in extract_packages:
      full_src_dir = os.path.join(tmpdir, package_name)
      full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
      buildbot_common.Move(full_src_dir, full_dst_dir)

  # Cleanup the temporary directory we are no longer using.
  buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
# Keyed by libc name (looked up via GetToolchainLibc in
# InstallNaClHeaders); each entry is an InstallFiles-style
# (source, destination/) pair.
NACL_HEADER_MAP = {
  'newlib': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/irt/irt_extension.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/pthread/pthread.h', ''),
      ('native_client/src/untrusted/pthread/semaphore.h', ''),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'glibc': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/irt/irt_extension.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
}
def InstallFiles(src_root, dest_root, file_list):
  """Copy a set of files from src_root to dest_root according to the
  given mapping.  This allows a file to be copied to a location in the
  destination tree that is different from its location in the source tree.

  If the destination mapping ends with a '/' then the destination
  basename is inherited from the source file.

  Wildcards can be used in the source list but it is not recommended
  as this can end up adding things to the SDK unintentionally.
  """
  for file_spec in file_list:
    # The list of files to install can be a simple list of
    # strings or a list of pairs, where each pair corresponds
    # to a mapping from source to destination names.
    # (isinstance is the idiomatic type test; the original used
    # type(x) == str.)
    if isinstance(file_spec, str):
      src_file = dest_file = file_spec
    else:
      src_file, dest_file = file_spec

    src_file = os.path.join(src_root, src_file)

    # Expand sources files using glob.
    sources = glob.glob(src_file)
    if not sources:
      sources = [src_file]

    if len(sources) > 1 and not dest_file.endswith('/'):
      buildbot_common.ErrorExit("Target file must end in '/' when "
                                "using globbing to install multiple files")

    for source in sources:
      if dest_file.endswith('/'):
        dest = os.path.join(dest_file, os.path.basename(source))
      else:
        dest = dest_file
      dest = os.path.join(dest_root, dest)
      if not os.path.isdir(os.path.dirname(dest)):
        buildbot_common.MakeDir(os.path.dirname(dest))
      buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tcname):
  """Copies NaCl headers to expected locations in the toolchain."""
  header_map = NACL_HEADER_MAP[GetToolchainLibc(tcname)]
  InstallFiles(SRC_DIR, tc_dst_inc, header_map)
def MakeNinjaRelPath(path):
  """Return |path| under the output directory, expressed relative to SRC_DIR."""
  rel_out = os.path.relpath(OUT_DIR, SRC_DIR)
  return os.path.join(rel_out, path)
# TODO(ncbray): stop building and copying libraries into the SDK that are
# already provided by the toolchain.
# Mapping from libc to libraries gyp-build trusted libraries
# (static .a archives for both, plus shared .so objects for glibc).
# Consumed by GypNinjaInstall via InstallFiles.
TOOLCHAIN_LIBS = {
  'newlib' : [
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ],
  'glibc': [
    'libminidump_generator.a',
    'libminidump_generator.so',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_dyncode.so',
    'libnacl_exception.a',
    'libnacl_exception.so',
    'libnacl_list_mappings.a',
    'libnacl_list_mappings.so',
    'libppapi.a',
    'libppapi.so',
    'libppapi_stub.a',
  ]
}
def GypNinjaInstall(pepperdir, toolchains):
  """Install gyp/ninja-built binaries and libraries into the bundle.

  Copies per-arch host tools into pepperdir/tools and the per-libc
  TOOLCHAIN_LIBS into each toolchain's lib directory.  Each entry in the
  *_files lists is a [source_name, installed_name] pair.
  """
  tools_files_32 = [
    ['sel_ldr', 'sel_ldr_x86_32'],
    ['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
    ['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
  ]
  arm_files = [
    ['elf_loader_newlib_arm.nexe', 'elf_loader_arm.nexe'],
  ]

  tools_files_64 = []

  platform = getos.GetPlatform()

  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if platform != 'win':
    tools_files_64 += [
      ['dump_syms', 'dump_syms'],
      ['minidump_dump', 'minidump_dump'],
      ['minidump_stackwalk', 'minidump_stackwalk']
    ]

  tools_files_64.append(['sel_ldr', 'sel_ldr_x86_64'])
  tools_files_64.append(['ncval_new', 'ncval'])

  if platform == 'linux':
    tools_files_32.append(['nacl_helper_bootstrap',
                           'nacl_helper_bootstrap_x86_32'])
    tools_files_64.append(['nacl_helper_bootstrap',
                           'nacl_helper_bootstrap_x86_64'])
    tools_files_32.append(['nonsfi_loader_newlib_x32_nonsfi.nexe',
                           'nonsfi_loader_x86_32'])

  tools_dir = os.path.join(pepperdir, 'tools')
  buildbot_common.MakeDir(tools_dir)

  # Add .exe extensions to all windows tools
  for pair in tools_files_32 + tools_files_64:
    if platform == 'win' and not pair[0].endswith('.nexe'):
      pair[0] += '.exe'
      pair[1] += '.exe'

  # Add ARM binaries
  if platform == 'linux' and not options.no_arm_trusted:
    arm_files += [
      ['irt_core_newlib_arm.nexe', 'irt_core_arm.nexe'],
      ['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
      ['nonsfi_loader_newlib_arm_nonsfi.nexe', 'nonsfi_loader_arm'],
      ['sel_ldr', 'sel_ldr_arm']
    ]

  InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_64)
  InstallFiles(GetNinjaOutDir('ia32'), tools_dir, tools_files_32)
  InstallFiles(GetNinjaOutDir('arm'), tools_dir, arm_files)

  # Copy the gyp-built trusted libraries for each shipped toolchain;
  # 'host' and 'clang-newlib' have no per-arch libs to install here.
  for tc in toolchains:
    if tc in ('host', 'clang-newlib'):
      continue
    elif tc == 'pnacl':
      xarches = (None, 'ia32', 'x64', 'arm')
    elif tc in ('x86_glibc', 'x86_newlib'):
      xarches = ('ia32', 'x64')
    elif tc == 'arm_glibc':
      xarches = ('arm',)
    else:
      raise AssertionError('unexpected toolchain value: %s' % tc)

    for xarch in xarches:
      src_dir = GetGypBuiltLib(tc, xarch)
      dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
      libc = GetToolchainLibc(tc)
      InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[libc])
def GypNinjaBuild_NaCl(rel_out_dir):
  """Build nacl_core_sdk for every arch (gcc and nacl-clang) plus ncval."""
  gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
  nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
  all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')

  # One pass per arch with the GCC-based toolchain...
  for arch in ('ia32', 'x64', 'arm'):
    out_dir = MakeNinjaRelPath('%s-%s' % (rel_out_dir, arch))
    GypNinjaBuild(arch, gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir,
                  gyp_defines=['use_nacl_clang=0'])

  # ...and one pass per arch with nacl-clang.
  for arch in ('ia32', 'x64', 'arm'):
    out_dir = MakeNinjaRelPath('%s-clang-%s' % (rel_out_dir, arch))
    GypNinjaBuild(arch, gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir,
                  gyp_defines=['use_nacl_clang=1'])

  # The validator is only built for x64 (non-clang output dir).
  GypNinjaBuild('x64', gyp_py, all_gyp, 'ncval_new',
                MakeNinjaRelPath(rel_out_dir + '-x64'))
def GypNinjaBuild_Breakpad(rel_out_dir):
  """Build the breakpad host tools (skipped on Windows)."""
  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if getos.GetPlatform() == 'win':
    return

  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
  targets = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
  GypNinjaBuild('x64', gyp_script, gyp_file, targets,
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_PPAPI(arch, rel_out_dir, gyp_defines=None):
  """Build the ppapi_lib target for a single |arch|."""
  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
                          'native_client.gyp')
  GypNinjaBuild(arch, gyp_script, gyp_file, 'ppapi_lib',
                MakeNinjaRelPath(rel_out_dir), gyp_defines=gyp_defines)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
  """Build the pnacl_irt_shim ('aot' target) for |target_arch|."""
  # TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
  # Chromium build, and once here. When we move more of the SDK build process
  # to gyp, we can remove this.
  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
                          'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
  GypNinjaBuild(target_arch, gyp_script, gyp_file, ['aot'],
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
                  out_dir, gyp_defines=None):
  """Generate ninja files with gyp, then build |targets| with ninja.

  Args:
    arch: gyp target_arch, or None to leave it unset.
    gyp_py_script: path to the gyp driver script to run.
    gyp_file: the .gyp file to generate from.
    targets: target name or list of names, forwarded to NinjaBuild.
    out_dir: ninja output directory (relative to SRC_DIR).
    gyp_defines: optional extra GYP_DEFINES entries.
  """
  gyp_env = dict(os.environ)
  gyp_env['GYP_GENERATORS'] = 'ninja'
  # Copy the caller's list so appends below don't mutate their argument
  # (the original appended to the passed-in list in place).
  gyp_defines = list(gyp_defines or [])
  gyp_defines.append('nacl_allow_thin_archives=0')
  if not options.no_use_sysroot:
    gyp_defines.append('use_sysroot=1')
  if options.mac_sdk:
    gyp_defines.append('mac_sdk=%s' % options.mac_sdk)

  if arch is not None:
    gyp_defines.append('target_arch=%s' % arch)
    if arch == 'arm':
      gyp_env['GYP_CROSSCOMPILE'] = '1'
      if options.no_arm_trusted:
        gyp_defines.append('disable_cross_trusted=1')
  if getos.GetPlatform() == 'mac':
    gyp_defines.append('clang=1')

  gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
  # We can't use windows path separators in GYP_GENERATOR_FLAGS since
  # gyp uses shlex to parse them and treats '\' as an escape char.
  gyp_env['GYP_GENERATOR_FLAGS'] = 'output_dir=%s' % out_dir.replace('\\', '/')

  # Print relevant environment variables.  items() (not the Python-2-only
  # iteritems()) and the function-call print form work on both Python 2 and 3.
  for key, value in gyp_env.items():
    if key.startswith('GYP') or key in ('CC',):
      print(' %s="%s"' % (key, value))

  buildbot_common.Run(
      [sys.executable, gyp_py_script, gyp_file, '--depth=.'],
      cwd=SRC_DIR,
      env=gyp_env)
  NinjaBuild(targets, out_dir, arch)
def NinjaBuild(targets, out_dir, arch):
  """Run ninja on |targets| in the config directory for |arch|.

  |targets| may be a single target name or a list of names.
  """
  # isinstance is the idiomatic type test (original used `type(...) is list`).
  if not isinstance(targets, list):
    targets = [targets]
  out_config_dir = os.path.join(out_dir, GetConfigDir(arch))
  buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
  """Build (optionally after cleaning) the gyp targets and install them.

  Args:
    pepperdir: bundle output directory.
    toolchains: list of toolchain names being shipped.
    build: whether to run the gyp/ninja builds (forced True when cleaning).
    clean: whether to wipe existing GYPBUILD_DIR* output first.
  """
  buildbot_common.BuildStep('SDK Items')

  if clean:
    for dirname in glob.glob(os.path.join(OUT_DIR, GYPBUILD_DIR + '*')):
      buildbot_common.RemoveDir(dirname)
    build = True

  if build:
    GypNinjaBuild_NaCl(GYPBUILD_DIR)
    GypNinjaBuild_Breakpad(GYPBUILD_DIR + '-x64')

    if set(toolchains) & set(['x86_glibc', 'x86_newlib']):
      GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-ia32',
                          ['use_nacl_clang=0'])
      GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-x64',
                          ['use_nacl_clang=0'])

    if 'arm_glibc' in toolchains:
      GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm',
                          ['use_nacl_clang=0'])

    if 'pnacl' in toolchains:
      GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-clang-ia32',
                          ['use_nacl_clang=1'])
      GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-clang-x64',
                          ['use_nacl_clang=1'])
      GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-clang-arm',
                          ['use_nacl_clang=1'])

      # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
      for arch in ('ia32', 'arm'):
        # Fill in the latest native pnacl shim library from the chrome build.
        build_dir = GYPBUILD_DIR + '-pnacl-' + arch
        GypNinjaBuild_Pnacl(build_dir, arch)

  GypNinjaInstall(pepperdir, toolchains)

  # Install NaCl headers for every non-host toolchain.
  for toolchain in toolchains:
    if toolchain not in ('host', 'clang-newlib'):
      InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
                         toolchain)

  if 'pnacl' in toolchains:
    # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
    for arch in ('ia32', 'arm'):
      # Fill in the latest native pnacl shim library from the chrome build.
      build_dir = GYPBUILD_DIR + '-pnacl-' + arch
      if arch == 'ia32':
        nacl_arches = ['x86-32', 'x86-64']
      elif arch == 'arm':
        nacl_arches = ['arm']
      else:
        buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
      for nacl_arch in nacl_arches:
        release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
                                         'gen', 'tc_pnacl_translate',
                                         'lib-' + nacl_arch)

        pnacldir = GetToolchainDir(pepperdir, 'pnacl')
        pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
        if not os.path.isdir(pnacl_translator_lib_dir):
          buildbot_common.ErrorExit('Expected %s directory to exist.' %
                                    pnacl_translator_lib_dir)

        buildbot_common.CopyFile(
            os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
            pnacl_translator_lib_dir)

    # pnacl keeps separate include trees for x86 and arm.
    InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
                       'pnacl')
    InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
                       'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
  """Ensure pepperdir/dirname exists, optionally wiping it first.

  Returns the resulting directory path.
  """
  path = os.path.join(pepperdir, dirname)
  if clobber:
    buildbot_common.RemoveDir(path)
  buildbot_common.MakeDir(path)
  return path
def BuildStepUpdateHelpers(pepperdir, clobber):
  """Build step: delegate to build_projects.UpdateHelpers for |pepperdir|."""
  buildbot_common.BuildStep('Update project helpers')
  build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
                                build_experimental, clobber):
  """Stage the SDK examples and library sources into the bundle."""
  buildbot_common.BuildStep('Update examples and libraries')

  filters = {}
  if not build_experimental:
    filters['EXPERIMENTAL'] = False

  # Translate build toolchain names into the names used by project files:
  # strip the 'x86_'/'arm_' prefix (deduplicated, since both map to the
  # same libc name) and map 'host' to the current platform.
  dsc_toolchains = []
  for t in toolchains:
    if t.startswith(('x86_', 'arm_')):
      short_name = t[4:]
      if short_name not in dsc_toolchains:
        dsc_toolchains.append(short_name)
    elif t == 'host':
      dsc_toolchains.append(getos.GetPlatform())
    else:
      dsc_toolchains.append(t)

  filters['TOOLS'] = dsc_toolchains

  # Update examples and libraries
  filters['DEST'] = [
    'getting_started',
    'examples/api',
    'examples/demo',
    'examples/tutorial',
    'src'
  ]

  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
                                toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
                     deps=True, clean=False, config='Debug', args=None):
  """Build step: build all projects under |directory| in the given config."""
  buildbot_common.BuildStep(step_name)
  build_projects.BuildProjectsBranch(pepperdir, directory, clean,
                                     deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
  """Build the bundled libraries in both Debug and Release configurations."""
  for config in ('Debug', 'Release'):
    BuildStepMakeAll(pepperdir, directory, 'Build Libraries ' + config,
                     clean=True, config=config)

  # Cleanup .pyc file generated while building libraries. Without
  # this we would end up shipping the pyc in the SDK tarball.
  buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
  """Collect license files under |fileroot| into a single notice file.

  Walks |fileroot| for files whose names start with LICENSE/COPYING/
  COPYRIGHT, optionally adds |extra_files| (relative to fileroot), and
  hands the list to generate_notice.Generate.
  """
  # Look for LICENSE files
  license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')

  license_files = []
  for root, _, files in os.walk(fileroot):
    for filename in files:
      if license_filenames_re.match(filename):
        license_files.append(os.path.join(root, filename))

  if extra_files:
    license_files += [os.path.join(fileroot, f) for f in extra_files]
  # Function-call form of print works on both Python 2 and 3 for a single
  # argument (the original used the Python-2-only print statement).
  print('\n'.join(license_files))

  if not os.path.isabs(output_filename):
    output_filename = os.path.join(fileroot, output_filename)
  generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
  """Check the bundle contents against the checked-in sdk_files.list.

  Exits the build with a helpful message on parse or verification failure.
  """
  buildbot_common.BuildStep('Verify SDK Files')
  file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
  try:
    print('SDK directory: %s' % pepperdir)
    verify_filelist.Verify(file_list_path, pepperdir)
    print('OK')
  # `except X as e` replaces the Python-2-only `except X, e` syntax
  # (works on Python 2.6+ and Python 3).
  except verify_filelist.ParseException as e:
    buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
  except verify_filelist.VerifyException as e:
    file_list_rel = os.path.relpath(file_list_path)
    verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
    verify_filelist_py = os.path.relpath(verify_filelist_py)
    pepperdir_rel = os.path.relpath(pepperdir)
    msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
               pepperdir_rel)
    buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
  """Create the bzip2-compressed SDK tarball via the cygwin-safe tar script."""
  buildbot_common.BuildStep('Tar Pepper Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  tar_cmd = [sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
             'pepper_' + pepper_ver]
  buildbot_common.Run(tar_cmd, cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
                      archive_url):
  """Build a manifest_util.Bundle describing the given SDK tarball."""
  with open(tarfile, 'rb') as tarfile_stream:
    archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
        tarfile_stream)

  archive = manifest_util.Archive(manifest_util.GetHostOS())
  archive.url = archive_url
  archive.size = archive_size
  archive.checksum = archive_sha1

  bundle_name = 'pepper_' + pepper_ver
  bundle = manifest_util.Bundle(bundle_name)
  bundle.revision = int(chrome_revision)
  bundle.repath = bundle_name
  bundle.version = int(pepper_ver)
  bundle.description = (
      'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
          pepper_ver, chrome_revision, nacl_revision))
  bundle.stability = 'dev'
  bundle.recommended = 'no'
  bundle.archives = [archive]
  return bundle
def Archive(filename, from_directory, step_link=True):
  """Upload |filename| to the appropriate Cloud Storage bucket path.

  SDK builders upload to the real mirror path; everything else goes to
  the test path.
  """
  if buildbot_common.IsSDKBuilder():
    prefix = 'nativeclient-mirror/nacl/nacl_sdk/'
  else:
    prefix = 'nativeclient-mirror/nacl/nacl_sdk_test/'
  bucket_path = prefix + build_version.ChromeVersion()
  buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
                           tarfile):
  """Upload the SDK tarball together with its manifest snippet."""
  buildbot_common.BuildStep('Archive %s' % name)
  tarname = os.path.basename(tarfile)
  Archive(tarname, os.path.dirname(tarfile))

  # generate "manifest snippet" for this archive.
  archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
      build_version.ChromeVersion(), tarname)
  bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
                             tarfile, archive_url)

  manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
  with open(manifest_snippet_file, 'wb') as snippet_stream:
    snippet_stream.write(bundle.GetDataAsString())

  Archive(tarname + '.json', OUT_DIR, step_link=False)
def _PNaClComponentVersion(version, revision):
  """Map (version, revision) to a 4-part extension manifest version string.

  Version numbers must follow the format specified in:
  https://developer.chrome.com/extensions/manifest/version
  So ensure that rev_major/rev_minor don't overflow and ensure there
  are no leading zeros: a revision longer than 4 digits is split into
  (major, minor) pieces of at most 4 digits each.
  """
  if len(revision) > 4:
    rev_minor = int(revision[-4:])
    rev_major = int(revision[:-4])
    return "0.%s.%s.%s" % (version, rev_major, rev_minor)
  return "0.%s.0.%s" % (version, revision)


def BuildStepBuildPNaClComponent(version, revision):
  """Build the PNaCl translator component zip for component updater."""
  # Sadly revision can go backwards for a given version since when a version
  # is built from master, revision will be a huge number (in the hundreds of
  # thousands). Once the branch happens the revision will reset to zero.
  # TODO(sbc): figure out how to compensate for this in some way such that
  # revisions always go forward for a given version.
  buildbot_common.BuildStep('PNaCl Component')
  buildbot_common.Run(['./make_pnacl_component.sh',
                       'pnacl_multicrx_%s.zip' % revision,
                       _PNaClComponentVersion(version, revision)],
                      cwd=SCRIPT_DIR)
def BuildStepArchivePNaClComponent(revision):
  """Upload the PNaCl component zip produced by BuildStepBuildPNaClComponent."""
  buildbot_common.BuildStep('Archive PNaCl Component')
  zip_name = 'pnacl_multicrx_%s.zip' % revision
  Archive(zip_name, OUT_DIR)
def BuildStepArchiveSDKTools():
  """Build the SDK updater and upload its two distributable archives."""
  buildbot_common.BuildStep('Build SDK Tools')
  build_updater.BuildUpdater(OUT_DIR)

  buildbot_common.BuildStep('Archive SDK Tools')
  for artifact in ('sdk_tools.tgz', 'nacl_sdk.zip'):
    Archive(artifact, OUT_DIR, step_link=False)
class _unused:  # placeholder removed below
    pass


def BuildStepBuildAppEngine(pepperdir, chrome_revision):
  """Build the projects found in src/gonacl_appengine/src"""
  buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
  env = dict(os.environ,
             NACL_SDK_ROOT=pepperdir,
             NACLPORTS_NO_ANNOTATE="1")
  buildbot_common.Run(['make', 'upload', 'REVISION=%s' % chrome_revision],
                      env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
  """Entry point: parse arguments and run the SDK build steps in order.

  The step order below is load-bearing: toolchains must be staged before
  they are built against, and verification must precede tar/archive.
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--qemu', help='Add qemu for ARM.',
                      action='store_true')
  parser.add_argument('--tar', help='Force the tar step.',
                      action='store_true')
  parser.add_argument('--archive', help='Force the archive step.',
                      action='store_true')
  parser.add_argument('--release', help='PPAPI release version.',
                      dest='release', default=None)
  parser.add_argument('--build-app-engine',
                      help='Build AppEngine demos.', action='store_true')
  parser.add_argument('--experimental',
                      help='build experimental examples and libraries',
                      action='store_true',
                      dest='build_experimental')
  parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
                      action='store_true')
  parser.add_argument('--no-clean', dest='clean', action='store_false',
                      help="Don't clean gypbuild directories")
  parser.add_argument('--mac-sdk',
                      help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
  parser.add_argument('--no-arm-trusted', action='store_true',
                      help='Disable building of ARM trusted components (sel_ldr, etc).')
  parser.add_argument('--no-use-sysroot', action='store_true',
                      help='Disable building against sysroot.')

  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete build_sdk.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass

  # Parsed options are stashed in a module-level global because many build
  # steps (e.g. GypNinjaBuild, GypNinjaInstall) read them directly.
  global options
  options = parser.parse_args(args)

  buildbot_common.BuildStep('build_sdk')

  if buildbot_common.IsSDKBuilder():
    options.archive = True
    # TODO(binji): re-enable app_engine build when the linux builder stops
    # breaking when trying to git clone from github.
    # See http://crbug.com/412969.
    options.build_app_engine = False
    options.tar = True

  # NOTE: order matters here. This will be the order that is specified in the
  # Makefiles; the first toolchain will be the default.
  toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']

  print 'Building: ' + ' '.join(toolchains)
  platform = getos.GetPlatform()

  if options.archive and not options.tar:
    parser.error('Incompatible arguments with archive.')

  chrome_version = int(build_version.ChromeMajorVersion())
  chrome_revision = build_version.ChromeRevision()
  nacl_revision = build_version.NaClRevision()
  pepper_ver = str(chrome_version)
  pepper_old = str(chrome_version - 1)
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
  tarname = 'naclsdk_%s.tar.bz2' % platform
  tarfile = os.path.join(OUT_DIR, tarname)

  if options.release:
    pepper_ver = options.release
  print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)

  if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # of the build.
    del os.environ['NACL_SDK_ROOT']

  if platform == 'linux':
    # Linux-only: make sure the debian/stable sysroot image is installed
    install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
                                  'install-sysroot.py')

    buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
    buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
    buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])

  if not options.skip_toolchain:
    BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
    BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
    BuildStepDownloadToolchains(toolchains)
    BuildStepUntarToolchains(pepperdir, toolchains)
    if platform == 'linux':
      # Relocate arm_trusted out of the bundle; selected libs are copied
      # back under tools/ below.
      buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
                           os.path.join(OUT_DIR, 'arm_trusted'))

  if platform == 'linux':
    # Linux-only: Copy arm libraries from the arm_trusted package. These are
    # needed to be able to run sel_ldr_arm under qemu.
    arm_libs = [
      'lib/arm-linux-gnueabihf/librt.so.1',
      'lib/arm-linux-gnueabihf/libpthread.so.0',
      'lib/arm-linux-gnueabihf/libgcc_s.so.1',
      'lib/arm-linux-gnueabihf/libc.so.6',
      'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
      'lib/arm-linux-gnueabihf/libm.so.6',
      'usr/lib/arm-linux-gnueabihf/libstdc++.so.6'
    ]
    arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
    buildbot_common.MakeDir(arm_lib_dir)
    for arm_lib in arm_libs:
      arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
      buildbot_common.CopyFile(arm_lib, arm_lib_dir)

    buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
                             os.path.join(pepperdir, 'tools'))

  BuildStepBuildToolchains(pepperdir, toolchains,
                           not options.skip_toolchain,
                           options.clean)

  BuildStepUpdateHelpers(pepperdir, True)
  BuildStepUpdateUserProjects(pepperdir, toolchains,
                              options.build_experimental, True)

  BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)

  # Ship with libraries prebuilt, so run that first.
  BuildStepBuildLibraries(pepperdir, 'src')
  GenerateNotice(pepperdir)

  # Verify the SDK contains what we expect.
  BuildStepVerifyFilelist(pepperdir)

  if options.tar:
    BuildStepTarBundle(pepper_ver, tarfile)

  if platform == 'linux':
    BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)

  if options.build_app_engine and platform == 'linux':
    BuildStepBuildAppEngine(pepperdir, chrome_revision)

  if options.qemu:
    qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
    oshelpers.Copy(['-r', qemudir, pepperdir])

  # Archive the results on Google Cloud Storage.
  if options.archive:
    BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
                           tarfile)
    # Only archive sdk_tools/naclport/pnacl_component on linux.
    if platform == 'linux':
      BuildStepArchiveSDKTools()
      BuildStepArchivePNaClComponent(chrome_revision)

  return 0
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    # Turn Ctrl-C into a clean buildbot error instead of a raw traceback.
    buildbot_common.ErrorExit('build_sdk: interrupted')
| bsd-3-clause |
teamstoreheddinge/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| gpl-2.0 |
dragon96/nbproject | apps/django_remote_forms/widgets.py | 7 | 7767 | import datetime
from django.utils.dates import MONTHS
from django.utils.datastructures import SortedDict
class RemoteWidget(object):
    """Serializes a Django form widget's common attributes into a dict."""

    def __init__(self, widget, field_name=None):
        self.field_name = field_name
        self.widget = widget

    def as_dict(self):
        """Return the attributes shared by every widget, in a fixed order."""
        data = SortedDict()
        data['title'] = self.widget.__class__.__name__
        data['is_hidden'] = self.widget.is_hidden
        data['needs_multipart_form'] = self.widget.needs_multipart_form
        data['is_localized'] = self.widget.is_localized
        data['is_required'] = self.widget.is_required
        data['attrs'] = self.widget.attrs
        return data
class RemoteInput(RemoteWidget):
    """Base serializer for <input>-style widgets; adds the input_type key."""

    def as_dict(self):
        data = super(RemoteInput, self).as_dict()
        data['input_type'] = self.widget.input_type
        return data
class RemoteTextInput(RemoteInput):
    """Text input serializer; identical to RemoteInput."""

    def as_dict(self):
        # Pass-through override kept for symmetry with the other widgets.
        return super(RemoteTextInput, self).as_dict()
class RemotePasswordInput(RemoteInput):
    """Password input serializer; identical to RemoteInput."""

    def as_dict(self):
        # Pass-through override kept for symmetry with the other widgets.
        return super(RemotePasswordInput, self).as_dict()
class RemoteHiddenInput(RemoteInput):
    """Hidden input serializer; identical to RemoteInput."""

    def as_dict(self):
        # Pass-through override kept for symmetry with the other widgets.
        return super(RemoteHiddenInput, self).as_dict()
class RemoteEmailInput(RemoteInput):
    """Serializer for EmailInput, downgraded to a plain text input."""

    def as_dict(self):
        """Report the widget as a generic TextInput.

        Both keys already exist in the dict, so overwriting them in either
        order leaves the key order unchanged.
        """
        serialized = super(RemoteEmailInput, self).as_dict()
        serialized['input_type'] = 'text'
        serialized['title'] = 'TextInput'
        return serialized
class RemoteNumberInput(RemoteInput):
    """Serializer for NumberInput, downgraded to a plain text input."""

    def as_dict(self):
        """Report the widget as a generic TextInput."""
        serialized = super(RemoteNumberInput, self).as_dict()
        serialized['input_type'] = 'text'
        serialized['title'] = 'TextInput'
        return serialized
class RemoteURLInput(RemoteInput):
    """Serializer for URLInput, downgraded to a plain text input."""

    def as_dict(self):
        """Report the widget as a generic TextInput."""
        serialized = super(RemoteURLInput, self).as_dict()
        serialized['input_type'] = 'text'
        serialized['title'] = 'TextInput'
        return serialized
class RemoteMultipleHiddenInput(RemoteHiddenInput):
    """Hidden-input serializer that also exposes the configured choices."""

    def as_dict(self):
        serialized = super(RemoteMultipleHiddenInput, self).as_dict()
        serialized['choices'] = self.widget.choices
        return serialized
class RemoteFileInput(RemoteInput):
    """Serializer for Django's FileInput; adds nothing beyond RemoteInput."""

    def as_dict(self):
        return super(RemoteFileInput, self).as_dict()
class RemoteClearableFileInput(RemoteFileInput):
    """File-input serializer including the clear-checkbox UI strings."""

    def as_dict(self):
        serialized = super(RemoteClearableFileInput, self).as_dict()
        source = self.widget
        # Copy the three UI strings in a fixed order so the dict's key
        # order stays stable for clients.
        for attr in ('initial_text', 'input_text', 'clear_checkbox_label'):
            serialized[attr] = getattr(source, attr)
        return serialized
class RemoteTextarea(RemoteWidget):
    """Serializer for Django's Textarea widget."""

    def as_dict(self):
        serialized = super(RemoteTextarea, self).as_dict()
        serialized['input_type'] = 'textarea'
        return serialized
class RemoteTimeInput(RemoteInput):
    """Serializer for time inputs, exposing the expected format strings."""

    def as_dict(self):
        widget_dict = super(RemoteTimeInput, self).as_dict()
        widget_dict['format'] = self.widget.format
        # NOTE(review): `manual_format` is not a standard Django TimeInput
        # attribute -- confirm which widget class this actually wraps.
        widget_dict['manual_format'] = self.widget.manual_format
        # NOTE(review): storing manual_format under 'date' looks like a
        # copy-paste slip; verify what clients read before changing it.
        widget_dict['date'] = self.widget.manual_format
        widget_dict['input_type'] = 'time'
        return widget_dict
class RemoteDateInput(RemoteTimeInput):
    """Serializer for date inputs; provides day/month/year choice lists."""

    def as_dict(self):
        """Serialize with dropdown data: days 1-31, localized month names,
        and the last 101 years ending at the current year."""
        serialized = super(RemoteDateInput, self).as_dict()
        serialized['input_type'] = 'date'

        this_year = datetime.datetime.now().year
        day_data = [{'key': day, 'value': day} for day in range(1, 32)]
        month_data = [
            {'key': number, 'value': label}
            for number, label in MONTHS.items()
        ]
        year_data = [
            {'key': year, 'value': year}
            for year in range(this_year - 100, this_year + 1)
        ]
        serialized['choices'] = [
            {'title': 'day', 'data': day_data},
            {'title': 'month', 'data': month_data},
            {'title': 'year', 'data': year_data},
        ]
        return serialized
class RemoteDateTimeInput(RemoteTimeInput):
    """Serializer for combined date+time inputs."""

    def as_dict(self):
        widget_dict = super(RemoteDateTimeInput, self).as_dict()
        widget_dict['input_type'] = 'datetime'
        return widget_dict
class RemoteCheckboxInput(RemoteWidget):
    """Serializer for checkbox widgets."""

    def as_dict(self):
        serialized = super(RemoteCheckboxInput, self).as_dict()
        # None means the input accepts null values; a configured check_test
        # callable is collapsed to True because callables don't serialize.
        serialized['check_test'] = (
            True if self.widget.check_test is not None else None
        )
        serialized['input_type'] = 'checkbox'
        return serialized
class RemoteSelect(RemoteWidget):
    """Serializer for select widgets; flattens choices into
    value/display pairs."""

    def as_dict(self):
        serialized = super(RemoteSelect, self).as_dict()
        serialized['choices'] = [
            {'value': value, 'display': display}
            for value, display in self.widget.choices
        ]
        serialized['input_type'] = 'select'
        return serialized
class RemoteNullBooleanSelect(RemoteSelect):
    """Serializer for NullBooleanSelect; adds nothing beyond RemoteSelect."""

    def as_dict(self):
        return super(RemoteNullBooleanSelect, self).as_dict()
class RemoteSelectMultiple(RemoteSelect):
    """Multi-select serializer; ``size`` mirrors the number of choices."""

    def as_dict(self):
        serialized = super(RemoteSelectMultiple, self).as_dict()
        serialized['input_type'] = 'selectmultiple'
        serialized['size'] = len(serialized['choices'])
        return serialized
class RemoteRadioInput(RemoteWidget):
    """Serializer for one rendered radio input (not the whole group).

    Builds its dict from scratch instead of calling the base as_dict(),
    because radio inputs expose a different attribute set.
    """

    def as_dict(self):
        source = self.widget
        pairs = (
            ('title', source.__class__.__name__),
            ('name', source.name),
            ('value', source.value),
            ('attrs', source.attrs),
            ('choice_value', source.choice_value),
            ('choice_label', source.choice_label),
            ('index', source.index),
            ('input_type', 'radio'),
        )
        serialized = SortedDict()
        for key, value in pairs:
            serialized[key] = value
        return serialized
class RemoteRadioFieldRenderer(RemoteWidget):
    """Serializer for a radio field renderer (the group of radio inputs).

    Like RemoteRadioInput, bypasses the base as_dict() because renderers
    expose a different attribute set.
    """

    def as_dict(self):
        source = self.widget
        pairs = (
            ('title', source.__class__.__name__),
            ('name', source.name),
            ('value', source.value),
            ('attrs', source.attrs),
            ('choices', source.choices),
            ('input_type', 'radio'),
        )
        serialized = SortedDict()
        for key, value in pairs:
            serialized[key] = value
        return serialized
class RemoteRadioSelect(RemoteSelect):
    """Radio-group serializer; each choice also carries the field name so
    clients can render grouped radio inputs."""

    def as_dict(self):
        serialized = super(RemoteRadioSelect, self).as_dict()
        field_name = self.field_name or ''
        # Rebuild choices (overwriting RemoteSelect's) with the field name.
        serialized['choices'] = [
            {'name': field_name, 'value': value, 'display': display}
            for value, display in self.widget.choices
        ]
        serialized['input_type'] = 'radio'
        return serialized
class RemoteCheckboxSelectMultiple(RemoteSelectMultiple):
    """Serializer for CheckboxSelectMultiple; adds nothing beyond
    RemoteSelectMultiple."""

    def as_dict(self):
        return super(RemoteCheckboxSelectMultiple, self).as_dict()
class RemoteMultiWidget(RemoteWidget):
    """Serializer for Django MultiWidget: includes one dict per sub-widget.

    Bug fixed: the original called ``widget_list.append()`` with no
    argument, which raises TypeError the moment a multi-widget with any
    sub-widgets is serialized.
    """

    def as_dict(self):
        widget_dict = super(RemoteMultiWidget, self).as_dict()

        widget_list = []
        for widget in self.widget.widgets:
            # Wrap each sub-widget generically and serialize it.
            # TODO(review): if a specific Remote* serializer exists for the
            # sub-widget's class, it should be preferred over RemoteWidget.
            widget_list.append(RemoteWidget(widget, self.field_name).as_dict())
        widget_dict['widgets'] = widget_list

        return widget_dict
class RemoteSplitDateTimeWidget(RemoteMultiWidget):
    """MultiWidget serializer for separate date and time sub-widgets."""

    def as_dict(self):
        widget_dict = super(RemoteSplitDateTimeWidget, self).as_dict()
        widget_dict['date_format'] = self.widget.date_format
        widget_dict['time_format'] = self.widget.time_format
        return widget_dict
class RemoteSplitHiddenDateTimeWidget(RemoteSplitDateTimeWidget):
    """Serializer for SplitHiddenDateTimeWidget; adds nothing beyond
    RemoteSplitDateTimeWidget."""

    def as_dict(self):
        return super(RemoteSplitHiddenDateTimeWidget, self).as_dict()
| mit |
mgadi/naemonbox | sources/psdash/gevent-1.0.1/gevent/greenlet.py | 22 | 15658 | # Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
import sys
from gevent.hub import greenlet, getcurrent, get_hub, GreenletExit, Waiter, PY3, iwait, wait
from gevent.timeout import Timeout
from collections import deque
__all__ = ['Greenlet',
'joinall',
'killall']
class SpawnedLink(object):
    """A wrapper around link that calls it in another greenlet.
    Can be called only from main loop.
    """
    __slots__ = ['callback']

    def __init__(self, callback):
        # Fail fast at link() time rather than later inside the hub.
        if not callable(callback):
            raise TypeError("Expected callable: %r" % (callback, ))
        self.callback = callback

    def __call__(self, source):
        # Run the callback in a fresh raw greenlet parented to the hub,
        # passing the finished greenlet (*source*) as the only argument.
        g = greenlet(self.callback, get_hub())
        g.switch(source)

    def __hash__(self):
        return hash(self.callback)

    def __eq__(self, other):
        # Compares equal to another SpawnedLink wrapping the same callback,
        # or to the bare callback itself (used by Greenlet.unlink).
        return self.callback == getattr(other, 'callback', other)

    def __str__(self):
        return str(self.callback)

    def __repr__(self):
        return repr(self.callback)

    def __getattr__(self, item):
        # Delegate unknown attributes to the wrapped callback.
        assert item != 'callback'
        return getattr(self.callback, item)
class SuccessSpawnedLink(SpawnedLink):
    """A wrapper around link that calls it in another greenlet only if source succeed.
    Can be called only from main loop.
    """
    __slots__ = []

    def __call__(self, source):
        # Only fire for greenlets that finished without an exception.
        if source.successful():
            return SpawnedLink.__call__(self, source)
class FailureSpawnedLink(SpawnedLink):
    """A wrapper around link that calls it in another greenlet only if source failed.
    Can be called only from main loop.
    """
    __slots__ = []

    def __call__(self, source):
        # Only fire for greenlets that finished with an exception.
        if not source.successful():
            return SpawnedLink.__call__(self, source)
class Greenlet(greenlet):
    """A light-weight cooperatively-scheduled execution unit."""

    def __init__(self, run=None, *args, **kwargs):
        hub = get_hub()
        greenlet.__init__(self, parent=hub)
        if run is not None:
            self._run = run
        self.args = args
        self.kwargs = kwargs
        self._links = deque()
        self.value = None
        # _NONE is a sentinel: "neither a result nor an exception yet".
        self._exception = _NONE
        self._notifier = None
        self._start_event = None

    @property
    def loop(self):
        # needed by killall
        return self.parent.loop

    if PY3:
        def __bool__(self):
            # True while started but not yet finished.
            return self._start_event is not None and self._exception is _NONE
    else:
        def __nonzero__(self):
            return self._start_event is not None and self._exception is _NONE

    @property
    def started(self):
        # DEPRECATED
        return bool(self)

    def ready(self):
        """Return true if and only if the greenlet has finished execution."""
        return self.dead or self._exception is not _NONE

    def successful(self):
        """Return true if and only if the greenlet has finished execution successfully,
        that is, without raising an error."""
        return self._exception is None

    def __repr__(self):
        classname = self.__class__.__name__
        result = '<%s at %s' % (classname, hex(id(self)))
        formatted = self._formatinfo()
        if formatted:
            result += ': ' + formatted
        return result + '>'

    def _formatinfo(self):
        # Returns "funcname(arg1, arg2, kw=value)" for repr(), truncating
        # each argument repr to 50 characters.
        try:
            return self._formatted_info
        except AttributeError:
            pass
        try:
            result = getfuncname(self.__dict__['_run'])
        except Exception:
            pass
        else:
            args = []
            if self.args:
                args = [repr(x)[:50] for x in self.args]
            if self.kwargs:
                args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
            if args:
                result += '(' + ', '.join(args) + ')'
            # it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
            self._formatted_info = result
            return result
        return ''

    @property
    def exception(self):
        """Holds the exception instance raised by the function if the greenlet has finished with an error.
        Otherwise ``None``.
        """
        if self._exception is not _NONE:
            return self._exception

    def throw(self, *args):
        """Immediatelly switch into the greenlet and raise an exception in it.

        Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
        To raise an exception in a safely manner from any greenlet, use :meth:`kill`.

        If a greenlet was started but never switched to yet, then also
        a) cancel the event that will start it
        b) fire the notifications as if an exception was raised in a greenlet
        """
        if self._start_event is None:
            self._start_event = _dummy_event
        else:
            self._start_event.stop()
        try:
            greenlet.throw(self, *args)
        finally:
            if self._exception is _NONE and self.dead:
                # the greenlet was never switched to before and it will never be, _report_error was not called
                # the result was not set and the links weren't notified. let's do it here.
                # checking that self.dead is true is essential, because throw() does not necessarily kill the greenlet
                # (if the exception raised by throw() is caught somewhere inside the greenlet).
                if len(args) == 1:
                    arg = args[0]
                    #if isinstance(arg, type):
                    if type(arg) is type(Exception):
                        # An exception class: instantiate it for the report.
                        args = (arg, arg(), None)
                    else:
                        # An exception instance.
                        args = (type(arg), arg, None)
                elif not args:
                    args = (GreenletExit, GreenletExit(), None)
                self._report_error(args)

    def start(self):
        """Schedule the greenlet to run in this loop iteration"""
        if self._start_event is None:
            self._start_event = self.parent.loop.run_callback(self.switch)

    def start_later(self, seconds):
        """Schedule the greenlet to run in the future loop iteration *seconds* later"""
        if self._start_event is None:
            self._start_event = self.parent.loop.timer(seconds)
            self._start_event.start(self.switch)

    @classmethod
    def spawn(cls, *args, **kwargs):
        """Return a new :class:`Greenlet` object, scheduled to start.

        The arguments are passed to :meth:`Greenlet.__init__`.
        """
        g = cls(*args, **kwargs)
        g.start()
        return g

    @classmethod
    def spawn_later(cls, seconds, *args, **kwargs):
        """Return a Greenlet object, scheduled to start *seconds* later.

        The arguments are passed to :meth:`Greenlet.__init__`.
        """
        g = cls(*args, **kwargs)
        g.start_later(seconds)
        return g

    def kill(self, exception=GreenletExit, block=True, timeout=None):
        """Raise the exception in the greenlet.

        If block is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
        If block is ``False``, the current greenlet is not unscheduled.

        The function always returns ``None`` and never raises an error.

        `Changed in version 0.13.0:` *block* is now ``True`` by default.
        """
        # XXX this function should not switch out if greenlet is not started but it does
        # XXX fix it (will have to override 'dead' property of greenlet.greenlet)
        if self._start_event is None:
            self._start_event = _dummy_event
        else:
            self._start_event.stop()
        if not self.dead:
            waiter = Waiter()
            # The actual throw happens on the hub via _kill.
            self.parent.loop.run_callback(_kill, self, exception, waiter)
            if block:
                waiter.get()
                self.join(timeout)
        # it should be OK to use kill() in finally or kill a greenlet from more than one place;
        # thus it should not raise when the greenlet is already killed (= not started)

    def get(self, block=True, timeout=None):
        """Return the result the greenlet has returned or re-raise the exception it has raised.

        If block is ``False``, raise :class:`gevent.Timeout` if the greenlet is still alive.
        If block is ``True``, unschedule the current greenlet until the result is available
        or the timeout expires. In the latter case, :class:`gevent.Timeout` is raised.
        """
        if self.ready():
            if self.successful():
                return self.value
            else:
                raise self._exception
        if block:
            switch = getcurrent().switch
            self.rawlink(switch)
            try:
                t = Timeout.start_new(timeout)
                try:
                    result = self.parent.switch()
                    assert result is self, 'Invalid switch into Greenlet.get(): %r' % (result, )
                finally:
                    t.cancel()
            except:
                # unlinking in 'except' instead of finally is an optimization:
                # if switch occurred normally then link was already removed in _notify_links
                # and there's no need to touch the links set.
                # Note, however, that if "Invalid switch" assert was removed and invalid switch
                # did happen, the link would remain, causing another invalid switch later in this greenlet.
                self.unlink(switch)
                raise
            if self.ready():
                if self.successful():
                    return self.value
                else:
                    raise self._exception
        else:
            raise Timeout

    def join(self, timeout=None):
        """Wait until the greenlet finishes or *timeout* expires.
        Return ``None`` regardless.
        """
        if self.ready():
            return
        else:
            switch = getcurrent().switch
            self.rawlink(switch)
            try:
                t = Timeout.start_new(timeout)
                try:
                    result = self.parent.switch()
                    assert result is self, 'Invalid switch into Greenlet.join(): %r' % (result, )
                finally:
                    t.cancel()
            except Timeout:
                self.unlink(switch)
                # Only swallow our own timeout; propagate foreign ones.
                if sys.exc_info()[1] is not t:
                    raise
            except:
                self.unlink(switch)
                raise

    def _report_result(self, result):
        # Record a successful result and schedule link notification.
        self._exception = None
        self.value = result
        if self._links and not self._notifier:
            self._notifier = self.parent.loop.run_callback(self._notify_links)

    def _report_error(self, exc_info):
        # Record a failure; GreenletExit counts as a successful result.
        exception = exc_info[1]
        if isinstance(exception, GreenletExit):
            self._report_result(exception)
            return
        self._exception = exception

        if self._links and not self._notifier:
            self._notifier = self.parent.loop.run_callback(self._notify_links)

        self.parent.handle_error(self, *exc_info)

    def run(self):
        try:
            if self._start_event is None:
                self._start_event = _dummy_event
            else:
                self._start_event.stop()
            try:
                result = self._run(*self.args, **self.kwargs)
            except:
                self._report_error(sys.exc_info())
                return
            self._report_result(result)
        finally:
            # Drop references so the callable/arguments can be collected
            # once the greenlet is finished.
            self.__dict__.pop('_run', None)
            self.__dict__.pop('args', None)
            self.__dict__.pop('kwargs', None)

    def rawlink(self, callback):
        """Register a callable to be executed when the greenlet finishes the execution.

        WARNING: the callable will be called in the HUB greenlet.
        """
        if not callable(callback):
            raise TypeError('Expected callable: %r' % (callback, ))
        self._links.append(callback)
        if self.ready() and self._links and not self._notifier:
            self._notifier = self.parent.loop.run_callback(self._notify_links)

    def link(self, callback, SpawnedLink=SpawnedLink):
        """Link greenlet's completion to a callable.

        The *callback* will be called with this instance as an argument
        once this greenlet's dead. A callable is called in its own greenlet.
        """
        self.rawlink(SpawnedLink(callback))

    def unlink(self, callback):
        """Remove the callback set by :meth:`link` or :meth:`rawlink`"""
        try:
            self._links.remove(callback)
        except ValueError:
            pass

    def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
        """Like :meth:`link` but *callback* is only notified when the greenlet has completed successfully"""
        self.link(callback, SpawnedLink=SpawnedLink)

    def link_exception(self, callback, SpawnedLink=FailureSpawnedLink):
        """Like :meth:`link` but *callback* is only notified when the greenlet dies because of unhandled exception"""
        self.link(callback, SpawnedLink=SpawnedLink)

    def _notify_links(self):
        # Runs in the hub: drain the links queue, reporting (not raising)
        # any error an individual link callback produces.
        while self._links:
            link = self._links.popleft()
            try:
                link(self)
            except:
                self.parent.handle_error((link, self), *sys.exc_info())
class _dummy_event(object):
    # Stand-in for a loop event on greenlets that were never scheduled;
    # stop() must be callable but there is nothing to cancel.
    def stop(self):
        pass

# Replace the class with a shared singleton instance; only stop() is
# ever called on it, so one instance suffices.
_dummy_event = _dummy_event()
def _kill(greenlet, exception, waiter):
    # Runs in the hub on behalf of Greenlet.kill(): throw *exception* into
    # *greenlet*, then wake the waiting killer via *waiter*.
    try:
        greenlet.throw(exception)
    except:
        # XXX do we need this here?
        greenlet.parent.handle_error(greenlet, *sys.exc_info())
    waiter.switch()
def joinall(greenlets, timeout=None, raise_error=False, count=None):
    """Wait for *greenlets* to finish.

    With ``raise_error=True`` the first failing greenlet's exception is
    re-raised; *count*, if given, stops waiting after that many finish.
    """
    if not raise_error:
        wait(greenlets, timeout=timeout)
        return
    for finished in iwait(greenlets, timeout=timeout):
        if getattr(finished, 'exception', None) is not None:
            raise finished.exception
        if count is None:
            continue
        count -= 1
        if count <= 0:
            break
def _killall3(greenlets, exception, waiter):
diehards = []
for g in greenlets:
if not g.dead:
try:
g.throw(exception)
except:
g.parent.handle_error(g, *sys.exc_info())
if not g.dead:
diehards.append(g)
waiter.switch(diehards)
def _killall(greenlets, exception):
for g in greenlets:
if not g.dead:
try:
g.throw(exception)
except:
g.parent.handle_error(g, *sys.exc_info())
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
    # Kill every greenlet in *greenlets* with *exception*. With block=True,
    # wait (up to *timeout*) for survivors to actually die.
    if not greenlets:
        # Nothing to do; also avoids indexing an empty sequence below.
        return
    loop = greenlets[0].loop
    if block:
        waiter = Waiter()
        loop.run_callback(_killall3, greenlets, exception, waiter)
        t = Timeout.start_new(timeout)
        try:
            # _killall3 hands back the greenlets that survived the throw.
            alive = waiter.get()
            if alive:
                joinall(alive, raise_error=False)
        finally:
            t.cancel()
    else:
        loop.run_callback(_killall, greenlets, exception)
# Attribute name that marks bound methods: '__self__' on Python 3,
# 'im_self' on Python 2. Used by getfuncname() below.
if PY3:
    _meth_self = "__self__"
else:
    _meth_self = "im_self"
def getfuncname(func):
    """Return a readable name for *func*, falling back to ``repr``.

    Bound methods and lambdas are shown via repr(); plain functions with a
    usable ``__name__`` return that name.
    """
    # Bound methods carry __self__/im_self; their __name__ alone would be
    # ambiguous, so use repr().
    if hasattr(func, _meth_self):
        return repr(func)
    try:
        name = func.__name__
    except AttributeError:
        return repr(func)
    if name == '<lambda>':
        return repr(func)
    return name
# Sentinel stored in Greenlet._exception while the greenlet has neither a
# result nor an exception yet; distinguishes "not finished" from None.
_NONE = Exception("Neither exception nor value")
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/jedi/evaluate/finder.py | 3 | 14884 | """
Searching for names with given scope and name. This is very central in Jedi and
Python. The name resolution is quite complicated with descripter,
``__getattribute__``, ``__getattr__``, ``global``, etc.
If you want to understand name resolution, please read the first few chapters
in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
Flow checks
+++++++++++
Flow checks are not really mature. There's only a check for ``isinstance``. It
would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
from jedi.parser import tree
from jedi import debug
from jedi.common import unite
from jedi import settings
from jedi.evaluate import representation as er
from jedi.evaluate.instance import AbstractInstanceContext
from jedi.evaluate import compiled
from jedi.evaluate import pep0484
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate.filters import get_global_filters
class NameFinder(object):
    """Resolves a name (``tree.Name`` or plain string) within a context,
    honoring flow information (``isinstance`` checks, predefined names)
    and reporting name/attribute errors to the analysis module."""

    def __init__(self, evaluator, context, name_context, name_or_str, position=None):
        self._evaluator = evaluator
        # Make sure that it's not just a syntax tree node.
        self._context = context
        self._name_context = name_context
        self._name = name_or_str
        if isinstance(name_or_str, tree.Name):
            self._string_name = name_or_str.value
        else:
            self._string_name = name_or_str
        self._position = position
        # Set by filter_name() when a flow (if/for/comp_for) predefines
        # types for this name.
        self._found_predefined_types = None

    @debug.increase_indent
    def find(self, filters, attribute_lookup):
        """
        :params bool attribute_lookup: Tell to logic if we're accessing the
            attribute or the contents of e.g. a function.
        """
        names = self.filter_name(filters)
        if self._found_predefined_types is not None and names:
            # A flow predefined this name; only use it if the flow is
            # actually reachable from here.
            check = flow_analysis.reachability_check(
                self._context, self._context.tree_node, self._name)
            if check is flow_analysis.UNREACHABLE:
                return set()
            return self._found_predefined_types

        types = self._names_to_types(names, attribute_lookup)

        # Report an error unless the name is a parameter (params are
        # allowed to be unresolvable here).
        if not names and not types \
                and not (isinstance(self._name, tree.Name) and
                         isinstance(self._name.parent.parent, tree.Param)):
            if isinstance(self._name, tree.Name):
                if attribute_lookup:
                    analysis.add_attribute_error(
                        self._name_context, self._context, self._name)
                else:
                    message = ("NameError: name '%s' is not defined."
                               % self._string_name)
                    analysis.add(self._name_context, 'name-error', self._name, message)

        return types

    def _get_origin_scope(self):
        # Only tree names have a syntactic origin scope; strings don't.
        if isinstance(self._name, tree.Name):
            return self._name.get_parent_until(tree.Scope, reverse=True)
        else:
            return None

    def get_filters(self, search_global=False):
        origin_scope = self._get_origin_scope()
        if search_global:
            return get_global_filters(self._evaluator, self._context, self._position, origin_scope)
        else:
            return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)

    def filter_name(self, filters):
        """
        Searches names that are defined in a scope (the different
        ``filters``), until a name fits.
        """
        names = []
        if self._context.predefined_names:
            # Walk up from the name to the nearest enclosing flow that
            # predefines names (if/for/comp_for) and use those types.
            # TODO is this ok? node might not always be a tree.Name
            node = self._name
            while node is not None and not node.is_scope():
                node = node.parent
                if node.type in ("if_stmt", "for_stmt", "comp_for"):
                    try:
                        name_dict = self._context.predefined_names[node]
                        types = name_dict[self._string_name]
                    except KeyError:
                        continue
                    else:
                        self._found_predefined_types = types
                        break

        for filter in filters:
            # First filter that yields anything wins (scope precedence).
            names = filter.get(self._name)
            if names:
                break

        debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name,
                  self._context, names, self._position)
        return list(names)

    def _check_getattr(self, inst):
        """Checks for both __getattr__ and __getattribute__ methods"""
        # str is important, because it shouldn't be `Name`!
        name = compiled.create(self._evaluator, self._string_name)

        # This is a little bit special. `__getattribute__` is in Python
        # executed before `__getattr__`. But: I know no use case, where
        # this could be practical and where Jedi would return wrong types.
        # If you ever find something, let me know!
        # We are inversing this, because a hand-crafted `__getattribute__`
        # could still call another hand-crafted `__getattr__`, but not the
        # other way around.
        names = (inst.get_function_slot_names('__getattr__') or
                 inst.get_function_slot_names('__getattribute__'))
        return inst.execute_function_slots(names, name)

    def _names_to_types(self, names, attribute_lookup):
        types = set()
        types = unite(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, types)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not types and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                return types
            # Walk outward through enclosing flows, using flow information
            # (e.g. isinstance checks) to narrow the types.
            while True:
                flow_scope = flow_scope.get_parent_scope(include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return types
def _name_to_types(evaluator, context, tree_name):
    # Infer the types of *tree_name* by dispatching on the kind of
    # statement that defines it.
    types = []
    node = tree_name.get_definition()
    if node.isinstance(tree.ForStmt):
        # "# type:" comment hints on for loops take precedence.
        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if node.isinstance(tree.WithStmt):
        # "# type:" comment hints on with statements take precedence.
        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types
    if node.type in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            # Not predefined: infer by iterating the loop's container and
            # unpacking tuple assignments if any.
            container_types = context.eval_node(node.children[3])
            for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
            types = check_tuple_assignments(evaluator, for_types, tree_name)
    elif node.isinstance(tree.ExprStmt):
        types = _remove_statements(evaluator, context, node, tree_name)
    elif node.isinstance(tree.WithStmt):
        # Reached only when the comment-hint branch above found nothing.
        types = context.eval_node(node.node_from_name(tree_name))
    elif isinstance(node, tree.Import):
        types = imports.infer_import(context, tree_name)
    elif node.type in ('funcdef', 'classdef'):
        types = _apply_decorators(evaluator, context, node)
    elif node.type == 'global_stmt':
        context = evaluator.create_context(context, tree_name)
        finder = NameFinder(evaluator, context, context, str(tree_name))
        filters = finder.get_filters(search_global=True)
        # For global_stmt lookups, we only need the first possible scope,
        # which means the function itself.
        filters = [next(filters)]
        types += finder.find(filters, attribute_lookup=False)
    elif isinstance(node, tree.TryStmt):
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = unite(
            evaluator.execute(t, param.ValuesArguments([]))
            for t in exceptions
        )
    else:
        raise ValueError("Should not happen.")
    return types
def _apply_decorators(evaluator, context, node):
    """
    Returns the function, that should to be executed in the end.
    This is also the places where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_context = er.ClassContext(
            evaluator,
            parent_context=context,
            classdef=node
        )
    else:
        decoratee_context = er.FunctionContext(
            evaluator,
            parent_context=context,
            funcdef=node
        )
    # Apply decorators innermost-first (i.e. bottom-up in source order).
    initial = values = set([decoratee_context])
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values)
        dec_values = context.eval_node(dec.children[1])
        trailer_nodes = dec.children[2:-1]
        if trailer_nodes:
            # Create a trailer and evaluate it.
            # Handles parametrized decorators like @deco(arg).
            trailer = tree.Node('trailer', trailer_nodes)
            trailer.parent = dec
            dec_values = evaluator.eval_trailer(context, dec_values, trailer)

        if not len(dec_values):
            # Unresolvable decorator: fall back to the undecorated context.
            debug.warning('decorator not found: %s on %s', dec, node)
            return initial

        values = unite(dec_value.execute(param.ValuesArguments([values]))
                       for dec_value in dec_values)
        if not len(values):
            debug.warning('not possible to resolve wrappers found %s', node)
            return initial

    debug.dbg('decorator end %s', values)
    return values
def _remove_statements(evaluator, context, stmt, name):
    """
    This is the part where statements are being stripped.

    Due to lazy evaluation, statements like a = func; b = a; b() have to be
    evaluated.

    Bug fixed: the original carried a ``check_instance`` variable that was
    always None, making its "class renames" re-wrapping branch dead code;
    the branch has been removed without behavior change.
    """
    # Explicit "# type:" comment hints win over normal evaluation.
    pep0484types = \
        pep0484.find_type_from_comment_hint_assign(context, stmt, name)
    if pep0484types:
        return pep0484types
    # Copy into a fresh set so callers can safely mutate the result
    # (preserves the original's `types |= ...` copy semantics).
    types = set()
    types |= context.eval_stmt(stmt, seek_name=name)
    return types
def _check_flow_information(context, flow, search_name, pos):
    """ Try to find out the type of a variable just with the information that
    is given by the flows: e.g. It is also responsible for assert checks.::

        if isinstance(k, str):
            k.  # <- completion here

    ensures that `k` is a string.
    """
    if not settings.dynamic_flow_information:
        return None
    result = None
    if flow.is_scope():
        # Check for asserts.
        module_node = flow.get_root_node()
        try:
            names = module_node.used_names[search_name.value]
        except KeyError:
            return None
        # Only usages inside this flow and before the lookup position,
        # nearest-first.
        names = reversed([
            n for n in names
            if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
        ])

        for name in names:
            ass = tree.search_ancestor(name, 'assert_stmt')
            if ass is not None:
                result = _check_isinstance_type(context, ass.assertion(), search_name)
                if result is not None:
                    return result

    if isinstance(flow, (tree.IfStmt, tree.WhileStmt)):
        # children[1::4] picks the test expressions of if/elif branches.
        potential_ifs = [c for c in flow.children[1::4] if c != ':']
        for if_test in reversed(potential_ifs):
            if search_name.start_pos > if_test.end_pos:
                return _check_isinstance_type(context, if_test, search_name)
    return result
def _check_isinstance_type(context, element, search_name):
    # Narrow *search_name* via an ``isinstance(obj, cls_or_tuple)`` test.
    # The assert chain validates the exact expected syntax shape; any
    # mismatch bails out with None (no narrowing possible).
    try:
        assert element.type in ('power', 'atom_expr')
        # this might be removed if we analyze and, etc
        assert len(element.children) == 2
        first, trailer = element.children
        assert isinstance(first, tree.Name) and first.value == 'isinstance'
        assert trailer.type == 'trailer' and trailer.children[0] == '('
        assert len(trailer.children) == 3

        # arglist stuff
        arglist = trailer.children[1]
        args = param.TreeArguments(context.evaluator, context, arglist, trailer)
        param_list = list(args.unpack())
        # Disallow keyword arguments
        assert len(param_list) == 2
        (key1, lazy_context_object), (key2, lazy_context_cls) = param_list
        assert key1 is None and key2 is None

        call = helpers.call_of_leaf(search_name)
        is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
        # Do a simple get_code comparison. They should just have the same code,
        # and everything will be all right.
        assert is_instance_call.get_code(normalized=True) == call.get_code(normalized=True)
    except AssertionError:
        return None

    result = set()
    for cls_or_tup in lazy_context_cls.infer():
        if isinstance(cls_or_tup, iterable.AbstractSequence) and \
                cls_or_tup.array_type == 'tuple':
            # isinstance(x, (A, B, ...)): union over the tuple members.
            for lazy_context in cls_or_tup.py__iter__():
                for context in lazy_context.infer():
                    result |= context.execute_evaluated()
        else:
            result |= cls_or_tup.execute_evaluated()
    return result
def check_tuple_assignments(evaluator, types, name):
    """
    Checks if tuples are assigned.

    For each nesting level of the assignment target, iterate the current
    types up to the target's index and descend into the element found
    there. Returns an empty set if the iterable runs out early.
    """
    lazy_context = None
    for index, node in name.assignment_indexes():
        element_iter = iterable.py__iter__(evaluator, types, node)
        remaining = index + 1
        while remaining:
            try:
                lazy_context = next(element_iter)
            except StopIteration:
                # The iterable is shorter than the target index. Advancing
                # step by step (instead of a default on next()) also avoids
                # spinning needlessly when the index is huge.
                return set()
            remaining -= 1
        types = lazy_context.infer()
    return types
| gpl-3.0 |
avlach/univbris-ocf | optin_manager/src/python/openflow/common/rpc4django/tests/test_rpcviews.py | 4 | 7325 | '''
Views Tests
-------------------------
'''
import unittest
import xmlrpclib
from rpc4django.jsonrpcdispatcher import json, JSONRPC_SERVICE_ERROR
from expedient.common.tests.manager import SettingsTestCase
from django.core.urlresolvers import reverse
class TestRPCViews(SettingsTestCase):
    """Integration tests for the rpc4django dispatcher views.

    Covers the HTML method summary, XML-RPC and JSON-RPC handling,
    content-type based protocol detection, error responses, HTTP access
    control headers, and per-URL method filtering.
    """

    urls = "expedient.common.rpc4django.tests.test_urls"

    def setUp(self):
        # Install only the apps/middleware the dispatcher needs so these
        # tests run independently of the surrounding project settings.
        self.settings_manager.set(
            INSTALLED_APPS=(
                "expedient.common.rpc4django",
                "expedient.common.rpc4django.tests.testmod",
            ),
            MIDDLEWARE_CLASSES=(
                'django.middleware.common.CommonMiddleware',
                'django.middleware.transaction.TransactionMiddleware',
                'django.middleware.csrf.CsrfViewMiddleware',
                'django.contrib.sessions.middleware.SessionMiddleware',
                'django.contrib.auth.middleware.AuthenticationMiddleware',
            )
        )
        self.rpc_path = reverse("serve_rpc_request")
        self.ns_rpc_path = reverse("my_url_name")

    def test_methodsummary(self):
        """A plain GET renders the human-readable method summary page."""
        response = self.client.get(self.rpc_path)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template.name, 'rpc4django/rpcmethod_summary.html')

    def test_xmlrequests(self):
        """A well-formed XML-RPC POST yields a parseable XML-RPC response."""
        data = '<?xml version="1.0"?><methodCall><methodName>system.listMethods</methodName><params></params></methodCall>'
        response = self.client.post(self.rpc_path, data, 'text/xml')
        self.assertEqual(response.status_code, 200)
        xmlrpclib.loads(response.content)  # this will throw an exception with bad data

    def test_jsonrequests(self):
        """JSON-RPC requests are answered with well-formed JSON-RPC replies."""
        data = '{"params":[],"method":"system.listMethods","id":123}'
        response = self.client.post(self.rpc_path, data, 'application/json')
        self.assertEqual(response.status_code, 200)
        jsondict = json.loads(response.content)
        self.assertTrue(jsondict['error'] is None)
        self.assertEqual(jsondict['id'], 123)
        self.assertTrue(isinstance(jsondict['result'], list))

        # 'text/javascript' must also be accepted as a JSON-RPC content type.
        data = '{"params":[],"method":"system.describe","id":456}'
        response = self.client.post(self.rpc_path, data, 'text/javascript')
        self.assertEqual(response.status_code, 200)
        jsondict = json.loads(response.content)
        self.assertTrue(jsondict['error'] is None)
        self.assertEqual(jsondict['id'], 456)
        self.assertTrue(isinstance(jsondict['result'], dict))

    def test_typedetection(self):
        """With an ambiguous content type the dispatcher sniffs the payload."""
        data = '{"params":[],"method":"system.listMethods","id":123}'
        response = self.client.post(self.rpc_path, data, 'text/plain')
        self.assertEqual(response.status_code, 200)
        jsondict = json.loads(response.content)
        self.assertTrue(jsondict['error'] is None)
        self.assertEqual(jsondict['id'], 123)
        self.assertTrue(isinstance(jsondict['result'], list))

        data = '<?xml version="1.0"?><methodCall><methodName>system.listMethods</methodName><params></params></methodCall>'
        response = self.client.post(self.rpc_path, data, 'text/plain')
        self.assertEqual(response.status_code, 200)
        xmlrpclib.loads(response.content)  # this will throw an exception with bad data

        # jsonrpc request with xmlrpc data (should be error)
        data = '<?xml version="1.0"?><methodCall><methodName>system.listMethods</methodName><params></params></methodCall>'
        response = self.client.post(self.rpc_path, data, 'application/json')
        self.assertEqual(response.status_code, 200)
        jsondict = json.loads(response.content)
        self.assertTrue(jsondict['result'] is None)
        self.assertEqual(jsondict['id'], '')
        self.assertTrue(isinstance(jsondict['error'], dict))

        # xmlrpc request with jsonrpc data (should be an XML-RPC fault)
        data = '{"params":[],"method":"system.listMethods","id":123}'
        try:
            response = self.client.post(self.rpc_path, data, 'text/xml')
        except Exception:
            # for some reason, this throws an expat error
            # but only in python 2.4
            return
        self.assertEqual(response.status_code, 200)
        try:
            xmlrpclib.loads(response.content)
            self.fail('parse error expected')
        except xmlrpclib.Fault:
            pass

    def test_badrequests(self):
        """Calls with bad arguments or unknown methods produce RPC errors."""
        # system.methodHelp without its required argument
        data = '{"params":[],"method":"system.methodHelp","id":456}'
        response = self.client.post(self.rpc_path, data, 'application/json')
        self.assertEqual(response.status_code, 200)
        jsondict = json.loads(response.content)
        self.assertTrue(jsondict['error'] is not None)
        self.assertEqual(jsondict['id'], 456)
        self.assertTrue(jsondict['result'] is None)
        self.assertEqual(jsondict['error']['code'], JSONRPC_SERVICE_ERROR)

        # unknown method name via XML-RPC
        data = '<?xml version="1.0"?><methodCall><methodName>method.N0t.Exists</methodName><params></params></methodCall>'
        response = self.client.post(self.rpc_path, data, 'text/xml')
        self.assertEqual(response.status_code, 200)
        try:
            xmlrpclib.loads(response.content)
            self.fail('parse error expected')
        except xmlrpclib.Fault as fault:
            # Fixed: use the version-portable 'except ... as ...' syntax,
            # consistent with test_bad_url_name below.
            self.assertEqual(fault.faultCode, 1)

    def test_httpaccesscontrol(self):
        """OPTIONS requests advertise the CORS headers."""
        import django
        t = django.VERSION
        if t[0] < 1 or (t[0] == 1 and t[1] < 1):
            # options requests can only be tested by django 1.1+
            self.fail('This version of django "%s" does not support http access control' %str(t))
        response = self.client.options(self.rpc_path, '', 'text/plain')
        self.assertEqual(response['Access-Control-Allow-Methods'], 'POST, GET, OPTIONS')
        self.assertEqual(response['Access-Control-Max-Age'], '0')

    def test_good_url_name(self):
        """
        Make sure we call functions based on the url they are arriving on.
        """
        data = xmlrpclib.dumps((5, 4), "subtract")
        response = self.client.post(self.rpc_path, data, 'text/xml')
        self.assertEqual(response.status_code, 200)
        result, methname = xmlrpclib.loads(response.content)
        self.assertEqual(result, (1,))
        self.assertEqual(methname, None)

        data = xmlrpclib.dumps((5, 4), "product")
        response = self.client.post(self.ns_rpc_path, data, 'text/xml')
        self.assertEqual(response.status_code, 200)
        result, methname = xmlrpclib.loads(response.content)
        self.assertEqual(result, (20,))
        self.assertEqual(methname, None)

    def test_bad_url_name(self):
        """
        Make sure we cannot call functions using the wrong url_name.
        """
        data = xmlrpclib.dumps((5, 4), "subtract")
        response = self.client.post(self.ns_rpc_path, data, 'text/xml')
        self.assertEqual(response.status_code, 200)
        try:
            result, methname = xmlrpclib.loads(response.content)
            self.fail("Expected xmlrpclib Fault")
        except xmlrpclib.Fault as fault:
            self.assertEqual(fault.faultCode, 1)
            self.assertTrue(fault.faultString.endswith('method "subtract" is not supported'))
# Allow running this test module directly, outside the Django test runner.
if __name__ == '__main__':
    unittest.main()
lanthaler/schemaorg | lib/rdflib/plugins/parsers/pyRdfa/transform/prototype.py | 23 | 1357 | # -*- coding: utf-8 -*-
"""
Encoding of the RDFa prototype vocabulary behavior. This means processing the graph by adding and removing triples
based on triples using the rdfa:Prototype and rdfa:ref class and property, respectively. For details, see the HTML5+RDFa document.
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: Ivan Herman, ivan@w3.org
@version: $Id: prototype.py,v 1.1 2013-01-18 09:41:49 ivan Exp $
$Date: 2013-01-18 09:41:49 $
"""
import rdflib
from rdflib import Namespace
# rdflib moved its RDF namespace object between major releases; import the
# right one so this module works with both pre-3.0 and 3.0+ rdflib.
if rdflib.__version__ >= "3.0.0" :
    from rdflib import RDF as ns_rdf
else :
    from rdflib.RDF import RDFNS as ns_rdf

from .. import ns_rdfa

# RDFa vocabulary terms driving the prototype/copy expansion below.
Prototype = ns_rdfa["Pattern"]
pref = ns_rdfa["copy"]
def handle_prototypes(graph) :
    """Expand rdfa:Pattern / rdfa:copy references in ``graph`` in place.

    For every triple (subject, rdfa:copy, pattern) where the pattern is
    explicitly typed rdfa:Pattern, the pattern's triples (except the typing
    triple) are copied onto the subject; the pattern's triples and the copy
    reference itself are then removed from the graph.
    """
    pending_removal = set()
    for (subject, copy_pred, pattern) in graph.triples((None, pref, None)) :
        # Act only on references whose target really is a Pattern.
        if (pattern, ns_rdf["type"], Prototype) in graph :
            pending_removal.add((pattern, ns_rdf["type"], Prototype))
            pending_removal.add((subject, copy_pred, pattern))
            # Copy each non-typing triple of the pattern onto the subject.
            for (_, prop, obj) in graph.triples((pattern, None, None)) :
                if not ( prop == ns_rdf["type"] and obj == Prototype ) :
                    graph.add((subject, prop, obj))
                    pending_removal.add((pattern, prop, obj))
    for triple in pending_removal :
        graph.remove(triple)
lra/boto | boto/ec2/elb/securitygroup.py | 152 | 1576 | # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SecurityGroup(object):
    """Minimal model of an ELB security group, populated from SAX events."""

    # XML element name -> attribute that stores the element's text content.
    _ELEMENT_TO_ATTR = {
        'GroupName': 'name',
        'OwnerAlias': 'owner_alias',
    }

    def __init__(self, connection=None):
        self.name = None
        self.owner_alias = None

    def __repr__(self):
        return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias)

    def startElement(self, name, attrs, connection):
        # This element has no nested structures to hand off.
        pass

    def endElement(self, name, value, connection):
        attr = self._ELEMENT_TO_ATTR.get(name)
        if attr is not None:
            setattr(self, attr, value)
| mit |
Azulinho/ansible | lib/ansible/modules/cloud/cloudstack/cs_iso.py | 49 | 14061 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: interface stability and support ownership.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_iso
short_description: Manages ISO images on Apache CloudStack based clouds.
description:
- Register and remove ISO images.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the ISO.
required: true
display_text:
description:
- Display text of the ISO.
- If not specified, C(name) will be used.
required: false
default: null
version_added: "2.4"
url:
description:
- URL where the ISO can be downloaded from. Required if C(state) is present.
required: false
default: null
os_type:
description:
- Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present.
required: false
default: null
is_ready:
description:
- This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g.
successfully downloaded and installed. Recommended to set it to C(false).
required: false
default: false
is_public:
description:
- Register the ISO to be publicly available to all users. Only used if C(state) is present.
required: false
default: null
is_featured:
description:
- Register the ISO to be featured. Only used if C(state) is present.
required: false
default: null
is_dynamically_scalable:
description:
- Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM cpu/memory. Only used if C(state) is present.
required: false
default: null
checksum:
description:
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
required: false
default: null
bootable:
description:
- Register the ISO to be bootable. Only used if C(state) is present.
required: false
default: null
domain:
description:
- Domain the ISO is related to.
required: false
default: null
account:
description:
- Account the ISO is related to.
required: false
default: null
project:
description:
- Name of the project the ISO to be registered in.
required: false
default: null
zone:
description:
- Name of the zone you wish the ISO to be registered or deleted from.
- If not specified, first zone found will be used.
required: false
default: null
cross_zones:
description:
- Whether the ISO should be synced or removed across zones.
- Mutually exclusive with C(zone).
required: false
default: false
version_added: "2.4"
iso_filter:
description:
- Name of the filter used to search for the ISO.
required: false
default: 'self'
choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
state:
description:
- State of the ISO.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
version_added: "2.3"
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Register an ISO if ISO name does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
# Register an ISO with given name if ISO md5 checksum does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
checksum: 0b31bccccb048d20b551f70830bb7ad0
# Remove an ISO by name
- local_action:
module: cs_iso
name: Debian 7 64-bit
state: absent
# Remove an ISO by checksum
- local_action:
module: cs_iso
name: Debian 7 64-bit
checksum: 0b31bccccb048d20b551f70830bb7ad0
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ISO.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the ISO.
returned: success
type: string
sample: Debian 7 64-bit
display_text:
description: Text to be displayed of the ISO.
returned: success
type: string
sample: Debian 7.7 64-bit minimal 2015-03-19
zone:
description: Name of zone the ISO is registered in.
returned: success
type: string
sample: zuerich
status:
description: Status of the ISO.
returned: success
type: string
sample: Successfully Installed
is_ready:
description: True if the ISO is ready to be deployed from.
returned: success
type: boolean
sample: true
is_public:
description: True if the ISO is public.
returned: success
type: boolean
sample: true
version_added: "2.4"
bootable:
description: True if the ISO is bootable.
returned: success
type: boolean
sample: true
version_added: "2.4"
is_featured:
description: True if the ISO is featured.
returned: success
type: boolean
sample: true
version_added: "2.4"
format:
description: Format of the ISO.
returned: success
type: string
sample: ISO
version_added: "2.4"
os_type:
description: Type of the OS.
returned: success
type: string
sample: CentOS 6.5 (64-bit)
version_added: "2.4"
checksum:
description: MD5 checksum of the ISO.
returned: success
type: string
sample: 0b31bccccb048d20b551f70830bb7ad0
created:
description: Date of registering.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
cross_zones:
description: true if the ISO is managed across all zones, false otherwise.
returned: success
type: boolean
sample: false
version_added: "2.4"
domain:
description: Domain the ISO is related to.
returned: success
type: string
sample: example domain
account:
description: Account the ISO is related to.
returned: success
type: string
sample: example account
project:
description: Project the ISO is related to.
returned: success
type: string
sample: example project
tags:
description: List of resource tags associated with the ISO.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
version_added: "2.4"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackIso(AnsibleCloudStack):
    """CloudStack ISO management: register, update and remove ISO images."""

    def __init__(self, module):
        super(AnsibleCloudStackIso, self).__init__(module)
        # Map CloudStack API result keys to this module's return value keys.
        self.returns = {
            'checksum': 'checksum',
            'status': 'status',
            'isready': 'is_ready',
            'crossZones': 'cross_zones',
            'format': 'format',
            'ostypename': 'os_type',
            'isfeatured': 'is_featured',
            'bootable': 'bootable',
            'ispublic': 'is_public',
        }
        # Cached ISO dict as returned by the API; populated lazily.
        self.iso = None

    def _get_common_args(self):
        """Return the arguments shared by registerIso and updateIso."""
        return {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
            'ostypeid': self.get_os_type('id'),
            'bootable': self.module.params.get('bootable'),
        }

    def register_iso(self):
        """Register a new ISO; returns the API's ISO representation."""
        args = self._get_common_args()
        args.update({
            'domainid': self.get_domain('id'),
            'account': self.get_account('name'),
            'projectid': self.get_project('id'),
            'checksum': self.module.params.get('checksum'),
            'isfeatured': self.module.params.get('is_featured'),
            'ispublic': self.module.params.get('is_public'),
        })

        if not self.module.params.get('cross_zones'):
            args['zoneid'] = self.get_zone(key='id')
        else:
            # zoneid=-1 tells the API to register the ISO across all zones.
            args['zoneid'] = -1

        if args['bootable'] and not args['ostypeid']:
            # Fixed typo in user-facing message ("requried" -> "required").
            self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")

        args['url'] = self.module.params.get('url')
        if not args['url']:
            # Fixed typo in user-facing message ("requried" -> "required").
            self.module.fail_json(msg="URL is required.")

        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.query_api('registerIso', **args)
            self.iso = res['iso'][0]
        return self.iso

    def present_iso(self):
        """Ensure the ISO exists and is up to date; returns the ISO dict."""
        iso = self.get_iso()
        if not iso:
            iso = self.register_iso()
        else:
            iso = self.update_iso(iso)

        if iso:
            iso = self.ensure_tags(resource=iso, resource_type='ISO')
            self.iso = iso
        return iso

    def update_iso(self, iso):
        """Update an existing ISO in place if any common argument changed."""
        args = self._get_common_args()
        args.update({
            'id': iso['id'],
        })
        if self.has_changed(args, iso):
            self.result['changed'] = True

            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            else:
                # Workaround API does not return cross_zones=true
                self.result['cross_zones'] = True
                args['zoneid'] = -1

            if not self.module.check_mode:
                res = self.query_api('updateIso', **args)
                self.iso = res['iso']
        return self.iso

    def get_iso(self):
        """Look up the ISO by checksum (preferred) or by name; cached."""
        if not self.iso:
            args = {
                'isready': self.module.params.get('is_ready'),
                'isofilter': self.module.params.get('iso_filter'),
                'domainid': self.get_domain('id'),
                'account': self.get_account('name'),
                'projectid': self.get_project('id'),
            }

            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')

            # if checksum is set, we only look on that.
            checksum = self.module.params.get('checksum')
            if not checksum:
                args['name'] = self.module.params.get('name')

            isos = self.query_api('listIsos', **args)
            if isos:
                if not checksum:
                    self.iso = isos['iso'][0]
                else:
                    for i in isos['iso']:
                        if i['checksum'] == checksum:
                            self.iso = i
                            break
        return self.iso

    def absent_iso(self):
        """Delete the ISO if it exists; returns the (previous) ISO dict."""
        iso = self.get_iso()
        if iso:
            self.result['changed'] = True

            args = {
                'id': iso['id'],
                'projectid': self.get_project('id'),
            }

            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')

            if not self.module.check_mode:
                res = self.query_api('deleteIso', **args)

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'iso')
        return iso

    def get_result(self, iso):
        super(AnsibleCloudStackIso, self).get_result(iso)
        # Workaround API does not return cross_zones=true
        if self.module.params.get('cross_zones'):
            self.result['cross_zones'] = True
            if 'zone' in self.result:
                del self.result['zone']
        return self.result
def main():
    """Module entry point: parse arguments, apply the requested state,
    and exit with the resulting facts."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        url=dict(),
        os_type=dict(),
        zone=dict(),
        cross_zones=dict(type='bool', default=False),
        iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
        domain=dict(),
        account=dict(),
        project=dict(),
        checksum=dict(),
        is_ready=dict(type='bool', default=False),
        bootable=dict(type='bool'),
        is_featured=dict(type='bool'),
        is_dynamically_scalable=dict(type='bool'),
        state=dict(choices=['present', 'absent'], default='present'),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
    ))

    # 'zone' and 'cross_zones' are mutually exclusive: an ISO is either
    # managed in one zone or synchronised across all of them.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        mutually_exclusive=(
            ['zone', 'cross_zones'],
        ),
        supports_check_mode=True
    )

    acs_iso = AnsibleCloudStackIso(module)

    # Dispatch on the desired state; 'present' is the default.
    state = module.params.get('state')
    if state in ['absent']:
        iso = acs_iso.absent_iso()
    else:
        iso = acs_iso.present_iso()

    result = acs_iso.get_result(iso)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
quru/wagtail | wagtail/wagtailforms/views.py | 3 | 3887 | from __future__ import absolute_import, unicode_literals
import csv
import datetime
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.forms import SelectDateForm
from wagtail.wagtailforms.models import FormSubmission, get_forms_for_user
def index(request):
    """List the form pages the current user is allowed to inspect."""
    pages = get_forms_for_user(request.user)
    paginator, pages = paginate(request, pages)

    return render(request, 'wagtailforms/index.html', {
        'form_pages': pages,
    })
def delete_submission(request, page_id, submission_id):
    """Delete one form submission after POST confirmation.

    Raises PermissionDenied unless the user may manage the form page.
    GET renders a confirmation page; POST performs the deletion and
    redirects back to the submission listing.
    """
    if not get_forms_for_user(request.user).filter(id=page_id).exists():
        raise PermissionDenied

    page = get_object_or_404(Page, id=page_id)
    # Scope the lookup to this page: previously any submission id could be
    # deleted through a page the user happened to have permission on.
    submission = get_object_or_404(FormSubmission, id=submission_id, page=page)

    if request.method == 'POST':
        submission.delete()
        messages.success(request, _("Submission deleted."))
        return redirect('wagtailforms:list_submissions', page_id)

    return render(request, 'wagtailforms/confirm_delete.html', {
        'page': page,
        'submission': submission
    })
def list_submissions(request, page_id):
    """List (or export as CSV) the submissions of one form page.

    Supports date-range filtering via SelectDateForm; when the query
    string contains ``action=CSV`` a CSV attachment is returned instead
    of the paginated HTML listing.
    """
    form_page = get_object_or_404(Page, id=page_id).specific

    if not get_forms_for_user(request.user).filter(id=page_id).exists():
        raise PermissionDenied

    # (clean_name, label) pairs for every field defined on this form page.
    data_fields = [
        (field.clean_name, field.label)
        for field in form_page.form_fields.all()
    ]

    submissions = FormSubmission.objects.filter(page=form_page)

    select_date_form = SelectDateForm(request.GET)
    if select_date_form.is_valid():
        date_from = select_date_form.cleaned_data.get('date_from')
        date_to = select_date_form.cleaned_data.get('date_to')

        # careful: date_to should be increased by 1 day since the submit_time
        # is a time so it will always be greater
        if date_to:
            date_to += datetime.timedelta(days=1)
        if date_from and date_to:
            submissions = submissions.filter(submit_time__range=[date_from, date_to])
        elif date_from and not date_to:
            submissions = submissions.filter(submit_time__gte=date_from)
        elif not date_from and date_to:
            submissions = submissions.filter(submit_time__lte=date_to)

    if request.GET.get('action') == 'CSV':
        # return a CSV instead
        response = HttpResponse(content_type='text/csv; charset=utf-8')
        response['Content-Disposition'] = 'attachment;filename=export.csv'

        writer = csv.writer(response)
        header_row = ['Submission date'] + [smart_str(label) for name, label in data_fields]
        writer.writerow(header_row)
        for s in submissions:
            data_row = [s.submit_time]
            form_data = s.get_data()
            for name, label in data_fields:
                data_row.append(smart_str(form_data.get(name)))
            writer.writerow(data_row)
        return response

    paginator, submissions = paginate(request, submissions)

    # Build the rows the template renders: one dict per submission with the
    # submission id and its field values in data_fields order.
    data_headings = [label for name, label in data_fields]
    data_rows = []
    for s in submissions:
        form_data = s.get_data()
        data_row = [s.submit_time] + [form_data.get(name) for name, label in data_fields]
        data_rows.append({
            "model_id": s.id,
            "fields": data_row
        })

    return render(request, 'wagtailforms/index_submissions.html', {
        'form_page': form_page,
        'select_date_form': select_date_form,
        'submissions': submissions,
        'data_headings': data_headings,
        'data_rows': data_rows
    })
| bsd-3-clause |
sdhash/sdhash | external/tools/build/v2/build/property.py | 10 | 17674 | # Status: ported, except for tests and --abbreviate-paths.
# Base revision: 64070
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action
import b2.util.set
from b2.manager import get_manager
# Precompiled regexes used when parsing property strings of the form
# "<cond>,...:<feature>value" and '&&'-joined path values.
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')
class Property(object):
    """ A single (feature, value) pair, optionally guarded by a condition.

    The condition, when non-empty, is a list of Property instances that
    must all be in effect for this property to apply.
    """

    __slots__ = ('_feature', '_value', '_condition')

    def __init__(self, f, value, condition = []):
        # NOTE(review): the mutable default for 'condition' is shared across
        # calls; safe only while callers never mutate it.
        # A feature name string is accepted and resolved for convenience.
        if type(f) == type(""):
            f = feature.get(f)
        # At present, single property has a single value.
        assert type(value) != type([])
        # Only free features may carry ':' inside their value.
        assert(f.free() or value.find(':') == -1)
        self._feature = f
        self._value = value
        self._condition = condition

    def feature(self):
        """ Return the Feature object of this property. """
        return self._feature

    def value(self):
        """ Return the value string. """
        return self._value

    def condition(self):
        """ Return the (possibly empty) list of condition properties. """
        return self._condition

    def to_raw(self):
        """ Return the string form, e.g. "<feature>value", prefixed with
        "cond1,cond2:" for conditional properties. """
        result = "<" + self._feature.name() + ">" + str(self._value)
        if self._condition:
            result = ",".join(str(p) for p in self._condition) + ':' + result
        return result

    def __str__(self):
        return self.to_raw()

    def __hash__(self):
        # FIXME: consider if this class should be value-is-identity one
        return hash((self._feature, self._value, tuple(self._condition)))

    def __cmp__(self, other):
        # Python 2 style total ordering over (feature, value, condition).
        return cmp((self._feature, self._value, self._condition),
                   (other._feature, other._value, other._condition))
def create_from_string(s, allow_condition=False):
    """ Parse a property string such as "<toolset>gcc" or a conditional
    one such as "<variant>debug:<define>FOO" into a Property instance.

    Conditional properties are accepted only when 'allow_condition' is
    true. Implicit values (e.g. "gcc") are resolved to their implied
    feature. Invalid strings are reported through the manager's errors.
    """
    condition = []

    if __re_has_condition.search(s):
        if not allow_condition:
            raise BaseException("Conditional property is not allowed in this context")
        m = __re_separate_condition_and_property.match(s)
        condition = m.group(1)
        s = m.group(2)

    # FIXME: break dependency cycle
    from b2.manager import get_manager

    feature_name = get_grist(s)
    if not feature_name:
        if feature.is_implicit_value(s):
            f = feature.implied_feature(s)
            value = s
        else:
            raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
    else:
        f = feature.get(feature_name)

        value = get_value(s)
        if not value:
            get_manager().errors()("Invalid property '%s' -- no value specified" % s)

    if condition:
        condition = [create_from_string(x) for x in condition.split(',')]

    return Property(f, value, condition)
def create_from_strings(string_list, allow_condition=False):
    """ Parse each string in 'string_list' into a Property instance. """
    result = []
    for raw in string_list:
        result.append(create_from_string(raw, allow_condition))
    return result
def reset ():
    """ Clear the module state. This is mainly for testing purposes.
    """
    global __results

    # A cache of results from as_path
    __results = {}

# Initialise the module-level cache on import.
reset ()
def path_order (x, y):
    """ Helper for as_path, below. Orders properties with the implicit ones
    first, and within the two sections in alphabetical order of feature
    name.
    """
    if x == y:
        return 0

    x_grist = get_grist (x)
    y_grist = get_grist (y)

    # Implicit (ungristed) properties sort before explicit ones.
    if y_grist and not x_grist:
        return -1
    if x_grist and not y_grist:
        return 1

    # Both implicit: compare their expanded subfeature forms instead.
    if not x_grist:
        x = feature.expand_subfeatures([x])
        y = feature.expand_subfeatures([y])

    return -1 if x < y else (1 if x > y else 0)
def identify(string):
    """ Identity mapping; returns 'string' unchanged. """
    return string
# Uses Property
def refine (properties, requirements):
    """ Refines 'properties' by overriding any non-free properties
    for which a different value is specified in 'requirements'.
    Conditional requirements are just added without modification.
    Returns the resulting list of properties.
    """
    # The result has no duplicates, so we store it in a set
    result = set()

    # Records all requirements.
    required = {}

    # All the elements of requirements should be present in the result
    # Record them so that we can handle 'properties'.
    for r in requirements:
        # Don't consider conditional requirements.
        if not r.condition():
            required[r.feature()] = r

    for p in properties:
        # Skip conditional properties
        if p.condition():
            result.add(p)
        # No processing for free properties
        elif p.feature().free():
            result.add(p)
        else:
            if required.has_key(p.feature()):
                # A requirement exists for this feature: it wins.
                result.add(required[p.feature()])
            else:
                result.add(p)

    # 'unique' gives a deterministic order while dropping the duplicates
    # introduced by appending the requirements themselves.
    return sequence.unique(list(result) + requirements)
def translate_paths (properties, path):
    """ Interpret all path properties in 'properties' as relative to 'path'
    The property values are assumed to be in system-specific form, and
    will be translated into normalized form.
    """
    result = []

    for p in properties:
        if p.feature().path():
            # A path value may consist of several paths joined with '&&';
            # rebase each component individually.
            values = __re_two_ampersands.split(p.value())

            new_value = "&&".join(os.path.join(path, v) for v in values)

            # Only create a new Property when the value actually changed.
            if new_value != p.value():
                result.append(Property(p.feature(), new_value, p.condition()))
            else:
                result.append(p)

        else:
            result.append (p)

    return result
def translate_indirect(properties, context_module):
    """Assumes that all feature values that start with '@' are
    names of rules, used in 'context-module'. Such rules can be
    either local to the module or global. Qualified local rules
    with the name of the module."""
    translated = []
    for prop in properties:
        value = prop.value()
        if value[0] == '@':
            # Qualify the rule name and register it with the engine.
            qualified = qualify_jam_action(value[1:], context_module)
            get_manager().engine().register_bjam_action(qualified)
            translated.append(Property(prop.feature(), '@' + qualified, prop.condition()))
        else:
            translated.append(prop)
    return translated
def validate (properties):
    """ Exit with error if any of the properties is not valid.
    properties may be a single property or a sequence of properties.
    """
    # Normalise a single property string to a one-element sequence.
    if isinstance(properties, str):
        properties = [properties]
    for prop in properties:
        __validate1(prop)
def expand_subfeatures_in_conditions (properties):
    """ Expand subfeature values inside the conditions of conditional
    properties; unconditional properties pass through unchanged. """
    result = []

    for p in properties:
        if not p.condition():
            result.append(p)
        else:
            expanded = []
            for c in p.condition():
                if c.feature().name().startswith("toolset") or c.feature().name() == "os":
                    # It is common that a condition includes a toolset which
                    # was never defined, or mentions subfeatures which were
                    # never defined. In that case, validation would only
                    # produce a spurious error, so don't validate.
                    expanded.extend(feature.expand_subfeatures ([c], True))
                else:
                    expanded.extend(feature.expand_subfeatures([c]))
            result.append(Property(p.feature(), p.value(), expanded))
    return result
# FIXME: this should go
def split_conditional (property):
    """ If 'property' is conditional property, returns
    condition and the property, e.g
    <variant>debug,<toolset>gcc:<inlining>full will become
    <variant>debug,<toolset>gcc <inlining>full.
    Otherwise, returns None.
    """
    # (Docstring corrected: the non-conditional case returns None,
    # not an empty string.)
    m = __re_split_conditional.match (property)

    if m:
        return (m.group (1), '<' + m.group (2))

    return None
def select (features, properties):
    """ Selects properties which correspond to any of the given features.
    """
    # add any missing angle brackets
    features = add_grist (features)

    # (Removed an unused 'result = []' local left over from a loop-based
    # implementation.)
    return [p for p in properties if get_grist(p) in features]
def validate_property_sets (sets):
    """ Validate every property set in 'sets', exiting on the first error. """
    for property_set in sets:
        validate(property_set.all())
def evaluate_conditionals_in_context (properties, context):
    """ Removes all conditional properties whose conditions are not met.
    For those with met conditions, removes the condition. Properties
    in conditions are looked up in 'context'.
    """
    unconditional = []
    conditional = []

    # Partition the input into conditional and unconditional properties.
    for prop in properties:
        if prop.condition():
            conditional.append(prop)
        else:
            unconditional.append(prop)

    result = unconditional[:]
    for prop in conditional:
        # The condition holds when every one of its properties is present
        # in the context.  FIXME: probably inefficient
        if all(c in context for c in prop.condition()):
            result.append(Property(prop.feature(), prop.value()))

    return result
def change (properties, feature, value = None):
    """ Returns a modified version of properties with all values of the
    given feature replaced by the given value.
    If 'value' is None the feature will be removed.
    """
    result = []

    feature = add_grist (feature)

    for p in properties:
        if get_grist (p) == feature:
            # Matching feature: replace with the new value, or drop the
            # property entirely when no value was given.
            if value:
                result.append (replace_grist (value, feature))
        else:
            result.append (p)

    return result
################################################################
# Private functions
def __validate1 (property):
    """ Exit with error if property is not valid.

    Free features accept arbitrary values, so only non-free features are
    checked against the feature's declared value set.
    """
    # (Removed an unused 'msg = None' local.)
    if not property.feature().free():
        feature.validate_value_string (property.feature(), property.value())
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
    """Returns a property set which includes all the elements
    in 'properties' that do not have attributes listed in 'attributes'."""
    result = []
    for e in properties:
        has_common_features = False
        for a in feature.attributes(get_grist(e)):
            if a in attributes:
                has_common_features = True
                break

        if not has_common_features:
            # Bug fix: the original used 'result += e', which extends the
            # list with the individual *characters* of the property string
            # instead of appending the property itself.
            result.append(e)
    return result
def take(attributes, properties):
    """Returns a property set which includes all
    properties in 'properties' that have any of 'attributes'."""
    return [p for p in properties
            if b2.util.set.intersection(attributes, feature.attributes(get_grist(p)))]
def translate_dependencies(properties, project_id, location):
    """ Rebase dependency property values so they refer to targets
    unambiguously: plain relative references are qualified with
    'project_id', and relative "dir//target" references are made
    absolute using 'location'. Non-dependency properties pass through
    unchanged. """
    result = []
    for p in properties:
        if not p.feature().dependency():
            result.append(p)
        else:
            v = p.value()
            m = re.match("(.*)//(.*)", v)
            if m:
                rooted = m.group(1)
                if rooted[0] == '/':
                    # Either project id or absolute Linux path, do nothing.
                    pass
                else:
                    rooted = os.path.join(os.getcwd(), location, rooted)

                result.append(Property(p.feature(), rooted + "//" + m.group(2), p.condition()))
            elif os.path.isabs(v):
                # Already an absolute path; unambiguous as-is.
                result.append(p)
            else:
                # Plain relative reference: qualify with the project id.
                result.append(Property(p.feature(), project_id + "//" + v, p.condition()))
    return result
class PropertyMap:
    """Maintains a mapping from property sets to string values.

    Lookup matches any stored property set that is a subset of the query,
    preferring the largest such set when several match.
    """
    def __init__(self):
        # Parallel lists: __properties[i] is the key set for __values[i].
        self.__properties = []
        self.__values = []
    def insert(self, properties, value):
        """Associate 'value' with the property set 'properties'."""
        self.__properties.append(properties)
        self.__values.append(value)
    def find(self, properties):
        """Return the value recorded for 'properties' or any subset of it.

        When more than one stored subset matches, the value of the longest
        one is returned, provided that choice is unique.
        """
        return self.find_replace(properties)
    def find_replace(self, properties, value=None):
        """Look up as 'find' does; optionally replace the stored value.

        Returns the value that was stored before any replacement, or None
        when nothing matches.
        """
        candidate_indices = []
        candidate_ranks = []
        for index, stored in enumerate(self.__properties):
            if b2.util.set.contains(stored, properties):
                candidate_indices.append(index)
                candidate_ranks.append(len(stored))
        best = sequence.select_highest_ranked(candidate_indices, candidate_ranks)
        if not best:
            return None
        if len(best) > 1:
            raise NoBestMatchingAlternative()
        winner = best[0]
        original = self.__values[winner]
        if value:
            self.__values[winner] = value
        return original
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
| apache-2.0 |
errx/django | tests/swappable_models/tests.py | 59 | 2156 | from __future__ import unicode_literals
from django.utils.six import StringIO
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from swappable_models.models import Article
class SwappableModelTests(TestCase):
    """Behaviour of permissions/content types when a model is swapped out."""
    available_apps = [
        'swappable_models',
        'django.contrib.auth',
        'django.contrib.contenttypes',
    ]
    @override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
    def test_generated_data(self):
        "Permissions and content types are not created for a swapped model"
        # Wipe any auto-created rows for this app first.
        Permission.objects.filter(content_type__app_label='swappable_models').delete()
        ContentType.objects.filter(app_label='swappable_models').delete()
        # Re-running migrate rebuilds the permissions and content types.
        output = StringIO()
        management.call_command('migrate', load_initial_data=False, interactive=False, stdout=output)
        # Content types and permissions must exist for the swapped-in model,
        # but not for the swapped-out one.
        permission_pairs = [
            (perm.content_type.app_label, perm.content_type.model)
            for perm in Permission.objects.all()
        ]
        self.assertIn(('swappable_models', 'alternatearticle'), permission_pairs)
        self.assertNotIn(('swappable_models', 'article'), permission_pairs)
        content_type_pairs = [
            (ctype.app_label, ctype.model)
            for ctype in ContentType.objects.all()
        ]
        self.assertIn(('swappable_models', 'alternatearticle'), content_type_pairs)
        self.assertNotIn(('swappable_models', 'article'), content_type_pairs)
    @override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
    def test_case_insensitive(self):
        "Model names are case insensitive. Check that model swapping honors this."
        try:
            Article.objects.all()
        except AttributeError:
            self.fail('Swappable model names should be case insensitive.')
        self.assertIsNone(Article._meta.swapped)
| bsd-3-clause |
aadrian/w2ui | server/python/django_w2ui/django_w2ui/demo/views.py | 25 | 1369 | # Create your views here.
from django_w2ui.demo.models import Users, Tipo_User
from django_w2ui.views import W2uiGridView,W2uiFormView
from django.views.generic.base import TemplateView
import json
class IndexView(TemplateView):
    """Render the static demo index page."""
    template_name = 'django_w2ui/index.html'
class ServerSideObjectsView(TemplateView):
    """Render the server-side objects demo page, exposing the list of user
    type descriptions to the template as a JSON-encoded string."""
    template_name = 'django_w2ui/server-side-objects.html'
    def get(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        descriptions = Tipo_User.objects.all().values_list("descri", flat=True)
        # The template expects a JSON array of plain strings.
        context.update({"tipo_user": json.dumps([str(descri) for descri in descriptions])})
        return self.render_to_response(context)
class UsersW2uiGridView(W2uiGridView):
    """w2ui grid view exposing the listed columns of the Users model."""
    model = Users
    fields = (
        "fname",
        "lname",
        "email",
        "login",
        "password",
        "date_birthday",
        "date_registration",
        "importo_registrato",
        "text",
        "timestamp"
    )
class UsersW2uiFormView(W2uiFormView):
    """w2ui form view exposing the listed columns of the Users model."""
    model = Users
    fields = (
        "fname",
        "lname",
        "email",
        "login",
        "password",
        "date_birthday",
        "date_registration",
        "importo_registrato",
        "text",
        "timestamp"
    )
# Fix: removed the stray '| mit |' text fused onto the closing parenthesis
# (a concatenation artifact), which made the statement a syntax error.
wangpanjun/django-rest-framework | tests/test_renderers.py | 53 | 17136 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from collections import MutableMapping
from django.conf.urls import include, url
from django.core.cache import cache
from django.db import models
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from rest_framework import permissions, serializers, status
from rest_framework.compat import OrderedDict
from rest_framework.renderers import (
BaseRenderer, BrowsableAPIRenderer, HTMLFormRenderer, JSONRenderer
)
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
def RENDERER_A_SERIALIZER(x):
    """Serialize *x* as RendererA output: a labelled, ASCII-encoded byte string."""
    labelled = 'Renderer A: %s' % x
    return labelled.encode('ascii')
def RENDERER_B_SERIALIZER(x):
    """Serialize *x* as RendererB output: a labelled, ASCII-encoded byte string."""
    labelled = 'Renderer B: %s' % x
    return labelled.encode('ascii')
expected_results = [
((elem for elem in [1, 2, 3]), JSONRenderer, b'[1,2,3]') # Generator
]
class DummyTestModel(models.Model):
    """Minimal concrete model used to render real querysets in the tests below."""
    name = models.CharField(max_length=42, default='')
class BasicRendererTests(TestCase):
    """Drive each (value, renderer class, expected bytes) case in `expected_results`."""
    def test_expected_results(self):
        for value, renderer_cls, expected in expected_results:
            output = renderer_cls().render(value)
            self.assertEqual(output, expected)
class RendererA(BaseRenderer):
    """Stub renderer with its own media type/format, used for content negotiation tests."""
    media_type = 'mock/renderera'
    format = "formata"
    def render(self, data, media_type=None, renderer_context=None):
        return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
    """Second stub renderer, distinguishable from RendererA in negotiation tests."""
    media_type = 'mock/rendererb'
    format = "formatb"
    def render(self, data, media_type=None, renderer_context=None):
        return RENDERER_B_SERIALIZER(data)
class MockView(APIView):
    """View offering both mock renderers; returns a canned payload and status."""
    renderer_classes = (RendererA, RendererB)
    def get(self, request, **kwargs):
        response = Response(DUMMYCONTENT, status=DUMMYSTATUS)
        return response
class MockGETView(APIView):
    """GET-only view returning a small dict payload."""
    def get(self, request, **kwargs):
        return Response({'foo': ['bar', 'baz']})
class MockPOSTView(APIView):
    """Echoes the parsed request body back under the 'foo' key."""
    def post(self, request, **kwargs):
        return Response({'foo': request.data})
class EmptyGETView(APIView):
    """Returns 204 No Content; used to check that no Content-Type header is set."""
    renderer_classes = (JSONRenderer,)
    def get(self, request, **kwargs):
        return Response(status=status.HTTP_204_NO_CONTENT)
class HTMLView(APIView):
    """View offering only the browsable (HTML) renderer."""
    renderer_classes = (BrowsableAPIRenderer, )
    def get(self, request, **kwargs):
        return Response('text')
class HTMLView1(APIView):
    """View offering both the browsable and the JSON renderer."""
    renderer_classes = (BrowsableAPIRenderer, JSONRenderer)
    def get(self, request, **kwargs):
        return Response('text')
urlpatterns = [
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
url(r'^cache$', MockGETView.as_view()),
url(r'^parseerror$', MockPOSTView.as_view(renderer_classes=[JSONRenderer, BrowsableAPIRenderer])),
url(r'^html$', HTMLView.as_view()),
url(r'^html1$', HTMLView1.as_view()),
url(r'^empty$', EmptyGETView.as_view()),
url(r'^api', include('rest_framework.urls', namespace='rest_framework'))
]
class POSTDeniedPermission(permissions.BasePermission):
    """Permission that allows every HTTP method except POST."""
    def has_permission(self, request, view):
        return request.method != 'POST'
class POSTDeniedView(APIView):
    """View whose permission class denies POST; all other methods are allowed."""
    renderer_classes = (BrowsableAPIRenderer,)
    permission_classes = (POSTDeniedPermission,)
    def get(self, request):
        return Response()
    def post(self, request):
        return Response()
    def put(self, request):
        return Response()
    def patch(self, request):
        return Response()
class DocumentingRendererTests(TestCase):
    """Form rendering behaviour of the browsable API."""
    def test_only_permitted_forms_are_displayed(self):
        view = POSTDeniedView.as_view()
        request = APIRequestFactory().get('/')
        response = view(request).render()
        # POST is denied by the permission class, so no POST form is shown.
        self.assertNotContains(response, '>POST<')
        self.assertContains(response, '>PUT<')
        self.assertContains(response, '>PATCH<')
class RendererEndToEndTests(TestCase):
    """
    End-to-end testing of renderers using an RendererMixin on a generic view.
    """
    # Resolve test-client requests against this module's urlpatterns.
    urls = 'tests.test_renderers'
    def test_default_renderer_serializes_content(self):
        """If the Accept header is not set the default renderer should serialize the response."""
        resp = self.client.get('/')
        self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_head_method_serializes_no_content(self):
        """No response must be included in HEAD requests."""
        resp = self.client.head('/')
        self.assertEqual(resp.status_code, DUMMYSTATUS)
        self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, six.b(''))
    def test_default_renderer_serializes_content_on_accept_any(self):
        """If the Accept header is set to */* the default renderer should serialize the response."""
        resp = self.client.get('/', HTTP_ACCEPT='*/*')
        self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_specified_renderer_serializes_content_default_case(self):
        """If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that works for the default renderer)"""
        resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
        self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_specified_renderer_serializes_content_non_default_case(self):
        """If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that works for a non-default renderer)"""
        resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
        self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_specified_renderer_serializes_content_on_accept_query(self):
        """The '_accept' query string should behave in the same way as the Accept header."""
        param = '?%s=%s' % (
            api_settings.URL_ACCEPT_OVERRIDE,
            RendererB.media_type
        )
        resp = self.client.get('/' + param)
        self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
        """If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
        resp = self.client.get('/', HTTP_ACCEPT='foo/bar')
        self.assertEqual(resp.status_code, status.HTTP_406_NOT_ACCEPTABLE)
    def test_specified_renderer_serializes_content_on_format_query(self):
        """If a 'format' query is specified, the renderer with the matching
        format attribute should serialize the response."""
        param = '?%s=%s' % (
            api_settings.URL_FORMAT_OVERRIDE,
            RendererB.format
        )
        resp = self.client.get('/' + param)
        self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_specified_renderer_serializes_content_on_format_kwargs(self):
        """If a 'format' keyword arg is specified, the renderer with the matching
        format attribute should serialize the response."""
        # The '.formatb' suffix is captured by the format kwarg in urlpatterns.
        resp = self.client.get('/something.formatb')
        self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
        """If both a 'format' query and a matching Accept header specified,
        the renderer with the matching format attribute should serialize the response."""
        param = '?%s=%s' % (
            api_settings.URL_FORMAT_OVERRIDE,
            RendererB.format
        )
        resp = self.client.get('/' + param,
                               HTTP_ACCEPT=RendererB.media_type)
        self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
        self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(resp.status_code, DUMMYSTATUS)
    def test_parse_error_renderers_browsable_api(self):
        """Invalid data should still render the browsable API correctly."""
        resp = self.client.post('/parseerror', data='foobar', content_type='application/json', HTTP_ACCEPT='text/html')
        self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    def test_204_no_content_responses_have_no_content_type_set(self):
        """
        Regression test for #1196
        https://github.com/tomchristie/django-rest-framework/issues/1196
        """
        resp = self.client.get('/empty')
        self.assertEqual(resp.get('Content-Type', None), None)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
    def test_contains_headers_of_api_response(self):
        """
        Issue #1437
        Test we display the headers of the API response and not those from the
        HTML response
        """
        resp = self.client.get('/html1')
        self.assertContains(resp, '>GET, HEAD, OPTIONS<')
        self.assertContains(resp, '>application/json<')
        self.assertNotContains(resp, '>text/html; charset=utf-8<')
_flat_repr = '{"foo":["bar","baz"]}'
_indented_repr = '{\n "foo": [\n "bar",\n "baz"\n ]\n}'
def strip_trailing_whitespace(content):
    """Drop spaces immediately preceding a newline.

    Different versions of the json library are inconsistent about trailing
    whitespace when pretty-printing, so normalise before comparing.
    """
    trailing_spaces = re.compile(' +\n')
    return trailing_spaces.sub('\n', content)
class JSONRendererTests(TestCase):
    """
    Tests specific to the JSON Renderer
    """
    # Consistency fix: three methods below used the deprecated `assertEquals`
    # alias; they now use `assertEqual` like the rest of this module.
    def test_render_lazy_strings(self):
        """
        JSONRenderer should deal with lazy translated strings.
        """
        ret = JSONRenderer().render(_('test'))
        self.assertEqual(ret, b'"test"')
    def test_render_queryset_values(self):
        # A values() queryset should serialize as a list of dicts.
        o = DummyTestModel.objects.create(name='dummy')
        qs = DummyTestModel.objects.values('id', 'name')
        ret = JSONRenderer().render(qs)
        data = json.loads(ret.decode('utf-8'))
        self.assertEqual(data, [{'id': o.id, 'name': o.name}])
    def test_render_queryset_values_list(self):
        # A values_list() queryset should serialize as a list of lists.
        o = DummyTestModel.objects.create(name='dummy')
        qs = DummyTestModel.objects.values_list('id', 'name')
        ret = JSONRenderer().render(qs)
        data = json.loads(ret.decode('utf-8'))
        self.assertEqual(data, [[o.id, o.name]])
    def test_render_dict_abc_obj(self):
        # A MutableMapping implementation should serialize like a plain dict
        # (note non-string keys are coerced to strings).
        class Dict(MutableMapping):
            def __init__(self):
                self._dict = dict()
            def __getitem__(self, key):
                return self._dict.__getitem__(key)
            def __setitem__(self, key, value):
                return self._dict.__setitem__(key, value)
            def __delitem__(self, key):
                return self._dict.__delitem__(key)
            def __iter__(self):
                return self._dict.__iter__()
            def __len__(self):
                return self._dict.__len__()
            def keys(self):
                return self._dict.keys()
        x = Dict()
        x['key'] = 'string value'
        x[2] = 3
        ret = JSONRenderer().render(x)
        data = json.loads(ret.decode('utf-8'))
        self.assertEqual(data, {'key': 'string value', '2': 3})
    def test_render_obj_with_getitem(self):
        # Defining only __getitem__ is not enough to be JSON-serializable.
        class DictLike(object):
            def __init__(self):
                self._dict = {}
            def set(self, value):
                self._dict = dict(value)
            def __getitem__(self, key):
                return self._dict[key]
        x = DictLike()
        x.set({'a': 1, 'b': 'string'})
        with self.assertRaises(TypeError):
            JSONRenderer().render(x)
    def test_without_content_type_args(self):
        """
        Test basic JSON rendering.
        """
        obj = {'foo': ['bar', 'baz']}
        renderer = JSONRenderer()
        content = renderer.render(obj, 'application/json')
        # Fix failing test case which depends on version of JSON library.
        self.assertEqual(content.decode('utf-8'), _flat_repr)
    def test_with_content_type_args(self):
        """
        Test JSON rendering with additional content type arguments supplied.
        """
        obj = {'foo': ['bar', 'baz']}
        renderer = JSONRenderer()
        content = renderer.render(obj, 'application/json; indent=2')
        self.assertEqual(strip_trailing_whitespace(content.decode('utf-8')), _indented_repr)
class UnicodeJSONRendererTests(TestCase):
    """
    Tests specific for the Unicode JSON Renderer
    """
    def test_proper_encoding(self):
        # Non-ASCII characters are emitted as UTF-8 bytes, not \uXXXX escapes.
        obj = {'countries': ['United Kingdom', 'France', 'España']}
        renderer = JSONRenderer()
        content = renderer.render(obj, 'application/json')
        self.assertEqual(content, '{"countries":["United Kingdom","France","España"]}'.encode('utf-8'))
    def test_u2028_u2029(self):
        # The \u2028 and \u2029 characters should be escaped,
        # even when the non-escaping unicode representation is used.
        # Regression test for #2169
        obj = {'should_escape': '\u2028\u2029'}
        renderer = JSONRenderer()
        content = renderer.render(obj, 'application/json')
        self.assertEqual(content, '{"should_escape":"\\u2028\\u2029"}'.encode('utf-8'))
class AsciiJSONRendererTests(TestCase):
    """
    Tests specific for an ASCII-only JSON Renderer
    (docstring previously said "Unicode" -- a copy-paste of the class above).
    """
    def test_proper_encoding(self):
        # With ensure_ascii enabled, non-ASCII characters are \uXXXX-escaped.
        class AsciiJSONRenderer(JSONRenderer):
            ensure_ascii = True
        obj = {'countries': ['United Kingdom', 'France', 'España']}
        renderer = AsciiJSONRenderer()
        content = renderer.render(obj, 'application/json')
        self.assertEqual(content, '{"countries":["United Kingdom","France","Espa\\u00f1a"]}'.encode('utf-8'))
# Tests for caching issue, #346
class CacheRenderTest(TestCase):
    """
    Tests specific to caching responses (regression tests for #346:
    rendered responses must survive a cache round-trip).
    """
    urls = 'tests.test_renderers'
    def test_head_caching(self):
        """
        Test caching of HEAD requests
        """
        response = self.client.head('/cache')
        cache.set('key', response)
        cached_response = cache.get('key')
        assert isinstance(cached_response, Response)
        assert cached_response.content == response.content
        assert cached_response.status_code == response.status_code
    def test_get_caching(self):
        """
        Test caching of GET requests
        """
        response = self.client.get('/cache')
        cache.set('key', response)
        cached_response = cache.get('key')
        assert isinstance(cached_response, Response)
        assert cached_response.content == response.content
        assert cached_response.status_code == response.status_code
class TestJSONIndentationStyles:
    """
    JSONRenderer output shape for the compact (default), indented, and
    long-form (non-compact) styles.
    """
    # Fix: the two method names below were swapped relative to their bodies
    # (test_indented asserted the compact output and vice versa); the indented
    # expectation is also restored to four spaces to match {'indent': 4}.
    def test_compact(self):
        # Default rendering is compact: no spaces after ':' or ','.
        renderer = JSONRenderer()
        data = OrderedDict([('a', 1), ('b', 2)])
        assert renderer.render(data) == b'{"a":1,"b":2}'
    def test_indented(self):
        # An 'indent' value in the renderer context pretty-prints the output.
        renderer = JSONRenderer()
        data = OrderedDict([('a', 1), ('b', 2)])
        context = {'indent': 4}
        assert (
            renderer.render(data, renderer_context=context) ==
            b'{\n    "a": 1,\n    "b": 2\n}'
        )
    def test_long_form(self):
        # compact=False keeps single-line output but restores the spaces.
        renderer = JSONRenderer()
        renderer.compact = False
        data = OrderedDict([('a', 1), ('b', 2)])
        assert renderer.render(data) == b'{"a": 1, "b": 2}'
class TestHiddenFieldHTMLFormRenderer(TestCase):
    """A HiddenField should contribute nothing to rendered HTML form markup."""
    def test_hidden_field_rendering(self):
        class TestSerializer(serializers.Serializer):
            published = serializers.HiddenField(default=True)
        serializer = TestSerializer(data={})
        serializer.is_valid()
        renderer = HTMLFormRenderer()
        field = serializer['published']
        rendered = renderer.render_field(field, {})
        assert rendered == ''
| bsd-2-clause |
aranjan7/contrail-controller-aranjan | src/config/api-server/tests/fab_tasks.py | 3 | 4553 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
from fabric.api import task, lcd, prefix, execute, local
@task
def setup_venv(build_top = "../../../../../build"):
    """Create the unit-test virtualenv under the build tree and install the
    locally built eggs plus pinned third-party dependencies.

    build_top: path to the top of the build output tree, relative to the
    directory fab is invoked from.
    """
    venv_base = "%s/debug/config/api-server" %(build_top)
    with lcd(venv_base):
        local("virtualenv ut-venv")
    venv_dir = "%s/ut-venv" %(venv_base)
    with lcd(venv_dir):
        with prefix(". bin/activate"):
            # Locally built packages from this source tree.
            local(
                "pip install --upgrade "
                "%s/debug/config/common/dist/cfgm_common-0.1dev.tar.gz" % build_top)
            local(
                "pip install --upgrade "
                "%s/debug/config/api-server/dist/vnc_cfg_api_server-0.1dev.tar.gz" % build_top)
            local(
                "pip install --upgrade "
                "%s/debug/api-lib/dist/vnc_api-0.1dev.tar.gz" % build_top)
            local(
                "pip install --upgrade "
                "%s/debug/tools/sandesh/library/python/dist/sandesh-0.1dev.tar.gz" % build_top)
            local(
                "pip install --upgrade "
                "%s/debug/sandesh/common/dist/sandesh-common-0.1dev.tar.gz" % build_top)
            local(
                "pip install --upgrade "
                "%s/debug/discovery/client/dist/discoveryclient-0.1dev.tar.gz" % build_top)
            # Pinned third-party test dependencies.
            # NOTE(review): stevedore and netifaces are installed twice
            # (pinned, then unpinned) -- confirm which version is intended.
            local("pip install redis==2.7.1")
            local("pip install stevedore==0.11")
            local("pip install netifaces==0.8")
            local("pip install xmltodict")
            local("pip install fixtures==0.3.12")
            local("pip install testtools==0.9.32")
            local("pip install flexmock==0.9.7")
            local("pip install python-novaclient==2.13.0")
            local("pip install stevedore")
            local("pip install netifaces")
            local("pip install requests==2.0.0")
            local("pip install kazoo")
            pyver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
            # 2.6 requirements
            # NOTE(review): ordereddict is installed unconditionally even
            # though it is grouped with the 2.6-only requirements -- confirm.
            local("pip install ordereddict")
            if pyver == '2.6':
                local("pip install importlib")
            # Make the shared test helper importable from inside the venv.
            local(
                "cp ../../../../../controller/src/config/api-server/tests/"
                "test_common.py lib/python%s/site-packages/"
                "vnc_cfg_api_server/" %
                (pyver))
#end setup_venv
@task
def destroy_venv(build_top = "../../../../../build"):
    """Delete the unit-test virtualenv created by setup_venv."""
    venv_base = "%s/debug/config/api-server" % (build_top)
    target = "%s/ut-venv" % (venv_base)
    local("rm -rf %s" % (target))
#end destroy_venv
@task
def run_tests(build_top = "../../../../../build"):
    """Copy the CRUD test module into the venv and execute it there."""
    venv_base = "%s/debug/config/api-server" % (build_top)
    venv_dir = "%s/ut-venv" % (venv_base)
    with lcd(venv_dir):
        with prefix("source bin/activate"):
            pyver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
            # Refresh the test module inside the venv before running it.
            local(
                "cp ../../../../../controller/src/config/api-server/tests/"
                "test_crud_basic.py lib/python%s/site-packages/"
                "vnc_cfg_api_server/" % (pyver))
            local(
                "python lib/python%s/site-packages/"
                "vnc_cfg_api_server/test_crud_basic.py" % (pyver))
#end run_tests
@task
def run_api_srv(build_top = "../../../../../build", listen_ip = None, listen_port = None):
    """Start the fake API server from inside the venv.

    listen_ip / listen_port: optional bind address and port; each is passed
    through to the server only when supplied.
    """
    venv_base = "%s/debug/config/api-server" % (build_top)
    venv_dir = "%s/ut-venv" % (venv_base)
    with lcd(venv_dir):
        with prefix(". bin/activate"):
            pyver = "%s.%s" %(sys.version_info[0], sys.version_info[1])
            # Refresh the server module inside the venv before launching it.
            local(
                "cp ../../../../../controller/src/config/api-server/tests/"
                "fake_api_server.py lib/python%s/site-packages/"
                "vnc_cfg_api_server/" % (pyver))
            # Build the optional command-line arguments.
            opt_str = ""
            if listen_ip:
                opt_str = "%s --listen_ip %s" % (opt_str, listen_ip)
            if listen_port:
                opt_str = "%s --listen_port %s" % (opt_str, listen_port)
            local(
                "python lib/python%s/site-packages/"
                "vnc_cfg_api_server/fake_api_server.py %s" % (pyver, opt_str))
#end run_api_srv
@task
def setup_and_run_tests(build_top = "../../../../../build"):
    """Convenience task: build the venv, then run the unit tests in it."""
    execute(setup_venv, build_top)
    execute(run_tests, build_top)
#end setup_and_run_tests
@task
def setup_and_run_api_srv(build_top = "../../../../../build", listen_ip = None, listen_port = None):
    """Convenience task: build the venv, then start the fake API server."""
    execute(setup_venv, build_top)
    execute(run_api_srv, build_top, listen_ip, listen_port)
#end setup_and_run_api_srv
| apache-2.0 |
Lektorium-LLC/edx-platform | lms/tests.py | 5 | 2048 | """Tests for the lms module itself."""
import mimetypes
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from edxmako import LOOKUP, add_lookup
from lms import startup
from openedx.features.course_experience import course_home_url_name
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class LmsModuleTests(TestCase):
    """
    Tests for lms module itself.
    """
    def test_new_mimetypes(self):
        # Each of the web-font extensions must resolve to a known mimetype.
        for extension in ('eot', 'otf', 'ttf', 'woff'):
            guessed_type, _ = mimetypes.guess_type('test.' + extension)
            self.assertIsNotNone(guessed_type)
class TemplateLookupTests(TestCase):
    """
    Tests for TemplateLookup.
    """
    def test_add_lookup_to_main(self):
        """Test that any template directories added are not cleared when microsites are enabled."""
        add_lookup('main', 'external_module', __name__)
        directories = LOOKUP['main'].directories
        # Exactly one entry for this module should have been registered.
        self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
        # This should not clear the directories list
        startup.enable_microsites()
        directories = LOOKUP['main'].directories
        self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
# Force the feedback-submission feature on so the modal code path is active.
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
    """Tests for the help modal"""
    def setUp(self):
        super(HelpModalTests, self).setUp()
        self.course = CourseFactory.create()
    def test_simple_test(self):
        """
        Simple test to make sure that you don't get a 500 error when the modal
        is enabled.
        """
        url = reverse(course_home_url_name(self.course.id), args=[self.course.id.to_deprecated_string()])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
| agpl-3.0 |
lmorchard/badg.us | vendor-local/lib/python/chardet/langcyrillicmodel.py | 235 | 17817 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# KOI8-R language model
# Character Mapping Table:
# Maps each raw byte value (0-255) of KOI8-R text to a "frequency order":
# low numbers mark the most frequent Cyrillic letters, while the sentinel
# values are: 255 = control/undefined, 254 = CR/LF, 253 = symbol, 252 = digit.
KOI8R_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
# Byte -> frequency-order map for windows-1251; same sentinel scheme as
# KOI8R_CharToOrderMap (255 control, 254 CR/LF, 253 symbol, 252 digit).
win1251_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
# Byte -> frequency-order map for ISO-8859-5; same sentinel scheme as
# KOI8R_CharToOrderMap (255 control, 254 CR/LF, 253 symbol, 252 digit).
latin5_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Byte -> frequency-order map for MacCyrillic; same sentinel scheme as
# KOI8R_CharToOrderMap (255 control, 254 CR/LF, 253 symbol, 252 digit).
macCyrillic_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
# Byte -> frequency-order map for IBM855; same sentinel scheme as
# KOI8R_CharToOrderMap (255 control, 254 CR/LF, 253 symbol, 252 digit).
IBM855_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
# Byte -> frequency-order map for IBM866; same sentinel scheme as
# KOI8R_CharToOrderMap (255 control, 254 CR/LF, 253 symbol, 252 digit).
IBM866_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
# Flattened 64x64 matrix of two-letter sequence likelihoods over the 64 most
# frequent character orders (as produced by the CharToOrderMaps above):
# 3 = very likely, 2 = likely, 1 = unlikely, 0 = negative/rare sequence.
# Statistics for the split are given in the "Model Table" comment above.
RussianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = { \
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = { \
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = { \
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = { \
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "MacCyrillic"
};
Ibm866Model = { \
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "IBM866"
}
Ibm855Model = { \
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': constants.False,
'charsetName': "IBM855"
}
| bsd-3-clause |
allenai/deep_qa | tests/data/instances/sequence_tagging/test_tagging_instance.py | 2 | 1148 | # pylint: disable=no-self-use,invalid-name
from deep_qa.data.instances.sequence_tagging.tagging_instance import IndexedTaggingInstance
from deep_qa.testing.test_case import DeepQaTestCase
from numpy.testing import assert_array_almost_equal
class TestIndexedTaggingInstance(DeepQaTestCase):
    """Unit tests for IndexedTaggingInstance padding and array conversion."""

    def setUp(self):
        super(TestIndexedTaggingInstance, self).setUp()
        # Four token indices tagged with a three-element label sequence.
        self.instance = IndexedTaggingInstance([1, 2, 3, 4], [4, 5, 6])

    def test_get_padding_lengths_returns_correct_lengths(self):
        # The padding length is driven by the token sequence, not the labels.
        assert self.instance.get_padding_lengths() == {'num_sentence_words': 4}

    def test_pad_truncates_correctly(self):
        # Requesting fewer words than present keeps the leading tokens.
        self.instance.pad({'num_sentence_words': 2})
        assert self.instance.text_indices == [1, 2]

    def test_pad_adds_padding_correctly(self):
        # Requesting more words than present appends zero padding at the end.
        self.instance.pad({'num_sentence_words': 6})
        assert self.instance.text_indices == [1, 2, 3, 4, 0, 0]

    def test_as_training_data_produces_correct_arrays(self):
        text_array, label_array = self.instance.as_training_data()
        assert_array_almost_equal(text_array, [1, 2, 3, 4])
        assert_array_almost_equal(label_array, [4, 5, 6])
| apache-2.0 |
qedi-r/home-assistant | homeassistant/components/mysensors/light.py | 3 | 8091 | """Support for MySensors lights."""
from homeassistant.components import mysensors
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
Light,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.util.color import rgb_hex_to_rgb_list
import homeassistant.util.color as color_util
SUPPORT_MYSENSORS_RGBW = SUPPORT_COLOR | SUPPORT_WHITE_VALUE
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the mysensors platform for lights."""
    # Dispatch each MySensors sensor type to the entity class implementing it.
    mysensors.setup_mysensors_platform(
        hass,
        DOMAIN,
        discovery_info,
        {
            "S_DIMMER": MySensorsLightDimmer,
            "S_RGB_LIGHT": MySensorsLightRGB,
            "S_RGBW_LIGHT": MySensorsLightRGBW,
        },
        async_add_entities=async_add_entities,
    )
class MySensorsLight(mysensors.device.MySensorsEntity, Light):
    """Representation of a MySensors Light child node."""

    def __init__(self, *args):
        """Initialize a MySensors Light."""
        super().__init__(*args)
        # Cached state mirrored from the gateway's child values:
        # on/off flag, brightness 0..255, hue/saturation pair, white 0..255.
        self._state = None
        self._brightness = None
        self._hs = None
        self._white = None

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    @property
    def hs_color(self):
        """Return the hs color value [int, int]."""
        return self._hs

    @property
    def white_value(self):
        """Return the white value of this light between 0..255."""
        return self._white

    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return self.gateway.optimistic

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def _turn_on_light(self):
        """Turn on light child device."""
        set_req = self.gateway.const.SetReq
        # Already on: avoid sending a redundant command to the gateway.
        if self._state:
            return
        self.gateway.set_child_value(
            self.node_id, self.child_id, set_req.V_LIGHT, 1, ack=1
        )
        if self.gateway.optimistic:
            # optimistically assume that light has changed state
            self._state = True
            self._values[set_req.V_LIGHT] = STATE_ON

    def _turn_on_dimmer(self, **kwargs):
        """Turn on dimmer child device."""
        set_req = self.gateway.const.SetReq
        brightness = self._brightness
        # Skip when no new brightness was requested, it is unchanged, or the
        # node does not support dimming at all.
        if (
            ATTR_BRIGHTNESS not in kwargs
            or kwargs[ATTR_BRIGHTNESS] == self._brightness
            or set_req.V_DIMMER not in self._values
        ):
            return
        brightness = kwargs[ATTR_BRIGHTNESS]
        # MySensors dimmers take a 0-100 percentage, HA uses 0-255.
        percent = round(100 * brightness / 255)
        self.gateway.set_child_value(
            self.node_id, self.child_id, set_req.V_DIMMER, percent, ack=1
        )
        if self.gateway.optimistic:
            # optimistically assume that light has changed state
            self._brightness = brightness
            self._values[set_req.V_DIMMER] = percent

    def _turn_on_rgb_and_w(self, hex_template, **kwargs):
        """Turn on RGB or RGBW child device.

        hex_template is "%02x%02x%02x" for RGB nodes and "%02x%02x%02x%02x"
        for RGBW nodes, and selects whether a white channel is encoded.
        """
        rgb = list(color_util.color_hs_to_RGB(*self._hs))
        white = self._white
        hex_color = self._values.get(self.value_type)
        hs_color = kwargs.get(ATTR_HS_COLOR)
        if hs_color is not None:
            new_rgb = color_util.color_hs_to_RGB(*hs_color)
        else:
            new_rgb = None
        new_white = kwargs.get(ATTR_WHITE_VALUE)
        # Nothing color-related requested: leave the node untouched.
        if new_rgb is None and new_white is None:
            return
        if new_rgb is not None:
            rgb = list(new_rgb)
        if hex_template == "%02x%02x%02x%02x":
            # RGBW: append the white channel (new value or current) so the
            # template gets four components.
            if new_white is not None:
                rgb.append(new_white)
            else:
                rgb.append(white)
        hex_color = hex_template % tuple(rgb)
        if len(rgb) > 3:
            # Pop the white channel back out so `rgb` is pure RGB again.
            white = rgb.pop()
        self.gateway.set_child_value(
            self.node_id, self.child_id, self.value_type, hex_color, ack=1
        )
        if self.gateway.optimistic:
            # optimistically assume that light has changed state
            self._hs = color_util.color_RGB_to_hs(*rgb)
            self._white = white
            self._values[self.value_type] = hex_color

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        value_type = self.gateway.const.SetReq.V_LIGHT
        self.gateway.set_child_value(self.node_id, self.child_id, value_type, 0, ack=1)
        if self.gateway.optimistic:
            # optimistically assume that light has changed state
            self._state = False
            self._values[value_type] = STATE_OFF
            self.async_schedule_update_ha_state()

    def _async_update_light(self):
        """Update the controller with values from light child."""
        value_type = self.gateway.const.SetReq.V_LIGHT
        self._state = self._values[value_type] == STATE_ON

    def _async_update_dimmer(self):
        """Update the controller with values from dimmer child."""
        value_type = self.gateway.const.SetReq.V_DIMMER
        if value_type in self._values:
            # Convert the node's 0-100 percentage back to HA's 0-255 scale.
            self._brightness = round(255 * int(self._values[value_type]) / 100)
            if self._brightness == 0:
                # A dimmer at 0% is reported as "off".
                self._state = False

    def _async_update_rgb_or_w(self):
        """Update the controller with values from RGB or RGBW child."""
        value = self._values[self.value_type]
        color_list = rgb_hex_to_rgb_list(value)
        if len(color_list) > 3:
            # Fourth component of an RGBW hex string is the white channel.
            self._white = color_list.pop()
        self._hs = color_util.color_RGB_to_hs(*color_list)
class MySensorsLightDimmer(MySensorsLight):
    """Dimmer child class to MySensorsLight."""

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        # Order matters: switch the node on first, then apply brightness.
        self._turn_on_light()
        self._turn_on_dimmer(**kwargs)
        if self.gateway.optimistic:
            self.async_schedule_update_ha_state()

    async def async_update(self):
        """Update the controller with the latest value from a sensor."""
        await super().async_update()
        self._async_update_light()
        self._async_update_dimmer()
class MySensorsLightRGB(MySensorsLight):
    """RGB child class to MySensorsLight."""

    @property
    def supported_features(self):
        """Flag supported features."""
        set_req = self.gateway.const.SetReq
        # Brightness is only advertised when the node also exposes V_DIMMER.
        if set_req.V_DIMMER in self._values:
            return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
        return SUPPORT_COLOR

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        # Order matters: power on, then brightness, then color.
        self._turn_on_light()
        self._turn_on_dimmer(**kwargs)
        self._turn_on_rgb_and_w("%02x%02x%02x", **kwargs)
        if self.gateway.optimistic:
            self.async_schedule_update_ha_state()

    async def async_update(self):
        """Update the controller with the latest value from a sensor."""
        await super().async_update()
        self._async_update_light()
        self._async_update_dimmer()
        self._async_update_rgb_or_w()
class MySensorsLightRGBW(MySensorsLightRGB):
    """RGBW child class to MySensorsLightRGB."""

    # pylint: disable=too-many-ancestors

    @property
    def supported_features(self):
        """Flag supported features."""
        set_req = self.gateway.const.SetReq
        # Same as RGB, plus the white channel (SUPPORT_MYSENSORS_RGBW).
        if set_req.V_DIMMER in self._values:
            return SUPPORT_BRIGHTNESS | SUPPORT_MYSENSORS_RGBW
        return SUPPORT_MYSENSORS_RGBW

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        self._turn_on_light()
        self._turn_on_dimmer(**kwargs)
        # Four-component template: RGB plus the white channel.
        self._turn_on_rgb_and_w("%02x%02x%02x%02x", **kwargs)
        if self.gateway.optimistic:
            self.async_schedule_update_ha_state()
| apache-2.0 |
pbrod/numpy | numpy/lib/ufunclike.py | 7 | 8031 | """
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
"""
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
from numpy.core.overrides import (
array_function_dispatch, ARRAY_FUNCTION_ENABLED,
)
import warnings
import functools
def _deprecate_out_named_y(f):
"""
Allow the out argument to be passed as the name `y` (deprecated)
In future, this decorator should be removed.
"""
@functools.wraps(f)
def func(x, out=None, **kwargs):
if 'y' in kwargs:
if 'out' in kwargs:
raise TypeError(
"{} got multiple values for argument 'out'/'y'"
.format(f.__name__)
)
out = kwargs.pop('y')
# NumPy 1.13.0, 2017-04-26
warnings.warn(
"The name of the out argument to {} has changed from `y` to "
"`out`, to match other ufuncs.".format(f.__name__),
DeprecationWarning, stacklevel=3)
return f(x, out=out, **kwargs)
return func
def _fix_out_named_y(f):
    """
    Allow the out argument to be passed as the name `y` (deprecated)

    This decorator should only be used if _deprecate_out_named_y is used on
    a corresponding dispatcher function.
    """
    @functools.wraps(f)
    def func(x, out=None, **kwargs):
        # Error checking already happened in _deprecate_out_named_y, so the
        # deprecated alias can simply be renamed if present.
        out = kwargs.pop('y', out)
        return f(x, out=out, **kwargs)
    return func
def _fix_and_maybe_deprecate_out_named_y(f):
    """
    Use the appropriate decorator, depending upon if dispatching is being used.
    """
    decorator = _fix_out_named_y if ARRAY_FUNCTION_ENABLED else _deprecate_out_named_y
    return decorator(f)
@_deprecate_out_named_y
def _dispatcher(x, out=None):
    # Relevant-argument dispatcher shared by fix/isposinf/isneginf: returns
    # the arguments array_function_dispatch inspects for __array_function__.
    return (x, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
@_fix_and_maybe_deprecate_out_named_y
def fix(x, out=None):
    """
    Round to nearest integer towards zero.

    Round an array of floats element-wise to nearest integer towards zero.
    The rounded values are returned as floats.

    Parameters
    ----------
    x : array_like
        An array of floats to be rounded
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have
        a shape that the input broadcasts to. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    out : ndarray of floats
        A float array with the same dimensions as the input.
        If second argument is not supplied then a float array is returned
        with the rounded values.

        If a second argument is supplied the result is stored there.
        The return value `out` is then a reference to that array.

    See Also
    --------
    rint, trunc, floor, ceil
    around : Round to given number of decimals

    Examples
    --------
    >>> np.fix(3.14)
    3.0
    >>> np.fix(3)
    3.0
    >>> np.fix([2.1, 2.9, -2.1, -2.9])
    array([ 2.,  2., -2., -2.])

    """
    # promote back to an array if flattened
    res = nx.asanyarray(nx.ceil(x, out=out))
    # Overwrite the ceil result with floor wherever x >= 0, so rounding is
    # always toward zero; negative entries keep their ceil value.
    res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))

    # when no out argument is passed and no subclasses are involved, flatten
    # scalars
    if out is None and type(res) is nx.ndarray:
        res = res[()]
    return res
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
@_fix_and_maybe_deprecate_out_named_y
def isposinf(x, out=None):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    out : array_like, optional
        A location into which the result is stored. If provided, it must have
        a shape that the input broadcasts to. If not provided or None, a
        freshly-allocated boolean array is returned.

    Returns
    -------
    out : ndarray
        A boolean array, True where the corresponding element of the input
        is positive infinity and False otherwise. If `out` was supplied, the
        result is written there; numeric outputs hold ones and zeros. The
        return value is then a reference to that array.

    See Also
    --------
    isinf, isneginf, isfinite, isnan

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).

    Errors result if the second argument is also supplied when x is a scalar
    input, if first and second arguments have different shapes, or if the
    first argument has complex values.

    Examples
    --------
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(np.NINF)
    False
    >>> np.isposinf([-np.inf, 0., np.inf])
    array([False, False,  True])

    """
    inf_mask = nx.isinf(x)
    try:
        # signbit is False for +inf, so its complement selects non-negatives.
        nonneg_mask = ~nx.signbit(x)
    except TypeError as e:
        dtype = nx.asanyarray(x).dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e
    return nx.logical_and(inf_mask, nonneg_mask, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
@_fix_and_maybe_deprecate_out_named_y
def isneginf(x, out=None):
    """
    Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    out : array_like, optional
        A location into which the result is stored. If provided, it must have
        a shape that the input broadcasts to. If not provided or None, a
        freshly-allocated boolean array is returned.

    Returns
    -------
    out : ndarray
        A boolean array, True where the corresponding element of the input
        is negative infinity and False otherwise. If `out` was supplied, the
        result is written there; numeric outputs hold ones and zeros. The
        return value is then a reference to that array.

    See Also
    --------
    isinf, isposinf, isnan, isfinite

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).

    Errors result if the second argument is also supplied when x is a scalar
    input, if first and second arguments have different shapes, or if the
    first argument has complex values.

    Examples
    --------
    >>> np.isneginf(np.NINF)
    True
    >>> np.isneginf(np.inf)
    False
    >>> np.isneginf([-np.inf, 0., np.inf])
    array([ True, False, False])

    """
    inf_mask = nx.isinf(x)
    try:
        # signbit is True exactly for negative values, including -inf.
        neg_mask = nx.signbit(x)
    except TypeError as e:
        dtype = nx.asanyarray(x).dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e
    return nx.logical_and(inf_mask, neg_mask, out)
| bsd-3-clause |
scorphus/django | tests/admin_checks/models.py | 281 | 1836 | """
Tests of ModelAdmin system checks logic.
"""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
    # Minimal model used as a ForeignKey target by the admin-check tests.
    title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
    title = models.CharField(max_length=150)
    album = models.ForeignKey(Album, models.CASCADE)
    # Non-editable field: exercises admin checks that reject it in forms.
    original_release = models.DateField(editable=False)

    class Meta:
        ordering = ('title',)

    def __str__(self):
        return self.title

    def readonly_method_on_model(self):
        # does nothing
        pass
class TwoAlbumFKAndAnE(models.Model):
    # Two foreign keys to the same model; related_name disambiguates the
    # reverse accessors on Album so the checks framework can resolve them.
    album1 = models.ForeignKey(Album, models.CASCADE, related_name="album1_set")
    album2 = models.ForeignKey(Album, models.CASCADE, related_name="album2_set")
    e = models.CharField(max_length=1)
class Author(models.Model):
    """Plain model; target of Book's many-to-many relation below."""
    name = models.CharField(max_length=100)
class Book(models.Model):
    """Model whose M2M to Author goes through an explicit intermediary
    (AuthorsBooks), used by checks on through-model handling."""
    name = models.CharField(max_length=100)
    subtitle = models.CharField(max_length=100)
    price = models.FloatField()
    authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
    """Intermediary (through) model for the Book<->Author relation."""
    author = models.ForeignKey(Author, models.CASCADE)
    book = models.ForeignKey(Book, models.CASCADE)
    featured = models.BooleanField()
class State(models.Model):
    """Plain model; FK target of City below."""
    name = models.CharField(max_length=15)
class City(models.Model):
    """Model holding only a ForeignKey to State."""
    state = models.ForeignKey(State, models.CASCADE)
class Influence(models.Model):
    """Model with a generic relation (content_type + object_id pair exposed
    through a GenericForeignKey), for contenttypes-related admin checks."""
    name = models.TextField()
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
| bsd-3-clause |
hojel/calibre | src/calibre/gui2/dialogs/catalog.py | 10 | 8552 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, importlib
from calibre.customize.ui import config
from calibre.gui2.dialogs.catalog_ui import Ui_Dialog
from calibre.gui2 import dynamic, ResizableDialog, info_dialog
from calibre.customize.ui import catalog_plugins
class Catalog(ResizableDialog, Ui_Dialog):
    '''Catalog Dialog builder.

    Discovers all enabled catalog plugins, builds one options tab per
    plugin (built-in or user-installed), and persists the user's choices
    via the `dynamic` preference store.

    NOTE(review): Python 2-era code (`unicode`, `cStringIO`, `cmp`-based
    sorting) driving a PyQt5 dialog.
    '''
    def __init__(self, parent, dbspec, ids, db):
        """Build the dialog for catalogizing the books whose ids are `ids`."""
        import re, cStringIO
        from calibre import prints as info
        from PyQt5.uic import compileUi
        ResizableDialog.__init__(self, parent)
        self.dbspec, self.ids = dbspec, ids
        # Display the number of books we've been passed
        self.count.setText(unicode(self.count.text()).format(len(ids)))
        # Display the last-used title
        self.title.setText(dynamic.get('catalog_last_used_title',
            _('My Books')))
        # self.fmts holds [FORMAT, sync_enabled, widget] triples;
        # self.widgets holds the per-plugin option widgets.
        self.fmts, self.widgets = [], []
        for plugin in catalog_plugins():
            if plugin.name in config['disabled_plugins']:
                continue
            name = plugin.name.lower().replace(' ', '_')
            if getattr(plugin, 'plugin_path', None) is None:
                # Built-in plugin: its widget module ships with calibre.
                try:
                    catalog_widget = importlib.import_module('calibre.gui2.catalog.'+name)
                    pw = catalog_widget.PluginWidget()
                    pw.initialize(name, db)
                    pw.ICON = I('forward.png')
                    self.widgets.append(pw)
                    [self.fmts.append([file_type.upper(), pw.sync_enabled,pw]) for file_type in plugin.file_types]
                except ImportError:
                    info("ImportError initializing %s" % name)
                    continue
            else:
                # Load dynamic tab
                form = os.path.join(plugin.resources_path,'%s.ui' % name)
                klass = os.path.join(plugin.resources_path,'%s.py' % name)
                compiled_form = os.path.join(plugin.resources_path,'%s_ui.py' % name)
                if os.path.exists(form) and os.path.exists(klass):
                    # info("Adding widget for user-installed Catalog plugin %s" % plugin.name)
                    # Compile the .ui form provided in plugin.zip
                    if not os.path.exists(compiled_form):
                        # info('\tCompiling form', form)
                        buf = cStringIO.StringIO()
                        compileUi(form, buf)
                        dat = buf.getvalue()
                        # Rewrite Qt translate() calls into calibre's _() gettext calls.
                        dat = re.compile(r'QtGui.QApplication.translate\(.+?,\s+"(.+?)(?<!\\)",.+?\)',
                            re.DOTALL).sub(r'_("\1")', dat)
                        open(compiled_form, 'wb').write(dat)
                    # Import the dynamic PluginWidget() from .py file provided in plugin.zip
                    try:
                        sys.path.insert(0, plugin.resources_path)
                        catalog_widget = importlib.import_module(name)
                        pw = catalog_widget.PluginWidget()
                        pw.initialize(name)
                        pw.ICON = I('forward.png')
                        self.widgets.append(pw)
                        [self.fmts.append([file_type.upper(), pw.sync_enabled,pw]) for file_type in plugin.file_types]
                    except ImportError:
                        info("ImportError with %s" % name)
                        continue
                    finally:
                        # Always undo the sys.path manipulation.
                        sys.path.remove(plugin.resources_path)
                else:
                    info("No dynamic tab resources found for %s" % name)
        self.widgets = sorted(self.widgets, cmp=lambda x,y:cmp(x.TITLE, y.TITLE))
        # Generate a sorted list of installed catalog formats/sync_enabled pairs
        fmts = sorted([x[0] for x in self.fmts])
        self.sync_enabled_formats = []
        for fmt in self.fmts:
            if fmt[1]:
                self.sync_enabled_formats.append(fmt[0])
        # Callbacks when format, title changes
        self.format.currentIndexChanged.connect(self.format_changed)
        self.format.currentIndexChanged.connect(self.settings_changed)
        self.title.editingFinished.connect(self.settings_changed)
        # Add the installed catalog format list to the format QComboBox.
        # Signals are blocked so populating/selecting doesn't fire the
        # callbacks wired above.
        self.format.blockSignals(True)
        self.format.addItems(fmts)
        pref = dynamic.get('catalog_preferred_format', 'CSV')
        idx = self.format.findText(pref)
        if idx > -1:
            self.format.setCurrentIndex(idx)
        self.format.blockSignals(False)
        if self.sync.isEnabled():
            self.sync.setChecked(dynamic.get('catalog_sync_to_device', True))
        self.add_to_library.setChecked(dynamic.get('catalog_add_to_library', True))
        self.format.currentIndexChanged.connect(self.show_plugin_tab)
        self.buttonBox.button(self.buttonBox.Apply).clicked.connect(self.apply)
        self.buttonBox.button(self.buttonBox.Help).clicked.connect(self.help)
        self.show_plugin_tab(None)
        # Restore the last saved window geometry, if any.
        geom = dynamic.get('catalog_window_geom', None)
        if geom is not None:
            self.restoreGeometry(bytes(geom))
    def show_plugin_tab(self, idx):
        """Show the options tab of the plugin matching the selected format."""
        cf = unicode(self.format.currentText()).lower()
        # Tab 0 is the fixed general tab; drop any previously added plugin tab.
        while self.tabs.count() > 1:
            self.tabs.removeTab(1)
        for pw in self.widgets:
            if cf in pw.formats:
                self.tabs.addTab(pw, pw.TITLE)
                break
        # Only show the Help button when the plugin widget can handle it.
        if hasattr(self.tabs.widget(1),'show_help'):
            self.buttonBox.button(self.buttonBox.Help).setVisible(True)
        else:
            self.buttonBox.button(self.buttonBox.Help).setVisible(False)
    def format_changed(self, idx):
        """Enable/disable the 'sync to device' checkbox per selected format."""
        cf = unicode(self.format.currentText())
        if cf in self.sync_enabled_formats:
            self.sync.setEnabled(True)
        else:
            self.sync.setDisabled(True)
            self.sync.setChecked(False)
    def settings_changed(self):
        '''
        When title/format change, invalidate Preset in E-book options tab
        '''
        cf = unicode(self.format.currentText()).lower()
        if cf in ['azw3', 'epub', 'mobi'] and hasattr(self.tabs.widget(1), 'settings_changed'):
            self.tabs.widget(1).settings_changed("title/format")
    @property
    def fmt_options(self):
        """Options dict from the active plugin tab ({} when no tab is shown)."""
        ans = {}
        if self.tabs.count() > 1:
            w = self.tabs.widget(1)
            ans = w.options()
        return ans
    def save_catalog_settings(self):
        """Persist the current dialog state into the `dynamic` pref store."""
        self.catalog_format = unicode(self.format.currentText())
        dynamic.set('catalog_preferred_format', self.catalog_format)
        self.catalog_title = unicode(self.title.text())
        dynamic.set('catalog_last_used_title', self.catalog_title)
        self.catalog_sync = bool(self.sync.isChecked())
        dynamic.set('catalog_sync_to_device', self.catalog_sync)
        dynamic.set('catalog_window_geom', bytearray(self.saveGeometry()))
        dynamic.set('catalog_add_to_library', self.add_to_library.isChecked())
    def apply(self, *args):
        # Store current values without building catalog
        self.save_catalog_settings()
        if self.tabs.count() > 1:
            self.tabs.widget(1).options()
    def accept(self):
        """Save settings, then close the dialog with the accepted status."""
        self.save_catalog_settings()
        return ResizableDialog.accept(self)
    def help(self):
        '''
        To add help functionality for a specific format:
        In gui2.catalog.catalog_<format>.py, add the following:
            from calibre.gui2 import open_url
            from PyQt5.Qt import QUrl
        In the PluginWidget() class, add this method:
            def show_help(self):
                url = 'file:///' + P('catalog/help_<format>.html')
                open_url(QUrl(url))
        Create the help file at resources/catalog/help_<format>.html
        '''
        if self.tabs.count() > 1 and hasattr(self.tabs.widget(1),'show_help'):
            try:
                self.tabs.widget(1).show_help()
            except:
                info_dialog(self, _('No help available'),
                    _('No help available for this output format.'),
                    show_copy_button=False,
                    show=True)
    def reject(self):
        """Remember window geometry even when the dialog is cancelled."""
        dynamic.set('catalog_window_geom', bytearray(self.saveGeometry()))
        ResizableDialog.reject(self)
| gpl-3.0 |
jpalvarezf/csvkit | tests/test_utilities/test_csvjson.py | 20 | 4672 | #!/usr/bin/env python
import json
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
from csvkit.exceptions import NonUniqueKeyColumnException
from csvkit.utilities.csvjson import CSVJSON
class TestCSVJSON(unittest.TestCase):
    """End-to-end tests for the csvjson command-line utility."""

    def _run(self, args):
        """Execute CSVJSON over *args* and return the raw text it wrote."""
        out = six.StringIO()
        CSVJSON(args, out).main()
        return out.getvalue()

    def _run_json(self, args):
        """Execute CSVJSON over *args* and return its output parsed as JSON."""
        return json.loads(self._run(args))

    def _check_geojson_features(self, geojson, expect_id, properties_count):
        """Shared assertions over every feature of a GeoJSON collection."""
        for feature in geojson['features']:
            self.assertEqual(feature['type'], 'Feature')
            self.assertEqual('id' in feature, expect_id)
            self.assertEqual(len(feature['properties']), properties_count)
            coords = feature['geometry']['coordinates']
            self.assertEqual(len(coords), 2)
            self.assertTrue(isinstance(coords[0], float))
            self.assertTrue(isinstance(coords[1], float))

    def test_simple(self):
        rows = self._run_json(['examples/dummy.csv'])
        self.assertDictEqual(rows[0], {"a": "1", "c": "3", "b": "2"})

    def test_indentation(self):
        rows = self._run_json(['-i', '4', 'examples/dummy.csv'])
        self.assertDictEqual(rows[0], {"a": "1", "c": "3", "b": "2"})

    def test_keying(self):
        keyed = self._run_json(['-k', 'a', 'examples/dummy.csv'])
        self.assertDictEqual(keyed, {"1": {"a": "1", "c": "3", "b": "2"}})

    def test_duplicate_keys(self):
        # Keying on a non-unique column must raise, not silently drop rows.
        utility = CSVJSON(['-k', 'a', 'examples/dummy3.csv'], six.StringIO())
        self.assertRaises(NonUniqueKeyColumnException, utility.main)

    def test_geojson(self):
        geojson = self._run_json(
            ['--lat', 'latitude', '--lon', 'longitude', 'examples/test_geo.csv'])
        self.assertEqual(geojson['type'], 'FeatureCollection')
        self.assertFalse('crs' in geojson)
        self.assertEqual(geojson['bbox'],
                         [-95.334619, 32.299076986939205, -95.250699, 32.351434])
        self.assertEqual(len(geojson['features']), 17)
        self._check_geojson_features(geojson, expect_id=False, properties_count=10)

    def test_geojson_with_id(self):
        geojson = self._run_json(
            ['--lat', 'latitude', '--lon', 'longitude', '-k', 'slug',
             'examples/test_geo.csv'])
        self.assertEqual(geojson['type'], 'FeatureCollection')
        self.assertFalse('crs' in geojson)
        self.assertEqual(geojson['bbox'],
                         [-95.334619, 32.299076986939205, -95.250699, 32.351434])
        self.assertEqual(len(geojson['features']), 17)
        # Keying moves the slug column out of properties and into the id.
        self._check_geojson_features(geojson, expect_id=True, properties_count=9)

    def test_geojson_with_crs(self):
        geojson = self._run_json(
            ['--lat', 'latitude', '--lon', 'longitude', '--crs', 'EPSG:4269',
             'examples/test_geo.csv'])
        self.assertEqual(geojson['type'], 'FeatureCollection')
        self.assertTrue('crs' in geojson)
        self.assertEqual(geojson['bbox'],
                         [-95.334619, 32.299076986939205, -95.250699, 32.351434])
        self.assertEqual(len(geojson['features']), 17)
        crs = geojson['crs']
        self.assertEqual(crs['type'], 'name')
        self.assertEqual(crs['properties']['name'], 'EPSG:4269')

    def test_json_streaming(self):
        # --stream emits one JSON object per line instead of a single array.
        lines = self._run(['--stream', 'examples/dummy3.csv']).splitlines()
        records = [json.loads(line) for line in lines]
        self.assertEqual(len(records), 2)
        self.assertDictEqual(records[0], {"a": "1", "c": "3", "b": "2"})
        self.assertDictEqual(records[1], {"a": "1", "c": "5", "b": "4"})
| mit |
bluevoda/BloggyBlog | lib/python3.4/site-packages/django/utils/tree.py | 116 | 4871 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
from django.utils.encoding import force_str, force_text
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """
    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'
    def __init__(self, children=None, connector=None, negated=False):
        """
        Constructs a new Node. If no connector is given, the default will be
        used.
        """
        # Copy the children list so callers can't mutate it behind our back.
        self.children = children[:] if children else []
        self.connector = connector or self.default
        self.negated = negated
    # We need this because of django.db.models.query_utils.Q. Q. __init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    @classmethod
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        This is called to create a new instance of this class when we need new
        Nodes (or subclasses) in the internal code in this class. Normally, it
        just shadows __init__(). However, subclasses with an __init__ signature
        that is not an extension of Node.__init__ might need to implement this
        method to allow a Node to create a new instance of them (if they have
        any extra setting up to do).
        """
        # Build a plain Node, then swap the class so subclass __init__ is
        # bypassed entirely.
        obj = Node(children, connector, negated)
        obj.__class__ = cls
        return obj
    def __str__(self):
        template = '(NOT (%s: %s))' if self.negated else '(%s: %s)'
        return force_str(template % (self.connector, ', '.join(force_text(c) for c in self.children)))
    def __repr__(self):
        return str("<%s: %s>") % (self.__class__.__name__, self)
    def __deepcopy__(self, memodict):
        """
        Utility method used by copy.deepcopy().
        """
        obj = Node(connector=self.connector, negated=self.negated)
        obj.__class__ = self.__class__
        obj.children = copy.deepcopy(self.children, memodict)
        return obj
    def __len__(self):
        """
        The size of a node is the number of children it has.
        """
        return len(self.children)
    def __bool__(self):
        """
        For truth value testing.
        """
        return bool(self.children)
    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)
    def __contains__(self, other):
        """
        Returns True if 'other' is a direct child of this instance.
        """
        return other in self.children
    def add(self, data, conn_type, squash=True):
        """
        Combines this tree and the data represented by data using the
        connector conn_type. The combine is done by squashing the node other
        away if possible.
        This tree (self) will never be pushed to a child node of the
        combined tree, nor will the connector or negated properties change.
        The function returns a node which can be used in place of data
        regardless if the node other got squashed or not.
        If `squash` is False the data is prepared and added as a child to
        this tree without further logic.
        """
        if data in self.children:
            return data
        if not squash:
            self.children.append(data)
            return data
        if self.connector == conn_type:
            # We can reuse self.children to append or squash the node other.
            if (isinstance(data, Node) and not data.negated and
                    (data.connector == conn_type or len(data) == 1)):
                # We can squash the other node's children directly into this
                # node. We are just doing (AB)(CD) == (ABCD) here, with the
                # addition that if the length of the other node is 1 the
                # connector doesn't matter. However, for the len(self) == 1
                # case we don't want to do the squashing, as it would alter
                # self.connector.
                self.children.extend(data.children)
                return self
            else:
                # We could use perhaps additional logic here to see if some
                # children could be used for pushdown here.
                self.children.append(data)
                return data
        else:
            # Different connector: wrap the current children in a new node
            # and make [old-self, data] the children of this one.
            obj = self._new_instance(self.children, self.connector,
                                     self.negated)
            self.connector = conn_type
            self.children = [obj, data]
            return data
    def negate(self):
        """
        Negate the sense of the root connector.
        """
        self.negated = not self.negated
| gpl-3.0 |
HKUST-SING/tensorflow | tensorflow/contrib/ndlstm/python/misc_test.py | 93 | 2875 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.ndlstm.python import misc as misc_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
misc = misc_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class LstmMiscTest(test_util.TensorFlowTestCase):
  """Shape and one-hot-encoding tests for the ndlstm `misc` helper ops."""

  def testPixelsAsVectorDims(self):
    """pixels_as_vector collapses (h, w, depth) into a single feature axis."""
    with self.test_session():
      inputs = constant_op.constant(_rand(2, 7, 11, 5))
      outputs = misc.pixels_as_vector(inputs)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 7 * 11 * 5))

  def testPoolAsVectorDims(self):
    """pool_as_vector reduces spatial dims, keeping (batch, depth)."""
    with self.test_session():
      inputs = constant_op.constant(_rand(2, 7, 11, 5))
      outputs = misc.pool_as_vector(inputs)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 5))

  def testOneHotPlanes(self):
    """one_hot_planes maps class ids to (n, 1, 1, classes) one-hot planes."""
    with self.test_session():
      inputs = constant_op.constant([0, 1, 3])
      outputs = misc.one_hot_planes(inputs, 4)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (3, 1, 1, 4))
      target = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
      self.assertAllClose(result.reshape(-1), target.reshape(-1))

  def testOneHotMask(self):
    """one_hot_mask turns an (h, w, 1) label image into (h, w, classes)."""
    with self.test_session():
      data = np.array([[0, 1, 2], [2, 0, 1]]).reshape(2, 3, 1)
      inputs = constant_op.constant(data)
      outputs = misc.one_hot_mask(inputs, 3)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 3, 3))
      target = np.array([[[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [0, 0, 1]],
                         [[0, 0, 1], [1, 0, 0]]]).transpose(1, 2, 0)
      self.assertAllClose(result.reshape(-1), target.reshape(-1))


if __name__ == "__main__":
  test.main()
| apache-2.0 |
40223110/w16b_test | static/Brython3.1.3-20150514-095342/Lib/site-packages/editor.py | 84 | 5287 | # -*- coding: utf-8 -*-
import sys
import time
import traceback
import dis
from browser import document as doc, window, alert, ajax
# set height of container to 66% of screen
_height = doc.documentElement.clientHeight
_s = doc['container']
_s.style.height = '%spx' % int(_height * 0.66)
# Try to use the Ace editor widget; fall back to a plain <textarea> that
# mimics Ace's getValue/setValue API so the rest of the module need not care.
has_ace = True
try:
    editor = window.ace.edit("editor")
    session = editor.getSession()
    session.setMode("ace/mode/python")
    editor.setOptions({
     'width': '390px;',
     'enableLiveAutocompletion': True,
     'enableSnippets': True,
     'highlightActiveLine': False,
     'highlightSelectedWord': True
    })
except:
    from browser import html
    editor = html.TEXTAREA(rows=20, cols=70)
    # Brython's `<=` operator inserts the element into the DOM node.
    doc["editor"] <= editor
    def get_value(): return editor.value
    def set_value(x):editor.value = x
    editor.getValue = get_value
    editor.setValue = set_value
    has_ace = False
# Persist the source between visits when the browser offers local storage.
if sys.has_local_storage:
    from local_storage import storage
else:
    storage = None
if 'set_debug' in doc:
    __BRYTHON__.debug = int(doc['set_debug'].checked)
def reset_src():
    """Load the saved source (or the default demo program) into the Ace editor."""
    if storage is not None and "py_src" in storage:
        editor.setValue(storage["py_src"])
    else:
        # Default demo: a number-guessing game (comments/strings are Chinese
        # and are part of the editable program text — do not alter).
        editor.setValue('''#coding: utf-8
# 猜數字遊戲
import random
標準答案 = random.randint(1, 100)
print(標準答案)
你猜的數字 = int(input("請輸入您所猜的整數:"))
猜測次數 = 1
while 標準答案 != 你猜的數字:
    if 標準答案 < 你猜的數字:
        print("太大了,再猜一次 :)加油")
    else:
        print("太小了,再猜一次 :)加油")
    你猜的數字 = int(input("請輸入您所猜的整數:"))
    猜測次數 += 1
print("猜對了!總共猜了", 猜測次數, "次")
''')
def reset_src_area():
    """Textarea-fallback twin of reset_src(): assigns .value instead of
    calling setValue(), for when Ace is unavailable."""
    if storage and "py_src" in storage:
        editor.value = storage["py_src"]
    else:
        editor.value = '''#coding: utf-8
# 猜數字遊戲
import random
標準答案 = random.randint(1, 100)
print(標準答案)
你猜的數字 = int(input("請輸入您所猜的整數:"))
猜測次數 = 1
while 標準答案 != 你猜的數字:
    if 標準答案 < 你猜的數字:
        print("太大了,再猜一次 :)加油")
    else:
        print("太小了,再猜一次 :)加油")
    你猜的數字 = int(input("請輸入您所猜的整數:"))
    猜測次數 += 1
print("猜對了!總共猜了", 猜測次數, "次")
'''
class cOutput:
    """Minimal file-like sink that appends everything written to the
    on-page console textarea, so print() output shows up in the browser."""

    def write(self, data):
        # Coerce first: callers may hand us non-string objects.
        text = str(data)
        doc["console"].value += text

    def flush(self):
        # Nothing is buffered locally; present only to satisfy the file API.
        pass
# Route print() output and tracebacks into the on-page console textarea.
sys.stdout = cOutput()
sys.stderr = cOutput()
def to_str(xx):
    """Return the string form of *xx* (trivial wrapper around str)."""
    converted = str(xx)
    return converted
# Snapshot of the console contents; refreshed by run() and re-shown by
# show_console().
output = ''
def show_console(ev):
    """Restore the last captured program output into the console textarea."""
    doc["console"].value = output
    doc["console"].cols = 60
# load a Python script
# NOTE(review): an identical load_script is defined again further down in
# this module; this earlier definition is shadowed and therefore dead code.
def load_script(evt):
    # Timestamp query arg busts the browser cache so the file is refetched.
    _name = evt.target.value + '?foo=%s' % time.time()
    editor.setValue(open(_name).read())
def err_msg(timeout=4):
    """Timeout callback for the CPython speed-comparison AJAX request.

    Bug fix: the previous version interpolated a module-global ``timeout``
    that is never defined anywhere in this file, so the callback raised
    NameError exactly when it fired. The value is now a defaulted
    parameter (backward-compatible: Brython's ajax timeout invokes the
    callback with no arguments), and the default of 4 mirrors the
    ``req.set_timeout(4, err_msg)`` call in run().
    """
    doc["result"].html = "server didn't reply after %s seconds" % timeout
def on_complete(req):
    """Completion callback for the AJAX request: echo the server's reply
    (which ends up in the console via the cOutput stdout redirect)."""
    print(req.text)
# run a script, in global namespace if in_globals is True
def run(in_globals=False):
    """Execute the editor's source in the browser, time it, and POST the
    same source to the server for a CPython timing comparison.

    Returns 1 when the script ran without raising, 0 otherwise.
    """
    global output
    doc["console"].value = ''
    src = editor.getValue()
    # Remember the source so it survives a page reload.
    if storage is not None:
        storage["py_src"] = src
    t0 = time.perf_counter()
    try:
        if(in_globals):
            exec(src)
        else:
            # Fresh namespace so user scripts don't clobber this module.
            ns = {}
            exec(src, ns)
        state = 1
    except Exception as exc:
        # Traceback goes to the on-page console via the stderr redirect.
        traceback.print_exc(file=sys.stderr)
        state = 0
    # Capture the program's console output before the timing line is printed.
    output = doc["console"].value
    print('Brython: %6.2f ms' % ((time.perf_counter() - t0) * 1000.0))
    # run with CPython
    req = ajax.ajax()
    req.bind('complete',on_complete)
    req.set_timeout(4,err_msg)
    req.open('POST','/cgi-bin/speed.py',True)
    req.set_header('content-type','application/x-www-form-urlencoded')
    req.send({'src':src})
    return state
# load a Python script
# NOTE(review): duplicate of the load_script defined earlier in this module;
# being the later definition, this is the one actually bound at import time.
def load_script(evt):
    _name=evt.target.value+'?foo=%s' %time.time()
    editor.setValue(open(_name).read())
def show_js(ev):
    """Show the disassembly/translation of the editor's source in the console.

    NOTE(review): in CPython, dis.dis() prints to stdout and returns None,
    which would leave the console set to None here — presumably Brython's
    dis.dis returns a string instead; verify against the Brython stdlib.
    """
    src = editor.getValue()
    doc["console"].value = dis.dis(src)
# Yen defined
def clear_text(ev):
    """Clear the editor, the saved source, and the console in one go."""
    editor.setValue('')
    if sys.has_local_storage:
        storage["py_src"]=''
    doc["console"].value=''
def clear_src(ev):
    """Clear the editor and the locally stored source (console untouched)."""
    editor.setValue('')
    if sys.has_local_storage:
        storage["py_src"]=''
def clear_canvas(ev):
    """Erase the whole plotting canvas, regardless of the current transform."""
    canvas = doc["plotarea"]
    ctx = canvas.getContext("2d")
    # Store the current transformation matrix
    ctx.save();
    # Use the identity matrix while clearing the canvas
    ctx.setTransform(1, 0, 0, 1, 0, 0);
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    # Restore the transform
    ctx.restore();
    #ctx.clearRect(0, 0, canvas.width, canvas.height)
def clear_console(ev):
    """Empty the console textarea only."""
    doc["console"].value=''
def change_theme(evt):
    """Apply the Ace theme chosen in the selector and remember it."""
    _theme=evt.target.value
    editor.setTheme(_theme)
    if storage:
        storage["ace_theme"]=_theme
# Re-apply the theme whenever the selector changes.
doc["ace_theme"].bind("change",change_theme)
def reset_theme():
    """Restore the previously saved Ace theme, if one was stored."""
    if storage:
        if "ace_theme" in storage:
            editor.setTheme(storage["ace_theme"])
            doc["ace_theme"].value=storage["ace_theme"]
def reset_the_src(ev):
    """Event-handler wrapper: reload source (and theme, when Ace is active)."""
    if has_ace:
        reset_src()
        reset_theme()
    else:
        reset_src_area()
# Initial population of the editor at import time, picking the code path
# that matches the widget we managed to create above.
if has_ace:
    reset_src()
else:
    reset_src_area()
| agpl-3.0 |
dims/cinder | cinder/tests/unit/test_hitachi_hbsd_snm2_fc.py | 20 | 29876 | # Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
    # mock side_effect stub: return the canned CLI result keyed by the
    # positional-argument tuple (None for unknown commands).
    return HBSDSNM2FCDriverTest.hsnm_vals.get(args)
def _exec_hsnm_get_lu_ret_err(*args, **kargs):
    # Stub variant: auluref returns a non-zero exit status.
    return HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err.get(args)
def _exec_hsnm_get_lu_vol_type_err(*args, **kargs):
    # Stub variant: auluref reports an unexpected volume type ("DUMMY").
    return HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err.get(args)
def _exec_hsnm_get_lu_dppool_err(*args, **kargs):
    # Stub variant: auluref reports an invalid DP pool ("N/A").
    return HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err.get(args)
def _exec_hsnm_get_lu_size_err(*args, **kargs):
    # Stub variant: auluref reports an off-by-one capacity (2097151 blocks).
    return HBSDSNM2FCDriverTest.hsnm_get_lu_size_err.get(args)
def _exec_hsnm_get_lu_num_port_err(*args, **kargs):
    # Stub variant: auluref reports an unexpected path count (1).
    return HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err.get(args)
class HBSDSNM2FCDriverTest(test.TestCase):
"""Test HBSDSNM2FCDriver."""
audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"
aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"
auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
auluref_result1 = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
aufibre1_result = "Port Information\n\
Port Address\n CTL Port\
Node Name Port Name Setting Current\n 0 0 \
50060E801053C2E0 50060E801053C2E0 0000EF 272700"
auhgmap_result = "Mapping Mode = ON\nPort Group \
H-LUN LUN\n 00 001:HBSD-00 0 1000"
hsnm_vals = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, "", ""],
('aureplicationlocal',
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
[1, "", ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -refer -pvol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -refer -svol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, "", ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, "", ""],
('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}
auluref_ret_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
hsnm_get_lu_ret_err = {
('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""],
}
auluref_vol_type_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 DUMMY"
hsnm_get_lu_vol_type_err = {
('auluref', '-unit None -lu 0'):
[0, "%s" % auluref_vol_type_err, ""],
}
auluref_dppool_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 N/A Enable 0 Normal"
hsnm_get_lu_dppool_err = {
('auluref', '-unit None -lu 0'):
[0, "%s" % auluref_dppool_err, ""],
}
auluref_size_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097151 blocks 256KB N/A 0 Enable 0 Normal"
hsnm_get_lu_size_err = {
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""],
}
auluref_num_port_err = "Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 1 Normal"
hsnm_get_lu_num_port_err = {
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""],
}
# The following information is passed on to tests, when creating a volume
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
'provider_location': '1', 'name': 'test',
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
test_volume = {'name': 'test_volume', 'size': 128,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_error = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '3', 'status': 'available'}
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
'id': 'test-volume-error',
'provider_location': None, 'status': 'available'}
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '1', 'status': 'available'}
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
'id': 'test-volume3',
'volume_metadata': [{'key': 'type',
'value': 'V-VOL'}],
'provider_location': '1', 'status': 'available'}
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
'id': 'test-volume2',
'provider_location': '3', 'status': 'available'}
test_snapshot = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
'provider_location': '1', 'status': 'available'}
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0',
'volume': test_volume_error,
'provider_location': None, 'status': 'available'}
UNIT_NAME = 'HUS110_91122819'
test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
test_existing_no_unit_ref = {'ldev': '0'}
def __init__(self, *args, **kwargs):
super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)
def setUp(self):
super(HBSDSNM2FCDriverTest, self).setUp()
self._setup_config()
self._setup_driver()
def _setup_config(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.hitachi_pool_id = 30
self.configuration.hitachi_target_ports = "00"
self.configuration.hitachi_debug_level = 0
self.configuration.hitachi_serial_number = "None"
self.configuration.hitachi_unit_name = "None"
self.configuration.hitachi_group_request = False
self.configuration.hitachi_zoning_request = False
self.configuration.config_group = "None"
self.configuration.hitachi_ldev_range = [0, 100]
self.configuration.hitachi_default_copy_method = 'SI'
self.configuration.hitachi_copy_check_interval = 1
self.configuration.hitachi_copy_speed = 3
def _setup_driver(self):
self.driver = hbsd_fc.HBSDFCDriver(
configuration=self.configuration)
context = None
db = None
self.driver.common = hbsd_common.HBSDCommon(
self.configuration, self.driver, context, db)
self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
self.driver.common.pair_flock = \
self.driver.common.command.set_pair_flock()
self.driver.common.horcmgr_flock = \
self.driver.common.command.set_horcmgr_flock()
self.driver.do_setup_status.set()
    # API test cases

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume(self, arg1, arg2, arg3):
        """create_volume returns the new LDEV id as provider_location."""
        ret = self.driver.create_volume(self._VOLUME)
        vol = self._VOLUME.copy()
        vol['provider_location'] = ret['provider_location']
        self.assertEqual('1', vol['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_error(self, arg1, arg2, arg3):
        """create_volume raises HBSDCmdError for the error fixture volume."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.create_volume,
                          self.test_volume_error)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_get_volume_stats(self, arg1, arg2):
        """get_volume_stats reports 'Hitachi' as the vendor name."""
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('Hitachi', stats['vendor_name'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_get_volume_stats_error(self, arg1, arg2):
        """An unknown pool id makes get_volume_stats return empty stats."""
        self.configuration.hitachi_pool_id = 29
        stats = self.driver.get_volume_stats(True)
        self.assertEqual({}, stats)
        # Restore the pool id so subsequent tests see the original value.
        self.configuration.hitachi_pool_id = 30
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_extend_volume(self, arg1, arg2):
        """extend_volume completes without raising for a valid volume."""
        self.driver.extend_volume(self._VOLUME, 256)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_extend_volume_error(self, arg1, arg2):
        """extend_volume raises HBSDError for the error fixture volume."""
        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
                          self.test_volume_error3, 256)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_volume(self, arg1, arg2):
        """delete_volume completes without raising for a valid volume."""
        self.driver.delete_volume(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_delete_volume_error(self, arg1, arg2):
        """delete_volume raises HBSDCmdError for the error fixture volume."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.delete_volume,
                          self.test_volume_error4)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
        """create_snapshot returns the new LDEV id as provider_location."""
        # a source volume must exist before the snapshot is taken
        ret = self.driver.create_volume(self._VOLUME)
        ret = self.driver.create_snapshot(self.test_snapshot)
        self.assertEqual('1', ret['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
        """create_snapshot raises HBSDCmdError for the error snapshot."""
        self.assertRaises(exception.HBSDCmdError,
                          self.driver.create_snapshot,
                          self.test_snapshot_error2)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot_error(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot_error2)
return
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
        """create_volume_from_snapshot yields a non-None volume result."""
        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
                                                      self.test_snapshot)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
        """create_volume_from_snapshot raises HBSDError for the error volume."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_volume_from_snapshot,
                          self.test_volume_error2, self.test_snapshot)
        return
    # NOTE: unlike the tests above, get_process_lock is the innermost patch
    # here, so it is injected as arg1.
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=_VOLUME)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
        """create_cloned_volume yields a non-None volume result."""
        vol = self.driver.create_cloned_volume(self._VOLUME,
                                               self.test_volume)
        self.assertIsNotNone(vol)
        return

    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
                       return_value={'dummy_volume_meta': 'meta'})
    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
                       return_value=test_volume_error1)
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
        """create_cloned_volume raises HBSDError for the error source volume."""
        self.assertRaises(exception.HBSDError,
                          self.driver.create_cloned_volume,
                          self._VOLUME, self.test_volume_error1)
        return
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_initialize_connection(self, arg1, arg2):
        """initialize_connection returns FC target data for a valid WWPN."""
        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
        rc = self.driver.initialize_connection(self._VOLUME, connector)
        self.assertEqual('fibre_channel', rc['driver_volume_type'])
        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
        self.assertEqual(1, rc['data']['target_lun'])
        return

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_initialize_connection_error(self, arg1, arg2):
        """initialize_connection raises HBSDError for a malformed WWPN."""
        connector = {'wwpns': 'x', 'ip': '0xc0a80100'}
        self.assertRaises(exception.HBSDError,
                          self.driver.initialize_connection,
                          self._VOLUME, connector)
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_terminate_connection(self, arg1):
        """terminate_connection returns the FC target data of the detach."""
        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
        rc = self.driver.terminate_connection(self._VOLUME, connector)
        self.assertEqual('fibre_channel', rc['driver_volume_type'])
        self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn'])
        return

    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_terminate_connection_error(self, arg1):
        """terminate_connection raises HBSDError when 'wwpns' is missing."""
        connector = {'ip': '0xc0a80100'}
        self.assertRaises(exception.HBSDError,
                          self.driver.terminate_connection,
                          self._VOLUME, connector)
        return
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_manage_existing(self, arg1, arg2):
        """manage_existing stores the referenced LDEV as provider_location."""
        rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
        self.assertEqual(0, rc['provider_location'])

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size(self, arg1, arg2, arg3):
        """manage_existing_get_size reports size 1 for the valid reference."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        size = self.driver.manage_existing_get_size(self._VOLUME,
                                                    self.test_existing_ref)
        self.assertEqual(1, size)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_unit_ref)
    # The following tests swap in exec_hsnm side effects that corrupt a
    # specific field of the 'get lu' output; every variant must surface as
    # ManageExistingInvalidReference.

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_ret_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3):
        """A non-zero return code from the backend command is rejected."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_vol_type_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3):
        """An unexpected volume type in the LU output is rejected."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_dppool_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_dppool_err(self, arg1, arg2, arg3):
        """A bad DP pool value in the LU output is rejected."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_size_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3):
        """A bad size value in the LU output is rejected."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
                       side_effect=_exec_hsnm_get_lu_num_port_err)
    @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
    def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3):
        """A bad port count in the LU output is rejected."""
        self.configuration.hitachi_unit_name = self.UNIT_NAME
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, self._VOLUME,
                          self.test_existing_ref)
    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_unmanage(self, arg1, arg2):
        """unmanage completes without raising for a managed volume."""
        self.driver.unmanage(self._VOLUME)

    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
    def test_unmanage_busy(self, arg1, arg2):
        """unmanage raises HBSDVolumeIsBusy for the busy fixture volume."""
        self.assertRaises(exception.HBSDVolumeIsBusy,
                          self.driver.unmanage, self.test_volume_error3)
| apache-2.0 |
TeslaProject/external_chromium_org | third_party/cython/src/Cython/Compiler/Tests/TestSignatureMatching.py | 125 | 3408 | import unittest
from Cython.Compiler import PyrexTypes as pt
from Cython.Compiler.ExprNodes import NameNode
from Cython.Compiler.PyrexTypes import CFuncTypeArg
def cfunctype(*arg_types):
    """Build a C function type returning int and taking *arg_types*."""
    args = [CFuncTypeArg("name", t, None) for t in arg_types]
    return pt.CFuncType(pt.c_int_type, args)
def cppclasstype(name, base_classes):
    """Build a C++ class type whose cname is *name* prefixed with 'CPP_'."""
    cname = 'CPP_' + name
    return pt.CppClassType(name, None, cname, base_classes)
class SignatureMatcherTest(unittest.TestCase):
    """
    Test the signature matching algorithm for overloaded signatures.
    """

    def assertMatches(self, expected_type, arg_types, functions):
        # Build fake argument nodes of the requested types and ask
        # best_match to choose among the candidate overloads.
        args = [ NameNode(None, type=arg_type) for arg_type in arg_types ]
        match = pt.best_match(args, functions)
        if expected_type is not None:
            self.assertNotEqual(None, match)
        # NOTE(review): if best_match finds nothing and expected_type is
        # None, match.type raises AttributeError — all callers below pass a
        # concrete expected type, so the branch is never exercised.
        self.assertEqual(expected_type, match.type)

    def test_cpp_reference_single_arg(self):
        # Each of the int/long/double reference overloads must bind its
        # exact argument type.
        function_types = [
            cfunctype(pt.CReferenceType(pt.c_int_type)),
            cfunctype(pt.CReferenceType(pt.c_long_type)),
            cfunctype(pt.CReferenceType(pt.c_double_type)),
        ]
        functions = [ NameNode(None, type=t) for t in function_types ]
        self.assertMatches(function_types[0], [pt.c_int_type], functions)
        self.assertMatches(function_types[1], [pt.c_long_type], functions)
        self.assertMatches(function_types[2], [pt.c_double_type], functions)

    def test_cpp_reference_two_args(self):
        function_types = [
            cfunctype(
                pt.CReferenceType(pt.c_int_type), pt.CReferenceType(pt.c_long_type)),
            cfunctype(
                pt.CReferenceType(pt.c_long_type), pt.CReferenceType(pt.c_long_type)),
        ]
        functions = [ NameNode(None, type=t) for t in function_types ]
        self.assertMatches(function_types[0], [pt.c_int_type, pt.c_long_type], functions)
        self.assertMatches(function_types[1], [pt.c_long_type, pt.c_long_type], functions)
        # a (long, int) call widens its int argument to the (long, long) overload
        self.assertMatches(function_types[1], [pt.c_long_type, pt.c_int_type], functions)

    def test_cpp_reference_cpp_class(self):
        # Distinct C++ class references must match their own overload.
        classes = [ cppclasstype("Test%d"%i, []) for i in range(2) ]
        function_types = [
            cfunctype(pt.CReferenceType(classes[0])),
            cfunctype(pt.CReferenceType(classes[1])),
        ]
        functions = [ NameNode(None, type=t) for t in function_types ]
        self.assertMatches(function_types[0], [classes[0]], functions)
        self.assertMatches(function_types[1], [classes[1]], functions)

    def test_cpp_reference_cpp_class_and_int(self):
        # Mixed (class reference, integer) overloads: both the class and the
        # integer argument participate in the selection.
        classes = [ cppclasstype("Test%d"%i, []) for i in range(2) ]
        function_types = [
            cfunctype(pt.CReferenceType(classes[0]), pt.c_int_type),
            cfunctype(pt.CReferenceType(classes[0]), pt.c_long_type),
            cfunctype(pt.CReferenceType(classes[1]), pt.c_int_type),
            cfunctype(pt.CReferenceType(classes[1]), pt.c_long_type),
        ]
        functions = [ NameNode(None, type=t) for t in function_types ]
        self.assertMatches(function_types[0], [classes[0], pt.c_int_type], functions)
        self.assertMatches(function_types[1], [classes[0], pt.c_long_type], functions)
        self.assertMatches(function_types[2], [classes[1], pt.c_int_type], functions)
        self.assertMatches(function_types[3], [classes[1], pt.c_long_type], functions)
| bsd-3-clause |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/account/edi/invoice.py | 342 | 13984 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from werkzeug import url_encode
# Fields of account.invoice.line included in an exported EDI document.
INVOICE_LINE_EDI_STRUCT = {
    'name': True,
    'origin': True,
    'uos_id': True,
    'product_id': True,
    'price_unit': True,
    'quantity': True,
    'discount': True,
    # fields used for web preview only - discarded on import
    'price_subtotal': True,
}

# Fields of account.invoice.tax included in an exported EDI document.
INVOICE_TAX_LINE_EDI_STRUCT = {
    'name': True,
    'base': True,
    'amount': True,
    'manual': True,
    'sequence': True,
    'base_amount': True,
    'tax_amount': True,
}

# Top-level invoice fields included in an exported EDI document; the nested
# dicts describe how the one2many line fields are exported.
INVOICE_EDI_STRUCT = {
    'name': True,
    'origin': True,
    'company_id': True, # -> to be changed into partner
    'type': True, # -> reversed at import
    'internal_number': True, # -> reference at import
    'comment': True,
    'date_invoice': True,
    'date_due': True,
    'partner_id': True,
    'payment_term': True,
    #custom: currency_id
    'invoice_line': INVOICE_LINE_EDI_STRUCT,
    'tax_line': INVOICE_TAX_LINE_EDI_STRUCT,
    # fields used for web preview only - discarded on import
    #custom: 'partner_ref'
    'amount_total': True,
    'amount_untaxed': True,
    'amount_tax': True,
}
class account_invoice(osv.osv, EDIMixin):
    """EDI export/import behaviour for customer and supplier invoices."""
    _inherit = 'account.invoice'

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Exports a supplier or customer invoice"""
        edi_struct = dict(edi_struct or INVOICE_EDI_STRUCT)
        res_company = self.pool.get('res.company')
        res_partner = self.pool.get('res.partner')
        edi_doc_list = []
        for invoice in records:
            # generate the main report
            self._edi_generate_report_attachment(cr, uid, invoice, context=context)
            edi_doc = super(account_invoice,self).edi_export(cr, uid, [invoice], edi_struct, context)[0]
            # enrich the generic EDI document with invoice-specific data
            edi_doc.update({
                'company_address': res_company.edi_export_address(cr, uid, invoice.company_id, context=context),
                'company_paypal_account': invoice.company_id.paypal_account,
                'partner_address': res_partner.edi_export(cr, uid, [invoice.partner_id], context=context)[0],
                'currency': self.pool.get('res.currency').edi_export(cr, uid, [invoice.currency_id], context=context)[0],
                'partner_ref': invoice.reference or False,
            })
            edi_doc_list.append(edi_doc)
        return edi_doc_list

    def _edi_tax_account(self, cr, uid, invoice_type='out_invoice', context=None):
        """Return a default account for imported tax lines, or False."""
        #TODO/FIXME: should select proper Tax Account
        account_pool = self.pool.get('account.account')
        account_ids = account_pool.search(cr, uid, [('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')])
        tax_account = False
        if account_ids:
            # arbitrary pick: first matching account (see FIXME above)
            tax_account = account_pool.browse(cr, uid, account_ids[0])
        return tax_account

    def _edi_invoice_account(self, cr, uid, partner_id, invoice_type, context=None):
        """Return the partner's receivable (customer invoices) or payable
        (supplier invoices) account depending on *invoice_type*."""
        res_partner = self.pool.get('res.partner')
        partner = res_partner.browse(cr, uid, partner_id, context=context)
        if invoice_type in ('out_invoice', 'out_refund'):
            invoice_account = partner.property_account_receivable
        else:
            invoice_account = partner.property_account_payable
        return invoice_account

    def _edi_product_account(self, cr, uid, product_id, invoice_type, context=None):
        """Return the product's income/expense account, falling back to its
        category's default, depending on *invoice_type*."""
        product_pool = self.pool.get('product.product')
        product = product_pool.browse(cr, uid, product_id, context=context)
        if invoice_type in ('out_invoice','out_refund'):
            account = product.property_account_income or product.categ_id.property_account_income_categ
        else:
            account = product.property_account_expense or product.categ_id.property_account_expense_categ
        return account

    def _edi_import_company(self, cr, uid, edi_document, context=None):
        """Import the issuing company of *edi_document* as a local partner,
        repoint the document's partner fields at it, and return the new
        partner's database id."""
        # TODO: for multi-company setups, we currently import the document in the
        #       user's current company, but we should perhaps foresee a way to select
        #       the desired company among the user's allowed companies
        self._edi_requires_attributes(('company_id','company_address','type'), edi_document)
        res_partner = self.pool.get('res.partner')
        xid, company_name = edi_document.pop('company_id')
        # Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
        company_address_edi = edi_document.pop('company_address')
        company_address_edi['name'] = company_name
        company_address_edi['is_company'] = True
        company_address_edi['__import_model'] = 'res.partner'
        company_address_edi['__id'] = xid  # override address ID, as of v7 they should be the same anyway
        if company_address_edi.get('logo'):
            company_address_edi['image'] = company_address_edi.pop('logo')
        invoice_type = edi_document['type']
        # flag the new partner as customer or supplier depending on direction
        if invoice_type.startswith('out_'):
            company_address_edi['customer'] = True
        else:
            company_address_edi['supplier'] = True
        partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
        # modify edi_document to refer to new partner
        partner = res_partner.browse(cr, uid, partner_id, context=context)
        partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
        edi_document['partner_id'] = partner_edi_m2o
        edi_document.pop('partner_address', None)  # ignored, that's supposed to be our own address!
        return partner_id

    def edi_import(self, cr, uid, edi_document, context=None):
        """ During import, invoices will import the company that is provided in the invoice as
            a new partner (e.g. supplier company for a customer invoice will be come a supplier
            record for the new invoice.
            Summary of tasks that need to be done:
                - import company as a new partner, if type==in then supplier=1, else customer=1
                - partner_id field is modified to point to the new partner
                - company_address data used to add address to new partner
                - change type: out_invoice'<->'in_invoice','out_refund'<->'in_refund'
                - reference: should contain the value of the 'internal_number'
                - reference_type: 'none'
                - internal number: reset to False, auto-generated
                - journal_id: should be selected based on type: simply put the 'type'
                  in the context when calling create(), will be selected correctly
                - payment_term: if set, create a default one based on name...
                - for invoice lines, the account_id value should be taken from the
                  product's default, i.e. from the default category, as it will not
                  be provided.
                - for tax lines, we disconnect from the invoice.line, so all tax lines
                  will be of type 'manual', and default accounts should be picked based
                  on the tax config of the DB where it is imported.
        """
        if context is None:
            context = {}
        self._edi_requires_attributes(('company_id','company_address','type','invoice_line','currency'), edi_document)

        # extract currency info
        res_currency = self.pool.get('res.currency')
        currency_info = edi_document.pop('currency')
        currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
        currency = res_currency.browse(cr, uid, currency_id)
        edi_document['currency_id'] = self.edi_m2o(cr, uid, currency, context=context)

        # change type: out_invoice'<->'in_invoice','out_refund'<->'in_refund'
        invoice_type = edi_document['type']
        invoice_type = invoice_type.startswith('in_') and invoice_type.replace('in_','out_') or invoice_type.replace('out_','in_')
        edi_document['type'] = invoice_type

        # import company as a new partner
        partner_id = self._edi_import_company(cr, uid, edi_document, context=context)

        # Set Account
        invoice_account = self._edi_invoice_account(cr, uid, partner_id, invoice_type, context=context)
        edi_document['account_id'] = invoice_account and self.edi_m2o(cr, uid, invoice_account, context=context) or False

        # reference: should contain the value of the 'internal_number'
        edi_document['reference'] = edi_document.get('internal_number', False)
        # reference_type: 'none'
        edi_document['reference_type'] = 'none'

        # internal number: reset to False, auto-generated
        edi_document['internal_number'] = False

        # discard web preview fields, if present
        edi_document.pop('partner_ref', None)

        # journal_id: should be selected based on type: simply put the 'type' in the context when calling create(), will be selected correctly
        context = dict(context, type=invoice_type)

        # for invoice lines, the account_id value should be taken from the product's default, i.e. from the default category, as it will not be provided.
        for edi_invoice_line in edi_document['invoice_line']:
            product_info = edi_invoice_line['product_id']
            product_id = self.edi_import_relation(cr, uid, 'product.product', product_info[1],
                                                  product_info[0], context=context)
            account = self._edi_product_account(cr, uid, product_id, invoice_type, context=context)
            # TODO: could be improved with fiscal positions perhaps
            # account = fpos_obj.map_account(cr, uid, fiscal_position_id, account.id)
            edi_invoice_line['account_id'] = self.edi_m2o(cr, uid, account, context=context) if account else False

            # discard web preview fields, if present
            edi_invoice_line.pop('price_subtotal', None)

        # for tax lines, we disconnect from the invoice.line, so all tax lines will be of type 'manual', and default accounts should be picked based
        # on the tax config of the DB where it is imported.
        tax_account = self._edi_tax_account(cr, uid, context=context)
        tax_account_info = self.edi_m2o(cr, uid, tax_account, context=context)
        for edi_tax_line in edi_document.get('tax_line', []):
            edi_tax_line['account_id'] = tax_account_info
            edi_tax_line['manual'] = True

        return super(account_invoice,self).edi_import(cr, uid, edi_document, context=context)

    def _edi_record_display_action(self, cr, uid, id, context=None):
        """Returns an appropriate action definition dict for displaying
           the record with ID ``rec_id``.

           :param int id: database ID of record to display
           :return: action definition dict
        """
        action = super(account_invoice,self)._edi_record_display_action(cr, uid, id, context=context)
        try:
            invoice = self.browse(cr, uid, id, context=context)
            # pick the customer or supplier invoice form depending on type
            if 'out_' in invoice.type:
                view_ext_id = 'invoice_form'
                journal_type = 'sale'
            else:
                view_ext_id = 'invoice_supplier_form'
                journal_type = 'purchase'
            ctx = "{'type': '%s', 'journal_type': '%s'}" % (invoice.type, journal_type)
            action.update(context=ctx)
            view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', view_ext_id)[1]
            action.update(views=[(view_id,'form'), (False, 'tree')])
        except ValueError:
            # ignore if views are missing
            pass
        return action

    def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
        """Function field: build a PayPal "pay now" URL for open customer
        invoices of companies that configured a paypal_account; False for
        every other invoice."""
        res = dict.fromkeys(ids, False)
        for inv in self.browse(cr, uid, ids, context=context):
            if inv.type == 'out_invoice' and inv.company_id.paypal_account:
                params = {
                    "cmd": "_xclick",
                    "business": inv.company_id.paypal_account,
                    "item_name": "%s Invoice %s" % (inv.company_id.name, inv.number or ''),
                    "invoice": inv.number,
                    "amount": inv.residual,
                    "currency_code": inv.currency_id.name,
                    "button_subtype": "services",
                    "no_note": "1",
                    "bn": "OpenERP_Invoice_PayNow_" + inv.currency_id.name,
                }
                res[inv.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
        return res

    _columns = {
        'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
    }
class account_invoice_line(osv.osv, EDIMixin):
    # EDI mixin only: line fields are exported via INVOICE_LINE_EDI_STRUCT.
    _inherit='account.invoice.line'


class account_invoice_tax(osv.osv, EDIMixin):
    # EDI mixin only: tax lines are re-created as 'manual' lines on import.
    _inherit = "account.invoice.tax"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sauloal/PiCastPy | werkzeug/testsuite/security.py | 66 | 1838 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the security helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.security import check_password_hash, generate_password_hash, \
safe_join
class SecurityTestCase(WerkzeugTestCase):

    def test_password_hashing(self):
        """Test the password hashing and password hash checking.

        Uses unittest assertion methods instead of bare ``assert`` so the
        checks survive ``python -O`` (which strips asserts) and report
        useful failure messages.
        """
        hash1 = generate_password_hash('default')
        hash2 = generate_password_hash(u'default', method='sha1')
        # salted hashes of the same password must differ
        self.assertNotEqual(hash1, hash2)
        self.assertTrue(check_password_hash(hash1, 'default'))
        self.assertTrue(check_password_hash(hash2, 'default'))
        self.assertTrue(hash1.startswith('sha1$'))
        self.assertTrue(hash2.startswith('sha1$'))

        # 'plain' method stores the password verbatim (no salt, no digest)
        fakehash = generate_password_hash('default', method='plain')
        self.assertEqual(fakehash, 'plain$$default')
        self.assertTrue(check_password_hash(fakehash, 'default'))

        mhash = generate_password_hash(u'default', method='md5')
        self.assertTrue(mhash.startswith('md5$'))
        self.assertTrue(check_password_hash(mhash, 'default'))

        # legacy unsalted md5 hashes must still verify, as str and unicode
        legacy = 'md5$$c21f969b5f03d33d43e04f8f136e7682'
        self.assertTrue(check_password_hash(legacy, 'default'))

        legacy = u'md5$$c21f969b5f03d33d43e04f8f136e7682'
        self.assertTrue(check_password_hash(legacy, 'default'))

    def test_safe_join(self):
        """Test the safe joining helper"""
        self.assertEqual(safe_join('foo', 'bar/baz'),
                         os.path.join('foo', 'bar/baz'))
        # path traversal outside the base directory must be refused
        self.assertIsNone(safe_join('foo', '../bar/baz'))
        if os.name == 'nt':
            # backslash separators are also rejected on Windows
            self.assertIsNone(safe_join('foo', 'foo\\bar'))
def suite():
    """Collect the security test cases into a single suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(SecurityTestCase))
    return tests
| mit |
cliffe/SecGen | modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/test_install_headers.py | 141 | 1269 | """Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.test_support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             support.EnvironGuard,
                             unittest.TestCase):

    def test_simple_run(self):
        """install_headers reports its inputs and installs one output per
        input header."""
        # we have two headers
        header_list = self.mkdtemp()
        header1 = os.path.join(header_list, 'header1')
        header2 = os.path.join(header_list, 'header2')
        self.write_file(header1)
        self.write_file(header2)
        headers = [header1, header2]

        pkg_dir, dist = self.create_dist(headers=headers)
        cmd = install_headers(dist)
        self.assertEqual(cmd.get_inputs(), headers)

        # let's run the command
        cmd.install_dir = os.path.join(pkg_dir, 'inst')
        cmd.ensure_finalized()
        cmd.run()

        # let's check the results
        self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
    """Return a unittest suite covering InstallHeadersTestCase."""
    return unittest.makeSuite(InstallHeadersTestCase)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_unittest(test_suite())
| gpl-3.0 |
ujenmr/ansible | lib/ansible/module_utils/network/fortimanager/fortimanager.py | 9 | 20630 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.network.fortimanager.common import FMGR_RC
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import scrub_dict
# check for pyFMG lib - DEPRECATING
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
# check for debug lib
try:
from ansible.module_utils.network.fortimanager.fortimanager_debug import debug_dump
HAS_FMGR_DEBUG = True
except ImportError:
HAS_FMGR_DEBUG = False
# BEGIN HANDLER CLASSES
class FortiManagerHandler(object):
    """Request/response handler used by FortiManager Ansible modules.

    Wraps the httpapi connection plugin: formats API payloads, sends
    requests, and standardizes how module results are reported to Ansible.
    """
    def __init__(self, conn, check_mode=False):
        # conn: the Ansible connection plugin object (must expose send_request()).
        self._conn = conn
        # check_mode: True when Ansible runs with --check.
        self._check_mode = check_mode
        # NOTE(review): the FMGRCommon *class* (not an instance) is stored here;
        # this only works if the members used (e.g. format_request) are
        # static/class-level -- confirm against fortimanager.common.
        self._tools = FMGRCommon
    def process_request(self, url, datagram, method):
        """
        Formats and Runs the API Request via Connection Plugin. Streamlined for use FROM Modules.
        :param url: Connection URL to access
        :type url: string
        :param datagram: The prepared payload for the API Request in dictionary format
        :type datagram: dict
        :param method: The preferred API Request method (GET, ADD, POST, etc....)
        :type method: basestring
        :return: Dictionary containing results of the API Request via Connection Plugin
        :rtype: dict
        """
        data = self._tools.format_request(method, url, **datagram)
        response = self._conn.send_request(method, data)
        if HAS_FMGR_DEBUG:
            # Debug dumping is strictly best-effort; never let it break a run.
            try:
                debug_dump(response, datagram, url, method)
            except BaseException:
                pass
        return response
    def govern_response(self, module, results, msg=None, good_codes=None,
                        stop_on_fail=None, stop_on_success=None, skipped=None,
                        changed=None, unreachable=None, failed=None, success=None, changed_if_success=None,
                        ansible_facts=None):
        """
        This function will attempt to apply default values to canned responses from FortiManager we know of.
        This saves time, and turns the response in the module into a "one-liner", while still giving us...
        the flexibility to directly use return_response in modules if we have too. This function saves repeated code.
        :param module: The Ansible Module CLASS object, used to run fail/exit json
        :type module: object
        :param msg: An overridable custom message from the module that called this.
        :type msg: string
        :param results: A dictionary object containing an API call results
        :type results: dict
        :param good_codes: A list of exit codes considered successful from FortiManager
        :type good_codes: list
        :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
        :type stop_on_fail: boolean
        :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
        :type stop_on_success: boolean
        :param changed: If True, tells Ansible that object was changed (default: false)
        :type changed: boolean
        :param skipped: If True, tells Ansible that object was skipped (default: false)
        :type skipped: boolean
        :param unreachable: If True, tells Ansible that object was unreachable (default: false)
        :type unreachable: boolean
        :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
        :type failed: boolean
        :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
        :type success: boolean
        :param changed_if_success: If True, defaults to changed if successful if you specify or not"
        :type changed_if_success: boolean
        :param ansible_facts: A prepared dictionary of ansible facts from the execution.
        :type ansible_facts: dict
        :return: the message returned by :meth:`return_response` (when that
            call does not terminate the module via exit_json).
        """
        if module is None and results is None:
            raise FMGBaseException("govern_response() was called without a module and/or results tuple! Fix!")
        # Get the Return code from results
        try:
            rc = results[0]
        except BaseException:
            raise FMGBaseException("govern_response() was called without the return code at results[0]")
        # init a few items
        rc_data = None
        # Get the default values for the said return code.
        try:
            rc_codes = FMGR_RC.get('fmgr_return_codes')
            rc_data = rc_codes.get(rc)
        except BaseException:
            pass
        if not rc_data:
            rc_data = {}
        # ONLY add to overrides if not none -- This is very important that the keys aren't added at this stage
        # if they are empty. And there aren't that many, so let's just do a few if then statements.
        if good_codes is not None:
            rc_data["good_codes"] = good_codes
        if stop_on_fail is not None:
            rc_data["stop_on_fail"] = stop_on_fail
        if stop_on_success is not None:
            rc_data["stop_on_success"] = stop_on_success
        if skipped is not None:
            rc_data["skipped"] = skipped
        if changed is not None:
            rc_data["changed"] = changed
        if unreachable is not None:
            rc_data["unreachable"] = unreachable
        if failed is not None:
            rc_data["failed"] = failed
        if success is not None:
            rc_data["success"] = success
        if changed_if_success is not None:
            rc_data["changed_if_success"] = changed_if_success
        if results is not None:
            rc_data["results"] = results
        if msg is not None:
            rc_data["msg"] = msg
        if ansible_facts is None:
            rc_data["ansible_facts"] = {}
        else:
            rc_data["ansible_facts"] = ansible_facts
        # Delegate the actual reporting (and possible exit_json) to
        # return_response, filling any key the caller/canned data left unset.
        return self.return_response(module=module,
                                    results=results,
                                    msg=rc_data.get("msg", "NULL"),
                                    good_codes=rc_data.get("good_codes", (0,)),
                                    stop_on_fail=rc_data.get("stop_on_fail", True),
                                    stop_on_success=rc_data.get("stop_on_success", False),
                                    skipped=rc_data.get("skipped", False),
                                    changed=rc_data.get("changed", False),
                                    changed_if_success=rc_data.get("changed_if_success", False),
                                    unreachable=rc_data.get("unreachable", False),
                                    failed=rc_data.get("failed", False),
                                    success=rc_data.get("success", False),
                                    ansible_facts=rc_data.get("ansible_facts", dict()))
    @staticmethod
    def return_response(module, results, msg="NULL", good_codes=(0,),
                        stop_on_fail=True, stop_on_success=False, skipped=False,
                        changed=False, unreachable=False, failed=False, success=False, changed_if_success=True,
                        ansible_facts=()):
        """
        This function controls the logout and error reporting after an method or function runs. The exit_json for
        ansible comes from logic within this function. If this function returns just the msg, it means to continue
        execution on the playbook. It is called from the ansible module, or from the self.govern_response function.
        :param module: The Ansible Module CLASS object, used to run fail/exit json
        :type module: object
        :param msg: An overridable custom message from the module that called this.
        :type msg: string
        :param results: A dictionary object containing an API call results
        :type results: dict
        :param good_codes: A list of exit codes considered successful from FortiManager
        :type good_codes: list
        :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
        :type stop_on_fail: boolean
        :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
        :type stop_on_success: boolean
        :param changed: If True, tells Ansible that object was changed (default: false)
        :type changed: boolean
        :param skipped: If True, tells Ansible that object was skipped (default: false)
        :type skipped: boolean
        :param unreachable: If True, tells Ansible that object was unreachable (default: false)
        :type unreachable: boolean
        :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
        :type failed: boolean
        :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
        :type success: boolean
        :param changed_if_success: If True, defaults to changed if successful if you specify or not"
        :type changed_if_success: boolean
        :param ansible_facts: A prepared dictionary of ansible facts from the execution.
        :type ansible_facts: dict
        :return: A string object that contains an error message
        :rtype: str
        """
        # VALIDATION ERROR
        if (len(results) == 0) or (failed and success) or (changed and unreachable):
            module.exit_json(msg="Handle_response was called with no results, or conflicting failed/success or "
                                 "changed/unreachable parameters. Fix the exit code on module. "
                                 "Generic Failure", failed=True)
        # IDENTIFY SUCCESS/FAIL IF NOT DEFINED
        if not failed and not success:
            if len(results) > 0:
                if results[0] not in good_codes:
                    failed = True
                elif results[0] in good_codes:
                    success = True
        if len(results) > 0:
            # IF NO MESSAGE WAS SUPPLIED, GET IT FROM THE RESULTS, IF THAT DOESN'T WORK, THEN WRITE AN ERROR MESSAGE
            if msg == "NULL":
                try:
                    msg = results[1]['status']['message']
                except BaseException:
                    msg = "No status message returned at results[1][status][message], " \
                          "and none supplied to msg parameter for handle_response."
            if failed:
                # BECAUSE SKIPPED/FAILED WILL OFTEN OCCUR ON CODES THAT DON'T GET INCLUDED, THEY ARE CONSIDERED FAILURES
                # HOWEVER, THEY ARE MUTUALLY EXCLUSIVE, SO IF IT IS MARKED SKIPPED OR UNREACHABLE BY THE MODULE LOGIC
                # THEN REMOVE THE FAILED FLAG SO IT DOESN'T OVERRIDE THE DESIRED STATUS OF SKIPPED OR UNREACHABLE.
                if failed and skipped:
                    failed = False
                if failed and unreachable:
                    failed = False
                if stop_on_fail:
                    module.exit_json(msg=msg, failed=failed, changed=changed, unreachable=unreachable, skipped=skipped,
                                     results=results[1], ansible_facts=ansible_facts, rc=results[0],
                                     invocation={"module_args": ansible_facts["ansible_params"]})
            elif success:
                if changed_if_success:
                    changed = True
                    success = False
                if stop_on_success:
                    module.exit_json(msg=msg, success=success, changed=changed, unreachable=unreachable,
                                     skipped=skipped, results=results[1], ansible_facts=ansible_facts, rc=results[0],
                                     invocation={"module_args": ansible_facts["ansible_params"]})
        # Returning (instead of exiting) tells the calling module to continue.
        return msg
    def construct_ansible_facts(self, response, ansible_params, paramgram, *args, **kwargs):
        """
        Constructs a dictionary to return to ansible facts, containing various information about the execution.
        :param response: Contains the response from the FortiManager.
        :type response: dict
        :param ansible_params: Contains the parameters Ansible was called with.
        :type ansible_params: dict
        :param paramgram: Contains the paramgram passed to the modules' local modify function.
        :type paramgram: dict
        :param args: Free-form arguments that could be added.
        :param kwargs: Free-form keyword arguments that could be added.
        :return: A dictionary containing lots of information to append to Ansible Facts.
        :rtype: dict
        """
        # scrub_dict strips empty/None entries before they reach Ansible facts.
        facts = {
            "response": response,
            "ansible_params": scrub_dict(ansible_params),
            "paramgram": scrub_dict(paramgram),
            "connected_fmgr": self._conn.return_connected_fmgr()
        }
        if args:
            facts["custom_args"] = args
        if kwargs:
            facts.update(kwargs)
        return facts
##########################
# BEGIN DEPRECATED METHODS
##########################
# SOME OF THIS CODE IS DUPLICATED IN THE PLUGIN, BUT THOSE ARE PLUGIN SPECIFIC. THIS VERSION STILL ALLOWS FOR
# THE USAGE OF PYFMG FOR CUSTOMERS WHO HAVE NOT YET UPGRADED TO ANSIBLE 2.7
# LEGACY PYFMG METHODS START
# USED TO DETERMINE LOCK CONTEXT ON A FORTIMANAGER. A DATABASE LOCKING CONCEPT THAT NEEDS TO BE ACCOUNTED FOR.
class FMGLockContext(object):
    """Track workspace/ADOM locking state for a legacy pyFMG connection.

    - DEPRECATING: USING CONNECTION MANAGER NOW INSTEAD. EVENTUALLY THIS CLASS WILL DISAPPEAR. PLEASE
    - CONVERT ALL MODULES TO CONNECTION MANAGER METHOD.
    - LEGACY pyFMG HANDLER OBJECT: REQUIRES A CHECK FOR PY FMG AT TOP OF PAGE
    """
    def __init__(self, fmg):
        # fmg: a pyFMG FortiManager connection object (get/execute methods).
        self._fmg = fmg
        # ADOMs this context has locked and not yet unlocked.
        self._locked_adom_list = list()
        # Whether the target FortiManager runs in workspace (locking) mode.
        self._uses_workspace = False
        # Whether ADOMs are enabled on the target FortiManager.
        self._uses_adoms = False
    @property
    def uses_workspace(self):
        return self._uses_workspace
    @uses_workspace.setter
    def uses_workspace(self, val):
        self._uses_workspace = val
    @property
    def uses_adoms(self):
        return self._uses_adoms
    @uses_adoms.setter
    def uses_adoms(self, val):
        self._uses_adoms = val
    def add_adom_to_lock_list(self, adom):
        """Remember *adom* as locked (duplicate-free)."""
        if adom not in self._locked_adom_list:
            self._locked_adom_list.append(adom)
    def remove_adom_from_lock_list(self, adom):
        """Forget *adom* after a successful unlock."""
        if adom in self._locked_adom_list:
            self._locked_adom_list.remove(adom)
    def check_mode(self):
        """Query the device to learn whether workspace mode and ADOMs are on."""
        url = "/cli/global/system/global"
        code, resp_obj = self._fmg.get(url, fields=["workspace-mode", "adom-status"])
        try:
            if resp_obj["workspace-mode"] != 0:
                self.uses_workspace = True
        except KeyError:
            # Key absent: assume workspace mode is off.
            self.uses_workspace = False
        try:
            if resp_obj["adom-status"] == 1:
                self.uses_adoms = True
        except KeyError:
            # Key absent: assume ADOMs are disabled.
            self.uses_adoms = False
    def run_unlock(self):
        """Unlock every ADOM this context previously locked."""
        # BUGFIX: iterate over a snapshot. unlock_adom() removes entries from
        # self._locked_adom_list on success, and mutating the list while
        # iterating it would silently skip every other ADOM.
        for adom_locked in list(self._locked_adom_list):
            self.unlock_adom(adom_locked)
    def lock_adom(self, adom=None, *args, **kwargs):
        """Lock *adom* (or 'root' when None); record it on success."""
        if adom:
            if adom.lower() == "global":
                url = "/dvmdb/global/workspace/lock/"
            else:
                url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom)
        else:
            url = "/dvmdb/adom/root/workspace/lock"
        code, respobj = self._fmg.execute(url, {}, *args, **kwargs)
        if code == 0 and respobj["status"]["message"].lower() == "ok":
            self.add_adom_to_lock_list(adom)
        return code, respobj
    def unlock_adom(self, adom=None, *args, **kwargs):
        """Unlock *adom* (or 'root' when None); unrecord it on success."""
        if adom:
            if adom.lower() == "global":
                url = "/dvmdb/global/workspace/unlock/"
            else:
                url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom)
        else:
            url = "/dvmdb/adom/root/workspace/unlock"
        code, respobj = self._fmg.execute(url, {}, *args, **kwargs)
        if code == 0 and respobj["status"]["message"].lower() == "ok":
            self.remove_adom_from_lock_list(adom)
        return code, respobj
    def commit_changes(self, adom=None, aux=False, *args, **kwargs):
        """Commit pending workspace changes for *adom* (or 'root' when None).

        :param aux: use the auxiliary /pm/config commit endpoint instead of
            the /dvmdb one.
        """
        if adom:
            if aux:
                url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom)
            else:
                if adom.lower() == "global":
                    url = "/dvmdb/global/workspace/commit/"
                else:
                    url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom)
        else:
            url = "/dvmdb/adom/root/workspace/commit"
        return self._fmg.execute(url, {}, *args, **kwargs)
# DEPRECATED -- USE PLUGIN INSTEAD
class AnsibleFortiManager(object):
    """Thin wrapper binding an Ansible module to a legacy pyFMG connection.

    - DEPRECATING: USING CONNECTION MANAGER NOW INSTEAD. EVENTUALLY THIS CLASS WILL DISAPPEAR. PLEASE
    - CONVERT ALL MODULES TO CONNECTION MANAGER METHOD.
    - LEGACY pyFMG HANDLER OBJECT: REQUIRES A CHECK FOR PY FMG AT TOP OF PAGE
    """
    def __init__(self, module, ip=None, username=None, passwd=None, use_ssl=True, verify_ssl=False, timeout=300):
        # Connection parameters for the FortiManager appliance.
        self.ip = ip
        self.username = username
        self.passwd = passwd
        self.use_ssl = use_ssl
        self.verify_ssl = verify_ssl
        self.timeout = timeout
        # Populated by login(); stays None until then.
        self.fmgr_instance = None
        # Abort the module run early when pyFMG is not installed.
        if not HAS_PYFMGR:
            module.fail_json(msg='Could not import the python library pyFMG required by this module')
        self.module = module
    def login(self):
        """Open a pyFMG session; returns None when no IP was configured."""
        if self.ip is None:
            return None
        self.fmgr_instance = FortiManager(self.ip, self.username, self.passwd, use_ssl=self.use_ssl,
                                          verify_ssl=self.verify_ssl, timeout=self.timeout, debug=False,
                                          disable_request_warnings=True)
        return self.fmgr_instance.login()
    def logout(self):
        """Close the pyFMG session if one is active."""
        if self.fmgr_instance.sid is not None:
            self.fmgr_instance.logout()
    def get(self, url, data):
        """Delegate a GET request to the pyFMG session."""
        return self.fmgr_instance.get(url, **data)
    def set(self, url, data):
        """Delegate a SET request to the pyFMG session."""
        return self.fmgr_instance.set(url, **data)
    def update(self, url, data):
        """Delegate an UPDATE request to the pyFMG session."""
        return self.fmgr_instance.update(url, **data)
    def delete(self, url, data):
        """Delegate a DELETE request to the pyFMG session."""
        return self.fmgr_instance.delete(url, **data)
    def add(self, url, data):
        """Delegate an ADD request to the pyFMG session."""
        return self.fmgr_instance.add(url, **data)
    def execute(self, url, data):
        """Delegate an EXECUTE request to the pyFMG session."""
        return self.fmgr_instance.execute(url, **data)
    def move(self, url, data):
        """Delegate a MOVE request to the pyFMG session."""
        return self.fmgr_instance.move(url, **data)
    def clone(self, url, data):
        """Delegate a CLONE request to the pyFMG session."""
        return self.fmgr_instance.clone(url, **data)
##########################
# END DEPRECATED METHODS
##########################
| gpl-3.0 |
PrathapB/LTLMoP | src/lib/executeStrategy.py | 7 | 6765 | import fsa
import sys
import logging,random
import project
class ExecutorStrategyExtensions(object):
    """ Extensions to Executor to allow for the strategy structure.
    This class provides functions to update the outputs and to check for new states in every iteration.
    """
    def __init__(self):
        super(ExecutorStrategyExtensions, self).__init__()
        # Candidate next states from the previous iteration (used to keep the
        # random choice stable while a transition is in progress).
        self.last_next_states= []
        # State/region targets currently being pursued.
        self.next_state = None
        self.current_region = None
        self.next_region = None
    def updateOutputs(self, state=None):
        """
        Update the values of current outputs in our execution environment to reflect the output
        proposition values associated with the given state
        """
        if state is None:
            state = self.current_state
        for key, output_val in state.getOutputs().iteritems():
            # Skip any region
            if 'region' == key: continue
            if key not in self.current_outputs.keys() or output_val != self.current_outputs[key]:
                # The state of this output proposition has changed!
                self.postEvent("INFO", "Output proposition \"%s\" is now %s!" % (key, str(output_val)))
                # Run any actuator handlers if appropriate
                if key in self.proj.enabled_actuators:
                    self.hsub.setActuatorValue({key:output_val})
                self.current_outputs[key] = output_val
    def runStrategyIteration(self):
        """
        Run, run, run the automaton! (For one evaluation step)
        """
        # find current region
        self.current_region = self.strategy.current_state.getPropValue('region')
        # Take a snapshot of our current sensor readings
        sensor_state = self.hsub.getSensorValue(self.proj.enabled_sensors)
        # Let's try to transition
        # TODO: set current state so that we don't need to call from_state
        next_states = self.strategy.findTransitionableStates(sensor_state, from_state= self.strategy.current_state)
        # Make sure we have somewhere to go
        if len(next_states) == 0:
            # Well darn!
            logging.error("Could not find a suitable state to transition to!")
            return
        # See if we're beginning a new transition
        if next_states != self.last_next_states:
            # NOTE: The last_next_states comparison is also to make sure we don't
            # choose a different random next-state each time, in the case of multiple choices
            self.last_next_states = next_states
            # Only allow self-transitions if that is the only option!
            if len(next_states) > 1 and self.strategy.current_state in next_states:
                next_states.remove(self.strategy.current_state)
            self.next_state = random.choice(next_states)
            self.next_region = self.next_state.getPropValue('region')
            self.postEvent("INFO", "Currently pursuing goal #{}".format(self.next_state.goal_id))
            # See what we, as the system, need to do to get to this new state
            self.transition_contains_motion = self.next_region is not None and (self.next_region != self.current_region)
            if self.proj.compile_options['fastslow']:
                # Run actuators before motion
                self.updateOutputs(self.next_state)
            if self.transition_contains_motion:
                # We're going to a new region
                self.postEvent("INFO", "Heading to region %s..." % self.next_region.name)
            self.arrived = False
        if not self.arrived:
            # Move one step towards the next region (or stay in the same region)
            self.arrived = self.hsub.gotoRegion(self.current_region, self.next_region)
        # Check for completion of motion
        if self.arrived and self.next_state != self.strategy.current_state:
            # TODO: Check to see whether actually inside next region that we expected
            if self.transition_contains_motion:
                self.postEvent("INFO", "Crossed border from %s to %s!" % (self.current_region.name, self.next_region.name))
            if not self.proj.compile_options['fastslow']:
                # Run actuators after motion
                self.updateOutputs(self.next_state)
            self.strategy.current_state = self.next_state
            self.last_next_states = []  # reset
            self.postEvent("INFO", "Now in state %s (z = %s)" % (self.strategy.current_state.state_id, self.strategy.current_state.goal_id))
    def HSubGetSensorValue(self,sensorList):
        """
        This function takes in a list of sensorName and returns the dictionary of the propositions with values.
        This will be replaced by a function in handlerSubsystem.py in the future.
        Input:
            sensorList (list): list of the sensor propositions
        Output:
            sensor_state (dict): dictionary containing the sensor name with the corresponding sensor value.
        """
        self.h_instance = self.proj.h_instance
        sensor_state = {}
        # NOTE(review): the sensorList parameter is ignored here -- the loop
        # reads self.proj.enabled_sensors instead. Callers currently pass
        # enabled_sensors anyway, but confirm before relying on the parameter.
        for sensor in self.proj.enabled_sensors:
            # sensor_handler entries are code strings evaluated with this
            # executor bound as 'self'.
            sensor_state[sensor] = eval(self.proj.sensor_handler[sensor], {'self':self,'initial':False})
        return sensor_state
    def HSubSetActuatorValue(self, actuatorName, setValue):
        """
        This function takes in the name of an actuator and set its value.
        This will be replaced by a function in handlerSubsystem.py in the future.
        actuatorName (string): name of the actuator proposition.
        setValue (bool) : the value to set the proposition to.
        """
        self.motionControlGoToRegionWrapper(self.current_region, self.current_region) # Stop, in case actuation takes time
        # 'initial' and 'new_val' must exist in the local scope: the actuator
        # handler code string executed below references them by these names.
        initial=False
        new_val = setValue #taken by the actuator argument, has to be called new_val
        exec(self.proj.actuator_handler[actuatorName])
    def motionControlGoToRegionWrapper (self,current_region, next_region):
        """
        This function wraps around the original goToRegion in motionControl handler. It takes in the current
        region object and the next region object, and at the end returns a boolean value of the arrival status
        to the next region.
        Inputs:
            current_region (region object): current region object
            next_region (region object): next region object
        Ouputs:
            arrived (bool): status of arrival to the next region
        """
        # The motion handler works with region indices, not region objects.
        current_region_no = self.proj.rfi.regions.index(current_region)
        next_region_no = self.proj.rfi.regions.index(next_region)
        arrived = self.proj.h_instance['motionControl'].gotoRegion(current_region_no, next_region_no)
        return arrived
| gpl-3.0 |
jzoldak/edx-platform | openedx/core/djangoapps/oauth_dispatch/tests/factories.py | 23 | 1067 | # pylint: disable=missing-docstring
from datetime import datetime, timedelta
import factory
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
import pytz
from oauth2_provider.models import Application, AccessToken, RefreshToken
from student.tests.factories import UserFactory
class ApplicationFactory(DjangoModelFactory):
    """Factory for django-oauth-toolkit ``Application`` objects."""
    class Meta(object):
        model = Application
    # Owner of the OAuth application.
    user = factory.SubFactory(UserFactory)
    # Deterministic unique client ids: client_0, client_1, ...
    client_id = factory.Sequence(u'client_{0}'.format)
    client_secret = 'some_secret'
    client_type = 'confidential'
    # NOTE(review): oauth2_provider's grant-type choices are usually constant
    # values like 'client-credentials'; confirm 'Client credentials' is intended.
    authorization_grant_type = 'Client credentials'
class AccessTokenFactory(DjangoModelFactory):
    """Factory for OAuth2 ``AccessToken`` objects (valid for one day)."""
    class Meta(object):
        model = AccessToken
        # Reuse an existing token for the same (user, application) pair.
        django_get_or_create = ('user', 'application')
    # Random 32-character token value.
    token = FuzzyText(length=32)
    # NOTE(review): evaluated once at import time, not per instance -- all
    # tokens created in one process share this expiry timestamp.
    expires = datetime.now(pytz.UTC) + timedelta(days=1)
class RefreshTokenFactory(DjangoModelFactory):
    """Factory for OAuth2 ``RefreshToken`` objects."""
    class Meta(object):
        model = RefreshToken
        # Reuse an existing token for the same (user, application) pair.
        django_get_or_create = ('user', 'application')
    # Random 32-character token value.
    token = FuzzyText(length=32)
| agpl-3.0 |
tuos/FlowAndCorrelations | mc/step2/src/RECOHI_mc.py | 1 | 4615 | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3 --conditions auto:run2_mc_hi -s RAW2DIGI,L1Reco,RECO -n 4 --eventcontent RECODEBUG --runUnscheduled --scenario HeavyIons --datatier GEN-SIM-RECO --beamspot NominalHICollision2015 --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1_HI,RecoHI/Configuration/customise_RecoMergedTrackCollection.customiseAddMergedTrackCollection --io RECOHI_mc.io --python RECOHI_mc.py --no_exec
import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the heavy-ion RECO step: RAW2DIGI -> L1Reco -> RECO,
# reading the step2 DIGI/HLT output and writing GEN-SIM-RECO in RECODEBUG format.
process = cms.Process('RECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Number of events to process (-1 would mean "all").
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(4)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        #'/store/user/tuos/EPOSLHC/crab/Nov2017PbPb5020GeV/200kv1/MinBias/CRAB3_EPOSLHC_Nov2017_PbPb5020GeV_accre200kv1/171129_030608/0000/ReggeGribovPartonMC_EposLHC_5020GeV_PbPb_cfi_py_GEN_SIM_11.root'
        #'file:/home/tuos/step2_DIGI_L1_DIGI2RAW_HLT_RAW2DIGI_L1Reco_1000_1_X6L.root'
        'file:step2_DIGI_L1_DIGI2RAW_HLT_RAW2DIGI_L1Reco.root'
    ),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
    allowUnscheduled = cms.untracked.bool(True)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('step3 nevts:4'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RECODEBUGoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN-SIM-RECO'),
        filterName = cms.untracked.string('')
    ),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    fileName = cms.untracked.string('step3_RAW2DIGI_L1Reco_RECO.root'),
    outputCommands = process.RECODEBUGEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
# Resolve the heavy-ion MC conditions (GlobalTag) symbolically.
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_hi', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstructionHeavyIons)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECODEBUGoutput_step = cms.EndPath(process.RECODEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECODEBUGoutput_step)
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1_HI
#call to customisation function customisePostLS1_HI imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1_HI(process)
# Automatic addition of the customisation function from RecoHI.Configuration.customise_RecoMergedTrackCollection
from RecoHI.Configuration.customise_RecoMergedTrackCollection import customiseAddMergedTrackCollection
#call to customisation function customiseAddMergedTrackCollection imported from RecoHI.Configuration.customise_RecoMergedTrackCollection
process = customiseAddMergedTrackCollection(process)
# End of customisation functions
#do not add changes to your config after this point (unless you know what you are doing)
from FWCore.ParameterSet.Utilities import convertToUnscheduled
process=convertToUnscheduled(process)
from FWCore.ParameterSet.Utilities import cleanUnscheduled
process=cleanUnscheduled(process)
| mit |
ieee8023/PDFViewer | jni/mupdf/freetype/src/tools/chktrcmp.py | 381 | 3826 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly into the public domain.
import sys
import os
import re
# NOTE: this script targets Python 2 (print statements).
SRC_FILE_LIST = []
# component name -> list of "path:line" locations where it is used.
USED_COMPONENT = {}
# component name -> "file:line" location where FT_TRACE_DEF() declares it.
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
    if sys.argv[i].startswith( "--help" ):
        print "Usage: %s [option]" % sys.argv[0]
        print "Search used-but-defined and defined-but-not-used trace_XXX macros"
        print ""
        print " --help:"
        print " Show this help"
        print ""
        print " --src-dirs=dir1:dir2:..."
        print " Specify the directories of C source files to be checked"
        print " Default is %s" % ":".join( SRC_FILE_DIRS )
        print ""
        print " --def-files=file1:file2:..."
        print " Specify the header files including FT_TRACE_DEF()"
        print " Default is %s" % ":".join( TRACE_DEF_FILES )
        print ""
        exit(0)
    if sys.argv[i].startswith( "--src-dirs=" ):
        SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
    elif sys.argv[i].startswith( "--def-files=" ):
        TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
for d in SRC_FILE_DIRS:
    for ( p, dlst, flst ) in os.walk( d ):
        for f in flst:
            if c_pathname_pat.match( f ) != None:
                src_pathname = os.path.join( p, f )
                line_num = 0
                for src_line in open( src_pathname, 'r' ):
                    line_num = line_num + 1
                    src_line = src_line.strip()
                    if trace_use_pat.match( src_line ) != None:
                        # Everything after the matched prefix is the component name.
                        component_name = trace_use_pat.sub( '', src_line )
                        if component_name in USED_COMPONENT:
                            USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
                        else:
                            USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
for f in TRACE_DEF_FILES:
    line_num = 0
    for hdr_line in open( f, 'r' ):
        line_num = line_num + 1
        hdr_line = hdr_line.strip()
        if trace_def_pat_opn.match( hdr_line ) != None:
            # Strip the FT_TRACE_DEF( prefix and the trailing )/whitespace
            # to isolate the bare component name.
            component_name = trace_def_pat_opn.sub( '', hdr_line )
            component_name = trace_def_pat_cls.sub( '', component_name )
            if component_name in KNOWN_COMPONENT:
                print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
                    ( component_name, KNOWN_COMPONENT[component_name], line_num )
            else:
                KNOWN_COMPONENT[component_name] = "%s:%d" % \
                    ( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in KNOWN_COMPONENT:
        print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
    if c not in USED_COMPONENT:
        # "any" is a catch-all component and is expected to be unused.
        if c != "any":
            print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| gpl-2.0 |
moodpulse/l2 | refprocessor/processor.py | 1 | 1703 | from typing import Tuple, Union, List
from appconf.manager import SettingManager
from refprocessor.age_parser import AgeRight
from refprocessor.common import ValueRange, RANGE_IN
from refprocessor.result_parser import ResultRight
class RefProcessor:
    """Select and apply the reference range matching a patient's age.

    ``ref`` maps an age-specification string (parsed by AgeRight) to a raw
    reference-range string; the first key whose AgeRight matches ``age``
    becomes the active reference.
    """

    def __init__(self, ref: dict, age: List[int]):
        # Resolve the applicable reference once, up front.
        actual_key, actual_ref, actual_raw_ref = RefProcessor.get_actual_ref(ref, age)
        self.key = actual_key          # matched age-spec key (or None)
        self.ref = actual_ref          # parsed ResultRight (or None)
        self.raw_ref = actual_raw_ref  # raw reference string (or None)

    @staticmethod
    def get_actual_ref(ref: dict, age: List[int]) -> Union[Tuple[str, ResultRight, str], Tuple[None, None, None]]:
        """Return (key, parsed ref, raw ref) for the first age-matching
        entry of ``ref``, or (None, None, None) when nothing matches.

        NOTE(review): dict iteration order decides ties between
        overlapping age specs — assumes insertion order is meaningful.
        """
        for k in ref:
            age_rights = AgeRight(k)
            if age_rights.test(age):
                return k, ResultRight(ref[k]), ref[k]
        return None, None, None

    def get_active_ref(self, raw_ref=True, single=False):
        """Return the active reference.

        With ``raw_ref=True`` returns the raw string (or, with
        ``single=True``, a one-entry dict subject to the
        show_only_needed_ref / show_full_needed_ref settings, possibly
        None).  With ``raw_ref=False`` returns the parsed ResultRight or
        a fallback ValueRange.
        """
        if raw_ref:
            if single:
                show_only_needed_ref = SettingManager.get("show_only_needed_ref", default='True', default_type='b')
                if not show_only_needed_ref or not self.raw_ref:
                    return None
                show_full_needed_ref = SettingManager.get("show_full_needed_ref", default='False', default_type='b')
                if show_full_needed_ref:
                    return {self.key: self.raw_ref}
                return {'Все': self.raw_ref}
            return self.raw_ref
        if isinstance(self.ref, ResultRight):
            return self.ref
        # NOTE(review): fallback sentinel range; the (0, ")") bounds look
        # like "no limits" placeholders — confirm against ValueRange.
        return ValueRange((0, ")"), (0, ")"))

    def calc(self, value):
        """Test ``value`` against the active reference; when none is
        active, report the value as normal / in range."""
        if isinstance(self.ref, ResultRight):
            return self.ref.test(value)
        return ResultRight.RESULT_MODE_NORMAL, RANGE_IN
| mit |
ArcherSys/ArcherSys | Lib/site-packages/django/db/models/expressions.py | 30 | 33239 | import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q, refs_aggregate
from django.utils import six, timezone
from django.utils.functional import cached_property
class Combinable(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class BaseExpression(object):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
def __init__(self, output_field=None):
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, six.string_types) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(Expression, self).as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self):
"""
Hook used by Field.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expressions.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def refs_aggregate(self, existing_aggregates):
"""
Does this expression contain a reference to some of the
existing aggregates? If so, returns the aggregate and also
the lookup parts that *weren't* found. So, if
exsiting_aggregates = {'max_id': Max('id')}
self.name = 'max_id'
queryset.filter(max_id__range=[10,100])
then this method will return Max('id') and those parts of the
name that weren't found. In this case `max_id` is found and the range
portion is returned as ('range',).
"""
for node in self.get_source_expressions():
agg, lookup = node.refs_aggregate(existing_aggregates)
if agg:
return agg, lookup
return False, ()
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
def reverse_ordering(self):
return self
class Expression(BaseExpression, Combinable):
    """
    An expression that can be combined with other expressions.
    """
    # Pure mixin composition: BaseExpression supplies the resolution /
    # SQL-generation machinery, Combinable the arithmetic operators.
    pass
class CombinedExpression(Expression):
    """Two expressions joined by a connector, e.g. F('a') + F('b').

    Built by Combinable._combine(); ``connector`` is one of the operator
    constants defined on Combinable (ADD, SUB, BITAND, ...).
    """

    def __init__(self, lhs, connector, rhs, output_field=None):
        super(CombinedExpression, self).__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)

    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)

    def get_source_expressions(self):
        return [self.lhs, self.rhs]

    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs

    def as_sql(self, compiler, connection):
        # Peek at both sides' output fields; either may be unresolvable.
        try:
            lhs_output = self.lhs.output_field
        except FieldError:
            lhs_output = None
        try:
            rhs_output = self.rhs.output_field
        except FieldError:
            rhs_output = None
        # Backends without a native duration type need special interval
        # arithmetic, handled by DurationExpression.
        if (not connection.features.has_native_duration_field and
                ((lhs_output and lhs_output.get_internal_type() == 'DurationField')
                 or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence: parenthesize so the combined expression
        # nests safely inside larger expressions.
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Resolve both sides against the query; returns a copy.
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
class DurationExpression(CombinedExpression):
    """Combined expression for duration arithmetic on backends without a
    native duration column type."""

    def compile(self, side, compiler, connection):
        # Wrap DurationField sides in the backend's interval-arithmetic
        # format; DurationValue literals are already handled elsewhere.
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                pass
            else:
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence: parenthesize the combined expression.
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params
class F(Combinable):
    """
    An object capable of resolving references to existing query objects.
    """
    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Delegates entirely to the query: the result is a Col (or
        # annotation Ref) for the named field.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def refs_aggregate(self, existing_aggregates):
        # The name may be a lookup path (e.g. "max_id__range").
        return refs_aggregate(self.name.split(LOOKUP_SEP), existing_aggregates)

    def asc(self):
        return OrderBy(self)

    def desc(self):
        return OrderBy(self, descending=True)
class Func(Expression):
    """
    A SQL function call.
    """
    function = None  # SQL function name; subclasses override
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '

    def __init__(self, *expressions, **extra):
        # Remaining **extra kwargs become template substitutions.
        output_field = extra.pop('output_field', None)
        super(Func, self).__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        self.extra = extra

    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
        if extra:
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)

    def get_source_expressions(self):
        return self.source_expressions

    def set_source_expressions(self, exprs):
        self.source_expressions = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Resolve each argument in place on a copy.
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, function=None, template=None):
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        # NOTE(review): as_sql mutates self.extra ('function',
        # 'expressions', 'field' keys) while rendering — not a pure method.
        if function is None:
            self.extra['function'] = self.extra.get('function', self.function)
        else:
            self.extra['function'] = function
        self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
        template = template or self.extra.get('template', self.template)
        return template % self.extra, params

    def copy(self):
        # Shallow-copy the mutable containers so resolution of the copy
        # does not leak back into the original.
        copy = super(Func, self).copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy
class Value(Expression):
    """
    Represents a wrapped value as a node within an expression
    """
    def __init__(self, value, output_field=None):
        """
        Arguments:
        * value: the value this expression represents. The value will be
          added into the sql parameter list and properly quoted.

        * output_field: an instance of the model field type that this
          expression will return, such as IntegerField() or CharField().
        """
        super(Value, self).__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        # NOTE(review): self.for_save is only assigned by
        # resolve_expression(); as_sql assumes resolution happened first.
        connection.ops.check_expression_support(self)
        val = self.value
        # check _output_field to avoid triggering an exception
        if self._output_field is not None:
            if self.for_save:
                val = self.output_field.get_db_prep_save(val, connection=connection)
            else:
                val = self.output_field.get_db_prep_value(val, connection=connection)
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.for_save = for_save
        return c

    def get_group_by_cols(self):
        # A literal never contributes to GROUP BY.
        return []
class DurationValue(Value):
    """A literal duration (datetime.timedelta) expression node."""

    def as_sql(self, compiler, connection):
        """Render the duration, falling back to the backend's interval
        SQL when timedelta parameters cannot be bound natively."""
        connection.ops.check_expression_support(self)
        features = connection.features
        native_ok = (features.has_native_duration_field
                     and features.driver_supports_timedelta_args)
        if not native_ok:
            return connection.ops.date_interval_sql(self.value)
        return super(DurationValue, self).as_sql(compiler, connection)
class RawSQL(Expression):
    """Wrap a raw SQL snippet (plus its bind parameters) as an
    expression node."""

    def __init__(self, sql, params, output_field=None):
        self.sql, self.params = sql, params
        # Default to a generic Field when the caller gives no type.
        if output_field is None:
            output_field = fields.Field()
        super(RawSQL, self).__init__(output_field=output_field)

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        # Parenthesize so the snippet composes safely in larger SQL.
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self):
        return [self]
class Random(Expression):
    """Expression node rendering the backend's random-number function."""

    def __init__(self):
        super(Random, self).__init__(output_field=fields.FloatField())

    def __repr__(self):
        return "Random()"

    def as_sql(self, compiler, connection):
        sql = connection.ops.random_function_sql()
        return sql, []
class Col(Expression):
    """A reference to a concrete database column (alias.column)."""

    def __init__(self, alias, target, output_field=None):
        # target is the model field the column maps to; it doubles as the
        # output field unless one is given explicitly.
        if output_field is None:
            output_field = target
        super(Col, self).__init__(output_field=output_field)
        self.alias, self.target = alias, target

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.target)

    def as_sql(self, compiler, connection):
        qn = compiler.quote_name_unless_alias
        return "%s.%s" % (qn(self.alias), qn(self.target.column)), []

    def relabeled_clone(self, relabels):
        # Swap the table alias according to the relabelling map.
        return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)

    def get_group_by_cols(self):
        return [self]

    def get_db_converters(self, connection):
        # When output_field differs from the stored field, both sets of
        # converters must run.
        if self.target == self.output_field:
            return self.output_field.get_db_converters(connection)
        return (self.output_field.get_db_converters(connection) +
                self.target.get_db_converters(connection))
class Ref(Expression):
    """
    Reference to column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """

    def __init__(self, refs, source):
        super(Ref, self).__init__()
        self.refs = refs
        self.source = source

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        (self.source,) = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # `source` was resolved when the alias was annotated, so there is
        # nothing left to resolve here.
        return self

    def relabeled_clone(self, relabels):
        # An alias reference is table-independent.
        return self

    def as_sql(self, compiler, connection):
        quoted = connection.ops.quote_name(self.refs)
        return "%s" % quoted, []

    def get_group_by_cols(self):
        return [self]
class ExpressionWrapper(Expression):
    """Wrap another expression to supply extra context — typically an
    explicit output_field — without changing its SQL."""

    def __init__(self, expression, output_field):
        super(ExpressionWrapper, self).__init__(output_field=output_field)
        self.expression = expression

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.expression)

    def get_source_expressions(self):
        return [self.expression]

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def as_sql(self, compiler, connection):
        # SQL generation is delegated wholesale to the wrapped node.
        return self.expression.as_sql(compiler, connection)
class When(Expression):
    """One WHEN ... THEN ... branch of a Case() expression."""

    template = 'WHEN %(condition)s THEN %(result)s'

    def __init__(self, condition=None, then=None, **lookups):
        # Either a Q object or field lookups may define the condition,
        # never both.
        if lookups and condition is None:
            condition, lookups = Q(**lookups), None
        if condition is None or not isinstance(condition, Q) or lookups:
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super(When, self).__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        # The condition is never saved, hence for_save=False for it.
        c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        template_params = {}
        sql_params = []
        condition_sql, condition_params = compiler.compile(self.condition)
        template_params['condition'] = condition_sql
        sql_params.extend(condition_params)
        result_sql, result_params = compiler.compile(self.result)
        template_params['result'] = result_sql
        sql_params.extend(result_params)
        template = template or self.template
        return template % template_params, sql_params

    def get_group_by_cols(self):
        # This is not a complete expression and cannot be used in GROUP BY.
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', None)
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, extra=None):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = dict(extra) if extra else {}
case_parts = []
sql_params = []
for case in self.cases:
case_sql, case_params = compiler.compile(case)
case_parts.append(case_sql)
sql_params.extend(case_params)
template_params['cases'] = self.case_joiner.join(case_parts)
default_sql, default_params = compiler.compile(self.default)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or self.template
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
class Date(Expression):
"""
Add a date selection column.
"""
def __init__(self, lookup, lookup_type):
super(Date, self).__init__(output_field=fields.DateField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.lookup, self.lookup_type)
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, fields.DateTimeField), (
"%r is a DateTimeField, not a DateField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.date_trunc_sql(self.lookup_type, sql), []
def copy(self):
copy = super(Date, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
return copy
def convert_value(self, value, expression, connection, context):
if isinstance(value, datetime.datetime):
value = value.date()
return value
class DateTime(Expression):
"""
Add a datetime selection column.
"""
def __init__(self, lookup, lookup_type, tzinfo):
super(DateTime, self).__init__(output_field=fields.DateTimeField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
if tzinfo is None:
self.tzname = None
else:
self.tzname = timezone._get_timezone_name(tzinfo)
self.tzinfo = tzinfo
def __repr__(self):
return "{}({}, {}, {})".format(
self.__class__.__name__, self.lookup, self.lookup_type, self.tzinfo)
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateTimeField), (
"%r isn't a DateTimeField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)
def copy(self):
copy = super(DateTime, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
copy.tzname = self.tzname
return copy
def convert_value(self, value, expression, connection, context):
if settings.USE_TZ:
if value is None:
raise ValueError(
"Database returned an invalid value in QuerySet.datetimes(). "
"Are time zone definitions for your database and pytz installed?"
)
value = value.replace(tzinfo=None)
value = timezone.make_aware(value, self.tzinfo)
return value
class OrderBy(BaseExpression):
    """Wrap an expression with an ASC/DESC ordering direction."""

    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False):
        # NOTE(review): does not call super().__init__() — relies on the
        # class never needing _output_field; confirm before changing.
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {'expression': expression_sql}
        placeholders['ordering'] = 'DESC' if self.descending else 'ASC'
        return (self.template % placeholders).rstrip(), params

    def get_group_by_cols(self):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        # Mutates in place and returns self.
        self.descending = not self.descending
        return self

    # NOTE: unlike Expression.asc()/desc(), these mutate in place and
    # return None.
    def asc(self):
        self.descending = False

    def desc(self):
| mit |
TacticalGoat/reddit | AutoContributor/autocontributor.py | 1 | 2306 | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#The subreddit you are acting on.
SUBJECTLINE = ['submission']
#If the modmail subject line contains one of these keywords, he will be added
MAXPOSTS = 100
#The number of modmails to collect at once. 100 can be fetched with a single request
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
print('Loaded Users table')
sql.commit()
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
WAITS = str(WAIT)
print('Logging in.')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scanmessages():
    # Fetch recent modmail for SUBREDDIT and approve any sender whose
    # subject line contains one of the SUBJECTLINE keywords.  Messages
    # already processed (tracked in the oldposts table) are skipped.
    print('Getting ' + SUBREDDIT + ' modmail')
    subreddit = r.get_subreddit(SUBREDDIT)
    modmail = list(subreddit.get_mod_mail(limit=MAXPOSTS))
    for message in modmail:
        cur.execute('SELECT * FROM oldposts WHERE ID=?', [message.fullname])
        if not cur.fetchone():
            print(message.fullname)
            try:
                mauthor = message.author.name
                msubject = message.subject.lower()
                # Case-insensitive keyword match against the subject.
                if any(keyword.lower() in msubject for keyword in SUBJECTLINE):
                    print('\tApproving ' + mauthor)
                    subreddit.add_contributor(mauthor)
                    message.mark_as_read()
            except AttributeError:
                # message.author is None (deleted account / system mail),
                # so .name raises AttributeError.
                print('Failed to fetch username')
            # Record the message even on failure so it is never retried.
            cur.execute('INSERT INTO oldposts VALUES(?)', [message.fullname])
            sql.commit()
# Main bot loop: scan modmail forever, sleeping WAIT seconds per cycle.
while True:
    try:
        scanmessages()
    except Exception as e:
        # Best-effort daemon: log the error (API/network hiccups are
        # expected) and keep running rather than crash.
        print('ERROR: ' + str(e))
    sql.commit()
    print('Running again in ' + WAITS + ' seconds \n_________\n')
    time.sleep(WAIT)
| mit |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/pip/wheel.py | 338 | 32010 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
# File extension that identifies a built wheel archive.
wheel_ext = '.whl'

# Highest Wheel-Version (major, minor) this code knows how to install.
VERSION_COMPATIBLE = (1, 0)

logger = logging.getLogger(__name__)
class WheelCache(object):
    """Holds previously built wheels so future installs can reuse them."""

    def __init__(self, cache_dir, format_control):
        """Create a wheel cache.

        :param cache_dir: root directory of the cache; a falsy value
            disables caching entirely.
        :param format_control: a pip.index.FormatControl object restricting
            which binaries may be read back from the cache.
        """
        if cache_dir:
            self._cache_dir = expanduser(cache_dir)
        else:
            self._cache_dir = None
        self._format_control = format_control

    def cached_wheel(self, link, package_name):
        """Return a link to a cached wheel for *link*, or *link* unchanged."""
        return cached_wheel(self._cache_dir, link,
                            self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
    """Return a link to the best cached wheel for *link*, or *link* itself.

    The original link is returned unchanged whenever the cache cannot
    help: caching disabled, no link, already a wheel, not a static
    artifact, unknown package name, binaries disabled for the package,
    or simply no supported cached wheel available.
    """
    if not cache_dir or not link or link.is_wheel or not link.is_artifact:
        return link
    if not package_name:
        return link

    canonical_name = canonicalize_name(package_name)
    formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
    if "binary" not in formats:
        return link

    root = _cache_for_link(cache_dir, link)
    try:
        wheel_names = os.listdir(root)
    except OSError as exc:
        # A missing cache directory just means nothing is cached yet.
        if exc.errno in (errno.ENOENT, errno.ENOTDIR):
            return link
        raise

    candidates = []
    for wheel_name in wheel_names:
        try:
            candidate = Wheel(wheel_name)
        except InvalidWheelFilename:
            continue
        if not candidate.supported():
            # Built for a different python/arch/etc
            continue
        candidates.append((candidate.support_index_min(), wheel_name))

    if not candidates:
        return link
    # Prefer the wheel whose tags rank highest (lowest support index).
    best = min(candidates)
    return pip.index.Link(path_to_url(os.path.join(root, best[1])))
def rehash(path, algo='sha256', blocksize=1 << 20):
    """Return ``(encoded_digest, length)`` for the file at *path*.

    The digest is formatted as ``<algo>=<urlsafe-b64-digest>`` with the
    trailing ``=`` padding stripped, matching the RECORD-file convention
    from PEP 376 / the wheel spec.

    :param path: path of the file to hash.
    :param algo: any algorithm name accepted by ``hashlib.new``.
    :param blocksize: bytes read per chunk, bounding memory use.
    """
    h = hashlib.new(algo)
    length = 0
    with open(path, 'rb') as f:
        for block in read_chunks(f, size=blocksize):
            length += len(block)
            h.update(block)
    # Bug fix: the prefix was hard-coded to 'sha256=' even when a different
    # *algo* was requested; label the digest with the algorithm actually used.
    digest = algo + '=' + urlsafe_b64encode(
        h.digest()
    ).decode('latin1').rstrip('=')
    return (digest, length)
def open_for_csv(name, mode):
    """Open *name* for the csv module with version-correct newline handling."""
    if sys.version_info[0] >= 3:
        # Python 3: text mode with newline translation disabled, as the
        # csv module requires.
        return open(name, mode, newline='')
    # Python 2: the csv module wants binary mode instead.
    return open(name, mode + 'b')
def fix_script(path):
    """Rewrite a ``#!python`` shebang in *path* to point at this interpreter.

    Returns True when the file was rewritten, False when it did not carry
    the placeholder shebang, and None when *path* is not a regular file.
    """
    # XXX RECORD hashes will need to be updated
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        first = script.readline()
        if not first.startswith(b'#!python'):
            return False
        # Grab the remainder before the file is reopened for writing.
        remainder = script.read()
    interpreter = sys.executable.encode(sys.getfilesystemencoding())
    shebang = b'#!' + interpreter + os.linesep.encode("ascii")
    with open(path, 'wb') as script:
        script.write(shebang)
        script.write(remainder)
    return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
                                \.dist-info$""", re.VERBOSE)


def root_is_purelib(name, wheeldir):
    """
    Return True if the wheel extracted into *wheeldir* targets purelib.
    """
    # Wheel .dist-info directory names use '_' where a project name may
    # use '-'.
    folded = name.replace("-", "_")
    for entry in os.listdir(wheeldir):
        match = dist_info_re.match(entry)
        if not match or match.group('name') != folded:
            continue
        wheel_metadata = os.path.join(wheeldir, entry, 'WHEEL')
        with open(wheel_metadata) as fh:
            for line in fh:
                if line.lower().rstrip() == "root-is-purelib: true":
                    return True
    return False
def get_entrypoints(filename):
    """Parse an ``entry_points.txt`` file.

    :param filename: path to the wheel's entry_points.txt.
    :return: a ``(console, gui)`` pair of dicts mapping script names to
        entry-point specifications; both empty when the file is missing.
    """
    if not os.path.exists(filename):
        return {}, {}

    # This is done because you can pass a string to entry_points wrappers
    # which means that they may or may not be valid INI files. The attempt
    # here is to strip leading and trailing whitespace in order to make
    # them valid INI files.
    with open(filename) as fp:
        data = StringIO()
        for line in fp:
            data.write(line.strip())
            data.write("\n")
        data.seek(0)

    cp = configparser.RawConfigParser()
    # Keep option (script) names case-sensitive instead of lower-casing.
    cp.optionxform = lambda option: option
    # Bug fix: `readfp` was deprecated in Python 3.2 and removed in 3.12;
    # use `read_file` when available, falling back for Python 2.
    read_file = getattr(cp, 'read_file', None)
    if read_file is None:
        read_file = cp.readfp
    read_file(data)

    console = {}
    gui = {}
    if cp.has_section('console_scripts'):
        console = dict(cp.items('console_scripts'))
    if cp.has_section('gui_scripts'):
        gui = dict(cp.items('gui_scripts'))
    return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
                     pycompile=True, scheme=None, isolated=False, prefix=None):
    """Install a wheel that has been unpacked into *wheeldir*.

    :param name: distribution name, used to pick purelib vs platlib and to
        locate the .dist-info directory.
    :param req: the requirement being installed (used in messages and to
        match its .dist-info directory).
    :param wheeldir: directory holding the unpacked wheel contents.
    :param user: forwarded to distutils_scheme when *scheme* is not given.
    :param home: forwarded to distutils_scheme when *scheme* is not given.
    :param root: forwarded to distutils_scheme when *scheme* is not given.
    :param pycompile: byte-compile the sources before installing when True.
    :param scheme: optional pre-computed install-scheme dict; computed from
        the other parameters when omitted.
    :param isolated: forwarded to distutils_scheme when *scheme* is not given.
    :param prefix: forwarded to distutils_scheme when *scheme* is not given.
    """
    if not scheme:
        scheme = distutils_scheme(
            name, user=user, home=home, root=root, isolated=isolated,
            prefix=prefix,
        )

    # Pure-python wheels install under purelib; wheels with compiled
    # extensions under platlib.
    if root_is_purelib(name, wheeldir):
        lib_dir = scheme['purelib']
    else:
        lib_dir = scheme['platlib']

    info_dir = []
    data_dirs = []
    source = wheeldir.rstrip(os.path.sep) + os.path.sep

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}
    changed = set()
    generated = []

    # Compile all of the pyc files that we're going to be installing
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                compileall.compile_dir(source, force=True, quiet=True)
        logger.debug(stdout.getvalue())

    def normpath(src, p):
        # RECORD entries always use forward slashes, regardless of OS.
        return os.path.relpath(src, p).replace(os.path.sep, '/')

    def record_installed(srcfile, destfile, modified=False):
        """Map archive RECORD paths to installation RECORD paths."""
        oldpath = normpath(srcfile, wheeldir)
        newpath = normpath(destfile, lib_dir)
        installed[oldpath] = newpath
        if modified:
            changed.add(destfile)

    def clobber(source, dest, is_base, fixer=None, filter=None):
        # Copy the tree at *source* over *dest*, collecting .data dirs and
        # the .dist-info dir (when is_base) and recording every file.
        ensure_dir(dest)  # common for the 'include' path

        for dir, subdirs, files in os.walk(source):
            basedir = dir[len(source):].lstrip(os.path.sep)
            destdir = os.path.join(dest, basedir)
            if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
                # .data subtrees are installed later via their own clobber().
                continue
            for s in subdirs:
                destsubdir = os.path.join(dest, basedir, s)
                if is_base and basedir == '' and destsubdir.endswith('.data'):
                    data_dirs.append(s)
                    continue
                elif (is_base and
                        s.endswith('.dist-info') and
                        canonicalize_name(s).startswith(
                            canonicalize_name(req.name))):
                    assert not info_dir, ('Multiple .dist-info directories: ' +
                                          destsubdir + ', ' +
                                          ', '.join(info_dir))
                    info_dir.append(destsubdir)
            for f in files:
                # Skip unwanted files
                if filter and filter(f):
                    continue
                srcfile = os.path.join(dir, f)
                destfile = os.path.join(dest, basedir, f)
                # directory creation is lazy and after the file filtering above
                # to ensure we don't install empty dirs; empty dirs can't be
                # uninstalled.
                ensure_dir(destdir)

                # We use copyfile (not move, copy, or copy2) to be extra sure
                # that we are not moving directories over (copyfile fails for
                # directories) as well as to ensure that we are not copying
                # over any metadata because we want more control over what
                # metadata we actually copy over.
                shutil.copyfile(srcfile, destfile)

                # Copy over the metadata for the file, currently this only
                # includes the atime and mtime.
                st = os.stat(srcfile)
                if hasattr(os, "utime"):
                    os.utime(destfile, (st.st_atime, st.st_mtime))

                # If our file is executable, then make our destination file
                # executable.
                if os.access(srcfile, os.X_OK):
                    st = os.stat(srcfile)
                    permissions = (
                        st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
                    )
                    os.chmod(destfile, permissions)

                changed = False
                if fixer:
                    changed = fixer(destfile)
                record_installed(srcfile, destfile, changed)

    clobber(source, lib_dir, True)

    assert info_dir, "%s .dist-info directory not found" % req

    # Get the defined entry points
    ep_file = os.path.join(info_dir[0], 'entry_points.txt')
    console, gui = get_entrypoints(ep_file)

    def is_entrypoint_wrapper(name):
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)

    # Install the contents of each <pkg>.data subdirectory into its
    # matching scheme location ('scripts', 'data', 'headers', ...).
    for datadir in data_dirs:
        fixer = None
        filter = None
        for subdir in os.listdir(os.path.join(wheeldir, datadir)):
            fixer = None
            if subdir == 'scripts':
                # Scripts get their shebang fixed and setuptools-generated
                # wrappers filtered out (we regenerate those ourselves).
                fixer = fix_script
                filter = is_entrypoint_wrapper
            source = os.path.join(wheeldir, datadir, subdir)
            dest = scheme[subdir]
            clobber(source, dest, False, fixer=fixer, filter=filter)

    maker = ScriptMaker(None, scheme['scripts'])

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = set(('', ))

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Simplify the script and fix the fact that the default script swallows
    # every single stack trace.
    # See https://bitbucket.org/pypa/distlib/issue/34/
    # See https://bitbucket.org/pypa/distlib/issue/33/
    def _get_script_text(entry):
        if entry.suffix is None:
            raise InstallationError(
                "Invalid script entry point: %s for req: %s - A callable "
                "suffix is required. Cf https://packaging.python.org/en/"
                "latest/distributing.html#console-scripts for more "
                "information." % (entry, req)
            )
        return maker.script_template % {
            "module": entry.prefix,
            "import_name": entry.suffix.split(".")[0],
            "func": entry.suffix,
        }

    maker._get_script_text = _get_script_text
    maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys

from %(module)s import %(import_name)s

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(%(func)s())
"""

    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'pip = ' + pip_script
            generated.extend(maker.make(spec))

        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            spec = 'pip%s = %s' % (sys.version[:1], pip_script)
            generated.extend(maker.make(spec))

        spec = 'pip%s = %s' % (sys.version[:3], pip_script)
        generated.extend(maker.make(spec))
        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'easy_install = ' + easy_install_script
            generated.extend(maker.make(spec))

        spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
        generated.extend(maker.make(spec))
        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]

    # Generate the console and GUI entry points specified in the wheel
    if len(console) > 0:
        generated.extend(
            maker.make_multiple(['%s = %s' % kv for kv in console.items()])
        )

    if len(gui) > 0:
        generated.extend(
            maker.make_multiple(
                ['%s = %s' % kv for kv in gui.items()],
                {'gui': True}
            )
        )

    # Record pip as the installer
    installer = os.path.join(info_dir[0], 'INSTALLER')
    temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
    with open(temp_installer, 'wb') as installer_file:
        installer_file.write(b'pip\n')
    shutil.move(temp_installer, installer)
    generated.append(installer)

    # Record details of all files installed
    record = os.path.join(info_dir[0], 'RECORD')
    temp_record = os.path.join(info_dir[0], 'RECORD.pip')
    with open_for_csv(record, 'r') as record_in:
        with open_for_csv(temp_record, 'w+') as record_out:
            reader = csv.reader(record_in)
            writer = csv.writer(record_out)
            for row in reader:
                # Translate each archive-relative path to where the file
                # actually landed; re-hash files the install modified.
                row[0] = installed.pop(row[0], row[0])
                if row[0] in changed:
                    row[1], row[2] = rehash(row[0])
                writer.writerow(row)
            for f in generated:
                h, l = rehash(f)
                writer.writerow((normpath(f, lib_dir), h, l))
            for f in installed:
                # Files copied but absent from the original RECORD get an
                # entry without hash/size.
                writer.writerow((installed[f], '', ''))
    shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc

    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.

    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    from pip.utils import FakeFile  # circular import
    reader = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in reader:
        path = os.path.join(dist.location, row[0])
        yield path
        if path.endswith('.py'):
            directory, filename = os.path.split(path)
            path = os.path.join(directory, filename[:-3] + '.pyc')
            yield path
def wheel_version(source_dir):
    """
    Return the Wheel-Version of an extracted wheel, if possible.

    Otherwise, return False if we couldn't parse / extract it.

    :param source_dir: directory containing the unpacked wheel.
    :return: a tuple of ints such as ``(1, 0)``, or False on failure.
    """
    try:
        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]

        wheel_data = dist.get_metadata('WHEEL')
        wheel_data = Parser().parsestr(wheel_data)

        version = wheel_data['Wheel-Version'].strip()
        version = tuple(map(int, version.split('.')))
        return version
    # Bug fix: a bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catching Exception still turns any ordinary
    # failure (no dist found, missing metadata, unparsable version)
    # into the documented False return.
    except Exception:
        return False
def check_compatibility(version, name):
    """
    Raises errors or warns if called with an incompatible Wheel-Version.

    A Wheel-Version a whole major series ahead of what this code supports
    (e.g. 2.0 > 1.1) is refused outright; one that is only a minor version
    ahead (e.g. 1.2 > 1.1) merely triggers a warning.

    :param version: a 2-tuple representing a Wheel-Version (Major, Minor)
    :param name: name of wheel or package to raise exception about
    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
    """
    if not version:
        raise UnsupportedWheel(
            "%s is in an unsupported or invalid wheel" % name
        )

    major_ahead = version[0] > VERSION_COMPATIBLE[0]
    if major_ahead:
        raise UnsupportedWheel(
            "%s's Wheel-Version (%s) is not compatible with this version "
            "of pip" % (name, '.'.join(map(str, version)))
        )
    if version > VERSION_COMPATIBLE:
        logger.warning(
            'Installing from a newer Wheel-Version (%s)',
            '.'.join(map(str, version)),
        )
class Wheel(object):
    """A wheel file"""

    # TODO: maybe move the install code into this class

    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE
    )

    def __init__(self, filename):
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        parsed = self.wheel_file_re.match(filename)
        if parsed is None:
            raise InvalidWheelFilename(
                "%s is not a valid wheel filename." % filename
            )
        self.filename = filename
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.name = parsed.group('name').replace('_', '-')
        self.version = parsed.group('ver').replace('_', '-')
        self.pyversions = parsed.group('pyver').split('.')
        self.abis = parsed.group('abi').split('.')
        self.plats = parsed.group('plat').split('.')

        # All the tag combinations from this file
        self.file_tags = {
            (py, abi, plat)
            for py in self.pyversions
            for abi in self.abis
            for plat in self.plats
        }

    def support_index_min(self, tags=None):
        """
        Return the lowest index that any of this wheel's file-tag
        combinations achieves in the supported-tags list — e.g. with 8
        supported tags, a wheel whose tag comes first scores 0. Returns
        None when the wheel is not supported at all.
        """
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        positions = [tags.index(tag) for tag in self.file_tags if tag in tags]
        if not positions:
            return None
        return min(positions)

    def supported(self, tags=None):
        """Is this wheel supported on this system?"""
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        return any(tag in self.file_tags for tag in tags)
class WheelBuilder(object):
    """Build wheels from a RequirementSet."""

    def __init__(self, requirement_set, finder, build_options=None,
                 global_options=None):
        """
        :param requirement_set: the RequirementSet whose members to build.
        :param finder: a PackageFinder, used to prepare the requirements.
        :param build_options: extra args appended to ``setup.py bdist_wheel``.
        :param global_options: extra args inserted before the setup command.
        """
        self.requirement_set = requirement_set
        self.finder = finder
        # Where autobuilt wheels are cached, and where explicitly requested
        # wheels are written.
        self._cache_root = requirement_set._wheel_cache._cache_dir
        self._wheel_dir = requirement_set.wheel_download_dir
        self.build_options = build_options or []
        self.global_options = global_options or []

    def _build_one(self, req, output_dir, python_tag=None):
        """Build one wheel.

        :return: The filename of the built wheel, or None if the build failed.
        """
        # Build into a temp dir first so a partial build never lands in
        # output_dir; the temp dir is removed no matter what happens.
        tempd = tempfile.mkdtemp('pip-wheel-')
        try:
            if self.__build_one(req, tempd, python_tag=python_tag):
                try:
                    wheel_name = os.listdir(tempd)[0]
                    wheel_path = os.path.join(output_dir, wheel_name)
                    shutil.move(os.path.join(tempd, wheel_name), wheel_path)
                    logger.info('Stored in directory: %s', output_dir)
                    return wheel_path
                # NOTE(review): bare except deliberately treats any failure
                # to move the wheel as a build failure.
                except:
                    pass
            # Ignore return, we can't do anything else useful.
            self._clean_one(req)
            return None
        finally:
            rmtree(tempd)

    def _base_setup_args(self, req):
        # Command prefix shared by build and clean: run the requirement's
        # setup.py through the setuptools shim with unbuffered output.
        return [
            sys.executable, "-u", '-c',
            SETUPTOOLS_SHIM % req.setup_py
        ] + list(self.global_options)

    def __build_one(self, req, tempd, python_tag=None):
        """Run ``setup.py bdist_wheel`` into *tempd*; return True on success."""
        base_args = self._base_setup_args(req)

        spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
        with open_spinner(spin_message) as spinner:
            logger.debug('Destination directory: %s', tempd)
            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
                + self.build_options
            if python_tag is not None:
                wheel_args += ["--python-tag", python_tag]

            try:
                call_subprocess(wheel_args, cwd=req.setup_py_dir,
                                show_stdout=False, spinner=spinner)
                return True
            except:
                spinner.finish("error")
                logger.error('Failed building wheel for %s', req.name)
                return False

    def _clean_one(self, req):
        """Run ``setup.py clean --all`` for *req*; return True on success."""
        base_args = self._base_setup_args(req)

        logger.info('Running setup.py clean for %s', req.name)
        clean_args = base_args + ['clean', '--all']
        try:
            call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
            return True
        except:
            logger.error('Failed cleaning build dir for %s', req.name)
            return False

    def build(self, autobuilding=False):
        """Build wheels.

        :param unpack: If True, replace the sdist we built from with the
            newly built wheel, in preparation for installation.
        :return: True if all the wheels built correctly.
        """
        assert self._wheel_dir or (autobuilding and self._cache_root)
        # unpack sdists and constructs req set
        self.requirement_set.prepare_files(self.finder)

        reqset = self.requirement_set.requirements.values()

        # Select which requirements actually need a wheel built; when
        # autobuilding, editables, non-artifact links, and requirements
        # without a source dir are silently skipped.
        buildset = []
        for req in reqset:
            if req.constraint:
                continue
            if req.is_wheel:
                if not autobuilding:
                    logger.info(
                        'Skipping %s, due to already being wheel.', req.name)
            elif autobuilding and req.editable:
                pass
            elif autobuilding and req.link and not req.link.is_artifact:
                pass
            elif autobuilding and not req.source_dir:
                pass
            else:
                if autobuilding:
                    link = req.link
                    base, ext = link.splitext()
                    if pip.index.egg_info_matches(base, None, link) is None:
                        # Doesn't look like a package - don't autobuild a wheel
                        # because we'll have no way to lookup the result sanely
                        continue
                    if "binary" not in pip.index.fmt_ctl_formats(
                            self.finder.format_control,
                            canonicalize_name(req.name)):
                        logger.info(
                            "Skipping bdist_wheel for %s, due to binaries "
                            "being disabled for it.", req.name)
                        continue
                buildset.append(req)

        if not buildset:
            return True

        # Build the wheels.
        logger.info(
            'Building wheels for collected packages: %s',
            ', '.join([req.name for req in buildset]),
        )
        with indent_log():
            build_success, build_failure = [], []
            for req in buildset:
                python_tag = None
                if autobuilding:
                    python_tag = pep425tags.implementation_tag
                    output_dir = _cache_for_link(self._cache_root, req.link)
                    try:
                        ensure_dir(output_dir)
                    except OSError as e:
                        logger.warning("Building wheel for %s failed: %s",
                                       req.name, e)
                        build_failure.append(req)
                        continue
                else:
                    output_dir = self._wheel_dir
                wheel_file = self._build_one(
                    req, output_dir,
                    python_tag=python_tag,
                )
                if wheel_file:
                    build_success.append(req)
                    if autobuilding:
                        # XXX: This is mildly duplicative with prepare_files,
                        # but not close enough to pull out to a single common
                        # method.
                        # The code below assumes temporary source dirs -
                        # prevent it doing bad things.
                        if req.source_dir and not os.path.exists(os.path.join(
                                req.source_dir, PIP_DELETE_MARKER_FILENAME)):
                            raise AssertionError(
                                "bad source dir - missing marker")
                        # Delete the source we built the wheel from
                        req.remove_temporary_source()
                        # set the build directory again - name is known from
                        # the work prepare_files did.
                        req.source_dir = req.build_location(
                            self.requirement_set.build_dir)
                        # Update the link for this.
                        req.link = pip.index.Link(
                            path_to_url(wheel_file))
                        assert req.link.is_wheel
                        # extract the wheel into the dir
                        unpack_url(
                            req.link, req.source_dir, None, False,
                            session=self.requirement_set.session)
                else:
                    build_failure.append(req)

        # notify success/failure
        if build_success:
            logger.info(
                'Successfully built %s',
                ' '.join([req.name for req in build_success]),
            )
        if build_failure:
            logger.info(
                'Failed to build %s',
                ' '.join([req.name for req in build_failure]),
            )
        # Return True if all builds were successful
        return len(build_failure) == 0
| gpl-3.0 |
gramps-project/gramps | gramps/plugins/quickview/linkreferences.py | 11 | 2320 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
"""
Display link references for a note
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.lib import StyledTextTagType
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
def run(database, document, obj):
    """
    Display link references for this note.
    """
    # Simple-access helpers wrapping the database and the output document.
    accessor = SimpleAccess(database)
    output = SimpleDoc(document)
    table = QuickTable(accessor)

    # Title and column headers.
    output.title(_("Link References for this note"))
    output.paragraph("\n")
    table.columns(_("Type"), _("Reference"), _("Link check"))

    for (ldomain, ltype, lprop, lvalue) in obj.get_links():
        if ldomain == "gramps":
            # Internal link: try to resolve the referenced object.
            tagtype = _(ltype)
            referenced = accessor.get_link(ltype, lprop, lvalue)
            if referenced:
                tagvalue = referenced
                tagcheck = _("Ok")
            else:
                tagvalue = lvalue
                tagcheck = _("Failed: missing object")
        else:
            # External (internet) link: nothing to verify.
            tagtype = _("Internet")
            tagvalue = lvalue
            tagcheck = ""
        table.row(tagtype, tagvalue, tagcheck)

    if table.get_row_count() > 0:
        table.write(output)
        document.has_data = True
    else:
        output.paragraph(_("No link references for this note"))
        output.paragraph("")
        document.has_data = False
    output.paragraph("")
| gpl-2.0 |
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/CreateWorkspaceTest.py | 1 | 4168 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.api import MatrixWorkspace, AnalysisDataService
from mantid.simpleapi import CreateWorkspace
from testhelpers import run_algorithm
import numpy as np
class CreateWorkspaceTest(unittest.TestCase):
    """Tests for creating MatrixWorkspaces via the CreateWorkspace algorithm.

    Uses ``assertEqual``/``assertIsInstance`` throughout: the ``assertEquals``
    alias was deprecated and is removed in Python 3.12.
    """

    def test_create_with_1D_numpy_array(self):
        """A single-spectrum workspace echoes the 1D input arrays."""
        x = np.array([1., 2., 3., 4.])
        y = np.array([1., 2., 3.])
        e = np.sqrt(np.array([1., 2., 3.]))

        wksp = CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=1, UnitX='TOF')
        self.assertIsInstance(wksp, MatrixWorkspace)
        self.assertEqual(wksp.getNumberHistograms(), 1)

        self.assertEqual(len(wksp.readY(0)), len(y))
        self.assertEqual(len(wksp.readX(0)), len(x))
        self.assertEqual(len(wksp.readE(0)), len(e))

        for index in range(len(y)):
            self.assertEqual(wksp.readY(0)[index], y[index])
            self.assertEqual(wksp.readE(0)[index], e[index])
            self.assertEqual(wksp.readX(0)[index], x[index])
        # Last X value
        self.assertEqual(wksp.readX(0)[len(x) - 1], x[len(x) - 1])
        AnalysisDataService.remove("wksp")

    def test_create_with_2D_numpy_array(self):
        """A two-spectrum workspace echoes the 2D input arrays row by row."""
        x = np.array([1., 2., 3., 4.])
        y = np.array([[1., 2., 3.], [4., 5., 6.]])
        e = np.sqrt(y)

        wksp = CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=2, UnitX='TOF')
        self.assertIsInstance(wksp, MatrixWorkspace)
        self.assertEqual(wksp.getNumberHistograms(), 2)

        for i in [0, 1]:
            for j in range(len(y[0])):
                self.assertEqual(wksp.readY(i)[j], y[i][j])
                self.assertEqual(wksp.readE(i)[j], e[i][j])
                self.assertEqual(wksp.readX(i)[j], x[j])
            # Last X value
            self.assertEqual(wksp.readX(i)[len(x) - 1], x[len(x) - 1])

        AnalysisDataService.remove("wksp")

    def test_with_data_from_other_workspace(self):
        """Data extracted from one workspace round-trips into another."""
        x1 = np.array([1., 2., 3., 4.])
        y1 = np.array([[1., 2., 3.], [4., 5., 6.]])
        e1 = np.sqrt(y1)
        loq = CreateWorkspace(DataX=x1, DataY=y1, DataE=e1, NSpec=2,
                              UnitX='Wavelength')

        x2 = loq.extractX()
        y2 = loq.extractY()
        e2 = loq.extractE()

        wksp = CreateWorkspace(DataX=x2, DataY=y2, DataE=e2, NSpec=2,
                               UnitX='Wavelength')
        self.assertIsInstance(wksp, MatrixWorkspace)
        self.assertEqual(wksp.getNumberHistograms(), 2)

        for i in [0, 1]:
            for j in range(len(y2[0])):
                self.assertEqual(wksp.readY(i)[j], loq.readY(i)[j])
                self.assertEqual(wksp.readE(i)[j], loq.readE(i)[j])
                self.assertEqual(wksp.readX(i)[j], loq.readX(i)[j])
            # Last X value
            self.assertEqual(wksp.readX(i)[len(x2) - 1], loq.readX(i)[len(x2) - 1])

        AnalysisDataService.remove("wksp")

    def test_create_with_numerical_vertical_axis_values(self):
        """A plain-list vertical axis is copied onto axis 1."""
        data = [1., 2., 3.]
        axis_values = [5, 6, 7]
        alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,
                            VerticalAxisUnit='MomentumTransfer',
                            VerticalAxisValues=axis_values, child=True)
        wksp = alg.getProperty("OutputWorkspace").value
        for i in range(len(axis_values)):
            self.assertEqual(wksp.getAxis(1).getValue(i), axis_values[i])

    def test_create_with_numpy_vertical_axis_values(self):
        """A numpy-array vertical axis is copied onto axis 1."""
        data = [1., 2., 3.]
        axis_values = np.array([6., 7., 8.])
        alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,
                            VerticalAxisUnit='MomentumTransfer',
                            VerticalAxisValues=axis_values, child=True)
        wksp = alg.getProperty("OutputWorkspace").value
        for i in range(len(axis_values)):
            self.assertEqual(wksp.getAxis(1).getValue(i), axis_values[i])


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
openhatch/oh-greenhouse | greenhouse/migrations/0008_auto__add_field_people_control_group.py | 1 | 9876 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the boolean ``control_group`` column."""
        # Adding field 'People.control_group'
        db.add_column(u'people', 'control_group',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the People.control_group column."""
    # Deleting field 'People.control_group'
    db.delete_column(u'people', 'control_group')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'uploads.people': {
'Meta': {'object_name': 'People', 'db_table': "u'people'"},
'contacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'control_group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.TextField', [], {'unique': 'True', 'blank': 'True'}),
'first_upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['uploads.Uploads']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['uploads.Uploads']"}),
'name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'total_uploads': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'ubuntu_dev': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'uploads.udd': {
'Meta': {'unique_together': "(('source', 'version'),)", 'object_name': 'UDD', 'db_table': "u'upload_history'", 'managed': 'False'},
'changed_by': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed_by_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed_by_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'}),
'distribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fingerprint': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'key_id': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'nmu': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'signed_by': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'signed_by_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'signed_by_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.TextField', [], {})
},
u'uploads.uploads': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Uploads', 'db_table': "u'uploads'"},
'email_changer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_sponsor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_changer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name_sponsor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'package': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'release': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'uploads.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['uploads'] | agpl-3.0 |
Milad-Rakhsha/chrono | src/demos/python/irrlicht/demo_IRR_rot_spring.py | 3 | 4788 | # =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Simple example demonstrating the use of ChLinkRotSpringCB.
#
# Recall that Irrlicht uses a left-hand frame, so everything is rendered with
# left and right flipped.
#
# =============================================================================
import pychrono as chrono
import pychrono.irrlicht as irr
import math as m
# =============================================================================
# Parameters of the linear rotational spring-damper law implemented by
# MySpringTorque below.
spring_coef = 40        # spring stiffness (scales the angular deviation)
damping_coef = 2        # damping coefficient (scales the angular speed)
rest_angle = m.pi / 6   # zero-torque (equilibrium) angle of the spring
# =============================================================================
# Functor class implementing the torque for a ChLinkRotSpringCB link.
class MySpringTorque(chrono.TorqueFunctor):
    """Torque functor for a ChLinkRotSpringCB link.

    Implements the linear rotational spring-damper law
        torque = -spring_coef * (angle - rest_angle) - damping_coef * vel
    using the module-level coefficients defined above.
    """

    def __init__(self):
        super(MySpringTorque, self).__init__()

    def __call__(self,
                 time,    # current time
                 angle,   # relative angle of rotation
                 vel,     # relative angular speed
                 link):   # back-pointer to associated link
        # Restoring term about the rest angle plus a viscous term
        # opposing the relative angular velocity; the overall minus
        # sign makes both act against the deviation.
        spring_term = spring_coef * (angle - rest_angle)
        damping_term = damping_coef * vel
        return -(spring_term + damping_term)
# =============================================================================
print("Copyright (c) 2017 projectchrono.org")

system = chrono.ChSystemNSC()
# Disable gravity so the motion is governed only by the initial
# velocities and the rotational spring-damper.
system.Set_G_acc(chrono.ChVectorD(0, 0, 0))

# Revolute joint frame: rotated pi/6 about global X, offset along +X.
rev_rot = chrono.Q_from_AngX(m.pi / 6.0)
rev_dir = rev_rot.GetZaxis()
rev_pos = chrono.ChVectorD(+1, 0, 0)

# Create ground body (fixed, no collision)
ground = chrono.ChBody()
system.AddBody(ground)
ground.SetIdentifier(-1)
ground.SetBodyFixed(True)
ground.SetCollide(False)

# Visualization for revolute joint: a short cylinder along the joint axis.
cyl_rev = chrono.ChCylinderShape()
cyl_rev.GetCylinderGeometry().p1 = rev_pos + rev_dir * 0.2
cyl_rev.GetCylinderGeometry().p2 = rev_pos - rev_dir * 0.2
cyl_rev.GetCylinderGeometry().rad = 0.1
ground.AddAsset(cyl_rev)

# Offset from joint to body COM
offset = chrono.ChVectorD(1.5, 0, 0)

# Consistent initial velocities: linear COM velocity derived from the
# angular velocity about the joint axis ('%' is pychrono's cross product).
omega = 5.0
ang_vel = rev_dir * omega
lin_vel = ang_vel % offset

# Create pendulum body
body = chrono.ChBody()
system.AddBody(body)
body.SetPos(rev_pos + offset)
body.SetPos_dt(lin_vel)
body.SetWvel_par(ang_vel)
body.SetIdentifier(1)
body.SetBodyFixed(False)
body.SetCollide(False)
body.SetMass(1)
body.SetInertiaXX(chrono.ChVectorD(1, 1, 1))

# Attach visualization assets: sphere at the COM, rod back to the joint,
# and a common color.
sph = chrono.ChSphereShape()
sph.GetSphereGeometry().rad = 0.3
body.AddAsset(sph)
cyl = chrono.ChCylinderShape()
cyl.GetCylinderGeometry().p1 = chrono.ChVectorD(-1.5, 0, 0)
cyl.GetCylinderGeometry().p2 = chrono.ChVectorD(0, 0, 0)
cyl.GetCylinderGeometry().rad = 0.1
body.AddAsset(cyl)
col = chrono.ChColorAsset()
col.SetColor(chrono.ChColor(0.7, 0.8, 0.8))
body.AddAsset(col)

# Create revolute joint between body and ground
rev = chrono.ChLinkLockRevolute()
rev.Initialize(body, ground, chrono.ChCoordsysD(rev_pos, rev_rot))
system.AddLink(rev)

# Create the rotational spring between body and ground, driven by the
# MySpringTorque functor defined above (same frame as the revolute joint).
torque = MySpringTorque()
spring = chrono.ChLinkRotSpringCB()
spring.Initialize(body, ground, chrono.ChCoordsysD(rev_pos, rev_rot))
spring.RegisterTorqueFunctor(torque)
system.AddLink(spring);

# Create the Irrlicht application
application = irr.ChIrrApp(system, "ChLinkRotSpringCB demo", irr.dimension2du(800, 600))
application.AddTypicalLogo()
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(irr.vector3df(3, 1, 3))
application.AssetBindAll()
application.AssetUpdateAll()

# Simulation loop: step the dynamics, draw debug frames, and print the
# body/spring state every 50 frames.
application.SetTimestep(0.001)
frame = 0
while (application.GetDevice().run()) :
    application.BeginScene()
    application.DrawAll()
    irr.drawAllCOGs(system, application.GetVideoDriver(), 1.0)
    irr.drawAllLinkframes(system, application.GetVideoDriver(), 1.5)
    application.DoStep()
    if (frame % 50 == 0) :
        print('{:.6}'.format(str(system.GetChTime())))
        print('Body position ', body.GetPos())
        print('Body lin. vel ', body.GetPos_dt())
        print('Body abs. ang. vel ', body.GetWvel_par())
        print('Body loc. ang. vel ', body.GetWvel_loc())
        print('Rot. spring-damper ', spring.GetRotSpringAngle(), ' ', spring.GetRotSpringTorque())
        print('---------------')
    frame += 1
    application.EndScene()
sszlm/MissionPlanner | Lib/os.py | 52 | 27059 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
    """makedirs(path [, mode=0777])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist.  This is
    recursive.

    """
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again to find the real leaf.
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            # Recursively create all missing ancestors first.
            makedirs(head, mode)
        except OSError, e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        if tail == curdir:           # xxx/newdir/. exists if xxx/newdir exists
            return
    mkdir(name, mode)
def removedirs(name):
    """removedirs(path)

    Super-rmdir; remove a leaf directory and all empty intermediate
    ones.  Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs.  Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.

    """
    # The leaf must be removed successfully; errors here propagate.
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again to find the real leaf.
        head, tail = path.split(head)
    while head and tail:
        try:
            rmdir(head)
        except error:
            # Ancestor not removable (typically non-empty): stop pruning.
            break
        head, tail = path.split(head)
def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty.  Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first.  After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned way until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.

    """
    head, tail = path.split(new)
    if head and tail and not path.exists(head):
        # Create any missing parents of the destination first.
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    if head and tail:
        try:
            # Prune now-empty ancestors of the old location (best effort).
            removedirs(head)
        except error:
            pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
    To get a full path (which begins with top) to a file or directory in
    dirpath, do os.path.join(dirpath, name).

    If optional arg 'topdown' is true or not specified, the triple for a
    directory is generated before the triples for any of its subdirectories
    (directories are generated top down).  If topdown is false, the triple
    for a directory is generated after the triples for all of its
    subdirectories (directories are generated bottom up).

    When topdown is true, the caller can modify the dirnames list in-place
    (e.g., via del or slice assignment), and walk will only recurse into the
    subdirectories whose names remain in dirnames; this can be used to prune
    the search, or to impose a specific order of visiting.  Modifying
    dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
    generated.

    By default errors from the os.listdir() call are ignored.  If
    optional arg 'onerror' is specified, it should be a function; it
    will be called with one argument, an os.error instance.  It can
    report the error to continue with the walk, or raise the exception
    to abort the walk.  Note that the filename is available as the
    filename attribute of the exception object.

    By default, os.walk does not follow symbolic links to subdirectories on
    systems that support them.  In order to get this functionality, set the
    optional argument 'followlinks' to true.

    Caution:  if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk.  walk never
    changes the current directory, and assumes that the client doesn't
    either.

    Example:

    import os
    from os.path import join, getsize
    for root, dirs, files in os.walk('python/Lib/email'):
        print root, "consumes",
        print sum([getsize(join(root, name)) for name in files]),
        print "bytes in", len(files), "non-directory files"
        if 'CVS' in dirs:
            dirs.remove('CVS')  # don't visit CVS directories
    """

    # Bind to locals once; these are used on every directory entry.
    islink, join, isdir = path.islink, path.join, path.isdir

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.path.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that listdir and error are globals in this module due
        # to earlier import-*.
        names = listdir(top)
    except error, err:
        if onerror is not None:
            onerror(err)
        return

    # Partition entries into subdirectories and everything else.
    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        # Yield before recursing so callers can prune 'dirs' in place.
        yield top, dirs, nondirs
    for name in dirs:
        new_path = join(top, name)
        if followlinks or not islink(new_path):
            for x in walk(new_path, topdown, onerror, followlinks):
                yield x
    if not topdown:
        yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process.  Thin varargs wrapper over execv(). """
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process.  The environment
    mapping is passed as the last positional argument. """
    env = args[-1]
    execve(file, args[:-1], env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.  Thin
    varargs wrapper over execvp(). """
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process.  The environment mapping is passed as the last positional
    argument. """
    env = args[-1]
    execvpe(file, args[:-1], env)
def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env , replacing the
    current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    """Shared implementation of execvp()/execvpe().

    If env is given, execve() is used; otherwise execv() with the
    current environment.  A bare command name (no directory part) is
    searched for along $PATH; the first error other than "not found"
    is remembered and re-raised if every candidate fails.
    """
    if env is not None:
        func = execve
        argrest = (args, env)
    else:
        func = execv
        argrest = (args,)
        env = environ

    head, tail = path.split(file)
    if head:
        # Explicit directory component: no PATH search, exec directly.
        func(file, *argrest)
        return
    if 'PATH' in env:
        envpath = env['PATH']
    else:
        envpath = defpath
    PATH = envpath.split(pathsep)
    saved_exc = None
    saved_tb = None
    for dir in PATH:
        fullname = path.join(dir, file)
        try:
            # On success this call never returns (process is replaced).
            func(fullname, *argrest)
        except error, e:
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure (anything other
            # than the file simply not being at this PATH entry).
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise error, saved_exc, saved_tb
    # All entries failed with ENOENT/ENOTDIR: re-raise the last error.
    raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default."""
    # Goes through os.environ (defined above), so any platform-specific
    # key normalization applied by that mapping is honored.
    return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
    # True if *name* is bound in this module's global namespace; used
    # below to probe which primitives the platform module provided
    # (e.g. _exists("fork"), _exists("spawnv")).
    return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] is't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
# Legacy fallbacks: on platforms with fork() but without native popen2/3/4,
# provide deprecated shims that delegate to the subprocess module.
if _exists("fork"):
    if not _exists("popen2"):
        def popen2(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout) are returned."""
            import warnings
            msg = "os.popen2 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            # shell=True only when cmd is a string (sequence -> direct exec).
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen2")
    if not _exists("popen3"):
        def popen3(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout, child_stderr) are returned."""
            import warnings
            msg = "os.popen3 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=PIPE, close_fds=True)
            return p.stdin, p.stdout, p.stderr
        __all__.append("popen3")
    if not _exists("popen4"):
        def popen4(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()). If 'cmd'
            is a string it will be passed to the shell (as with os.system()). If
            'bufsize' is specified, it sets the buffer size for the I/O pipes. The
            file objects (child_stdin, child_stdout_stderr) are returned."""
            import warnings
            msg = "os.popen4 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            # stderr is merged into stdout, hence the single output stream.
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=subprocess.STDOUT, close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen4")
import copy_reg as _copy_reg

# Make os.stat_result picklable: register a reconstructor with copy_reg so
# that unpickling rebuilds the object through _make_stat_result.
def _make_stat_result(tup, dict):
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass
# Same pickling support for os.statvfs_result (see stat_result above).
def _make_statvfs_result(tup, dict):
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
                     _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass
# Fallback implementation used only when the platform module does not
# already provide urandom().
if not _exists("urandom"):
    def urandom(n):
        """urandom(n) -> str

        Return a string of n random bytes suitable for cryptographic use.
        """
        # NOTE(review): inside os.py, `open`, `read` and `close` resolve to
        # the low-level fd-based POSIX calls imported at module top, not the
        # builtins -- confirm against the module header if relocating.
        try:
            _urandomfd = open("/dev/urandom", O_RDONLY)
        except (OSError, IOError):
            raise NotImplementedError("/dev/urandom (or equivalent) not found")
        try:
            bs = b""
            # Loop because a single read() may return fewer than n bytes.
            while n > len(bs):
                bs += read(_urandomfd, n - len(bs))
        finally:
            close(_urandomfd)
        return bs
| gpl-3.0 |
scrollback/kuma | vendor/packages/pyparsing/docs/examples/chemicalFormulas.py | 16 | 2052 | # chemicalFormulas.py
#
# Copyright (c) 2003, Paul McGuire
#
from pyparsing import Word, Optional, OneOrMore, Group, ParseException, Regex
# Atomic weights (g/mol) of the elements used by the example formulas below.
atomicWeight = dict(
    O=15.9994,
    H=1.00794,
    Na=22.9897,
    Cl=35.4527,
    C=12.0107,
)
def test( bnf, strg, fn=None ):
    """Parse *strg* with the grammar *bnf* and print the result.

    If parsing succeeds and *fn* is given, also print fn(results)
    (used below to compute molecular weights).  Python 2 syntax.
    """
    try:
        print strg,"->", bnf.parseString( strg ),
    except ParseException, pe:
        # Parse failure: report the exception instead of a result.
        print pe
    else:
        if fn != None:
            print fn( bnf.parseString( strg ) )
        else:
            # No post-processing function: just terminate the line.
            print
digits = "0123456789"

# Version 1
# Regex matching every one- or two-letter element symbol (alternatives are
# ordered so the longest symbol wins, e.g. "Na" before "N").
element = Regex("A[cglmrstu]|B[aehikr]?|C[adeflmorsu]?|D[bsy]|"
                "E[rsu]|F[emr]?|G[ade]|H[efgos]?|I[nr]?|Kr?|L[airu]|"
                "M[dgnot]|N[abdeiop]?|Os?|P[abdmortu]?|R[abefghnu]|"
                "S[bcegimnr]?|T[abcehilm]|Uu[bhopqst]|U|V|W|Xe|Yb?|Z[nr]")
# An element plus an optional count; a missing count defaults to "1".
elementRef = Group( element + Optional( Word( digits ), default="1" ) )
formula = OneOrMore( elementRef )
# Molecular weight: sum of atomic weight * quantity over parsed pairs.
fn = lambda elemList : sum( [ atomicWeight[elem]*int(qty) for elem,qty in elemList ] )
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
print

# Version 2 - access parsed items by field name
elementRef = Group( element.setResultsName("symbol") + \
    Optional( Word( digits ), default="1" ).setResultsName("qty") )
formula = OneOrMore( elementRef )
fn = lambda elemList : sum( [ atomicWeight[elem.symbol]*int(elem.qty) for elem in elemList ] )
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
print

# Version 3 - convert integers during parsing process
# The parse action makes .qty an int, so fn needs no int() conversion.
integer = Word( digits ).setParseAction(lambda t:int(t[0]))
elementRef = Group( element.setResultsName("symbol") + \
    Optional( integer, default=1 ).setResultsName("qty") )
formula = OneOrMore( elementRef )
fn = lambda elemList : sum( [ atomicWeight[elem.symbol]*elem.qty for elem in elemList ] )
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
spectrumone/django-outlook-api | python_tutorial/tutorial/outlookservice.py | 1 | 4799 | import requests
import uuid
import json
# Base URL of the Outlook REST API v2.0; '{0}' is filled with a resource
# path such as '/Me/Messages'.
outlook_api_endpoint = 'https://outlook.office.com/api/v2.0{0}'
# Generic API sending
def make_api_call(method, url, token, payload = None, parameters = None):
    """Issue a single Outlook REST API request and return the response.

    Args:
        method: HTTP verb -- 'GET', 'DELETE', 'PATCH' or 'POST'
            (case-insensitive).
        url: absolute endpoint URL.
        token: OAuth2 bearer access token.
        payload: JSON-serialisable request body for PATCH/POST.  When None,
            a hard-coded sample "create event" body is used -- kept only for
            backward compatibility with post_my_events(), which passes no
            payload.
        parameters: optional dict of query-string parameters.

    Returns:
        The requests.Response object, or None for an unsupported method.
    """
    # Headers sent with all API calls.
    headers = { 'User-Agent' : 'django-tutorial/1.0',
                'Authorization' : 'Bearer {0}'.format(token),
                'Accept' : 'application/json'}

    # Instrumentation headers: a unique request id makes it easier to
    # correlate requests and responses when troubleshooting, and is a
    # recommended best practice for this API.
    request_id = str(uuid.uuid4())
    instrumentation = { 'client-request-id' : request_id,
                        'return-client-request-id' : 'true' }
    headers.update(instrumentation)

    # BUG FIX: the original code unconditionally overwrote the caller's
    # `payload` with this sample event, so PATCH/POST could never send
    # caller-supplied data.  The sample is now only the fallback default.
    if payload is None:
        payload = {
            "Subject": "Discuss the Calendar REST API",
            "Body": {
                "ContentType": "HTML",
                "Content": "I think it will meet our requirements!"
            },
            "Start": {
                "DateTime": "2014-04-04T18:00:00",
                "TimeZone": "Pacific Standard Time"
            },
            "End": {
                "DateTime": "2014-04-04T19:00:00",
                "TimeZone": "Pacific Standard Time"
            },
            "Attendees": [
                {
                    "EmailAddress": {
                        "Address": "janets@a830edad9050849NDA1.onmicrosoft.com",
                        "Name": "Janet Schorr"
                    },
                    "Type": "Required"
                }
            ]
        }

    verb = method.upper()
    if verb == 'GET':
        return requests.get(url, headers = headers, params = parameters)
    if verb == 'DELETE':
        return requests.delete(url, headers = headers, params = parameters)
    if verb == 'PATCH':
        headers.update({ 'Content-Type' : 'application/json' })
        return requests.patch(url, headers = headers, data = json.dumps(payload), params = parameters)
    if verb == 'POST':
        headers.update({ 'Content-Type' : 'application/json' })
        return requests.post(url, headers = headers, data = json.dumps(payload), params = parameters)
    # Unsupported verb: preserve the original behaviour of returning None.
    return None
def get_my_messages(access_token):
    """Return the user's 10 newest messages as parsed JSON.

    On any non-OK HTTP status, return a "status: body" error string instead.
    """
    url = outlook_api_endpoint.format('/Me/Messages')
    # OData query options: newest first, 10 results, and only the three
    # fields the tutorial actually displays.
    odata = {
        '$top': '10',
        '$select': 'ReceivedDateTime,Subject,From',
        '$orderby': 'ReceivedDateTime DESC',
    }
    response = make_api_call('GET', url, access_token, parameters=odata)
    if response.status_code == requests.codes.ok:
        return response.json()
    return "{0}: {1}".format(response.status_code, response.text)
def get_my_events(access_token):
    """Return the user's next 10 calendar events as parsed JSON.

    On any non-OK HTTP status, return a "status: body" error string instead.
    """
    url = outlook_api_endpoint.format('/Me/Events')
    # OData query options: 10 results, only Subject/Start/End, earliest first.
    odata = {
        '$top': '10',
        '$select': 'Subject,Start,End',
        '$orderby': 'Start/DateTime ASC',
    }
    response = make_api_call('GET', url, access_token, parameters=odata)
    if response.status_code == requests.codes.ok:
        return response.json()
    return "{0}: {1}".format(response.status_code, response.text)
def post_my_events(access_token):
    """Create an event on the user's calendar (make_api_call's sample body).

    Returns the parsed JSON response, or a "status: body" error string.
    """
    url = outlook_api_endpoint.format('/Me/Events')
    response = make_api_call('POST', url, access_token)
    if response.status_code == requests.codes.ok:
        return response.json()
    return "{0}: {1}".format(response.status_code, response.text)
def get_my_contacts(access_token):
    """Return the user's first 10 contacts (by given name) as parsed JSON.

    On any non-OK HTTP status, return a "status: body" error string instead.
    """
    url = outlook_api_endpoint.format('/Me/Contacts')
    # OData query options: 10 results, three displayed fields, A-Z order.
    odata = {
        '$top': '10',
        '$select': 'GivenName,Surname,EmailAddresses',
        '$orderby': 'GivenName ASC',
    }
    response = make_api_call('GET', url, access_token, parameters=odata)
    if response.status_code == requests.codes.ok:
        return response.json()
    return "{0}: {1}".format(response.status_code, response.text)
| mit |
abutcher/openshift-ansible | roles/lib_openshift/src/class/oc_configmap.py | 46 | 6206 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-arguments
class OCConfigMap(OpenShiftCLI):
    ''' Openshift ConfigMap Class

        ConfigMaps are a way to store data inside of objects
    '''

    def __init__(self,
                 name,
                 from_file,
                 from_literal,
                 state,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = name
        self.state = state
        # Lazily-populated caches backing the `configmap` / `inc_configmap`
        # properties below.
        self._configmap = None
        self._inc_configmap = None
        # Normalize None to empty dicts so the *_to_params helpers can
        # iterate unconditionally.
        self.from_file = from_file if from_file is not None else {}
        self.from_literal = from_literal if from_literal is not None else {}

    @property
    def configmap(self):
        # Current configmap as stored in the cluster (fetched on first use).
        if self._configmap is None:
            self._configmap = self.get()
        return self._configmap

    @configmap.setter
    def configmap(self, inc_map):
        self._configmap = inc_map

    @property
    def inc_configmap(self):
        # Proposed ("incoming") configmap, rendered via a dry-run create.
        if self._inc_configmap is None:
            results = self.create(dryrun=True, output=True)
            self._inc_configmap = results['results']
        return self._inc_configmap

    @inc_configmap.setter
    def inc_configmap(self, inc_map):
        self._inc_configmap = inc_map

    def from_file_to_params(self):
        '''return from_files in a string ready for cli'''
        return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]

    def from_literal_to_params(self):
        '''return from_literal in a string ready for cli'''
        return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]

    def get(self):
        '''return a configmap by name '''
        results = self._get('configmap', self.name)
        if results['returncode'] == 0 and results['results'][0]:
            self.configmap = results['results'][0]

        # "not found" is not an error for our purposes: treat it as an
        # empty, successful lookup so run_ansible can decide to create.
        if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
            results['returncode'] = 0

        return results

    def delete(self):
        '''delete a configmap by name'''
        return self._delete('configmap', self.name)

    def create(self, dryrun=False, output=False):
        '''Create a configmap

           :dryrun: Product what you would have done. default: False
           :output: Whether to parse output. default: False
        '''
        cmd = ['create', 'configmap', self.name]
        if self.from_literal is not None:
            cmd.extend(self.from_literal_to_params())

        if self.from_file is not None:
            cmd.extend(self.from_file_to_params())

        if dryrun:
            # --dry-run -ojson renders the would-be object without applying it.
            cmd.extend(['--dry-run', '-ojson'])

        results = self.openshift_cmd(cmd, output=output)

        return results

    def update(self):
        '''run update configmap '''
        return self._replace_content('configmap', self.name, self.inc_configmap)

    def needs_update(self):
        '''compare the current configmap with the proposed and return if they are equal'''
        return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code'''

        oc_cm = OCConfigMap(params['name'],
                            params['from_file'],
                            params['from_literal'],
                            params['state'],
                            params['namespace'],
                            kubeconfig=params['kubeconfig'],
                            verbose=params['debug'])

        state = params['state']

        api_rval = oc_cm.get()

        if 'failed' in api_rval:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}

        if not params['name']:
            return {'failed': True,
                    'msg': 'Please specify a name when state is absent|present.'}

        ########
        # Delete
        ########
        if state == 'absent':
            if not Utils.exists(api_rval['results'], params['name']):
                return {'changed': False, 'state': 'absent'}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}

            api_rval = oc_cm.delete()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        ########
        # Create
        ########
        if state == 'present':
            if not Utils.exists(api_rval['results'], params['name']):

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a create.'}

                api_rval = oc_cm.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # Re-fetch so the caller sees the object as the cluster stores it.
                api_rval = oc_cm.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            if oc_cm.needs_update():

                api_rval = oc_cm.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                api_rval = oc_cm.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            # Object exists and matches the proposal: nothing to do.
            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
| apache-2.0 |
andreabrambilla/libres | python/tests/res/fm/test_fm_config.py | 2 | 1414 | import os
import unittest
from tests import ResTest
class TestFMValidity(ResTest):
    """Check that every forward-model config's EXECUTABLE really exists
    and is executable."""

    def setUp(self):
        pass

    def _extract_executable(self, filename):
        """Return the value of the EXECUTABLE entry in *filename*, or None."""
        with open(filename, 'r') as config:
            for raw_line in config:
                tokens = raw_line.strip().split()
                if len(tokens) > 1 and tokens[0] == 'EXECUTABLE':
                    return tokens[1]
        return None

    def _file_exist_and_is_executable(self, file_path):
        """True if *file_path* is a regular file with the execute bit set."""
        return os.path.isfile(file_path) and os.access(file_path, os.X_OK)

    def test_validate_scripts(self):
        fm_path = os.path.join(self.SOURCE_ROOT, 'share/ert/forward-models')
        # Walk each forward-model sub-directory.
        for entry in os.listdir(fm_path):
            fm_dir = os.path.join(fm_path, entry)
            if not os.path.isdir(fm_dir):
                continue
            # Inspect every config file inside the sub-directory.
            for name in os.listdir(fm_dir):
                config_file = os.path.join(fm_dir, name)
                if not os.path.isfile(config_file):
                    continue
                executable_script = self._extract_executable(config_file)
                if executable_script is not None:
                    self.assertTrue(
                        self._file_exist_and_is_executable(
                            os.path.join(fm_dir, executable_script)))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
mwiencek/picard | picard/ui/itemviews.py | 1 | 27669 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import re
from PyQt4 import QtCore, QtGui
from picard.album import Album, NatAlbum
from picard.cluster import Cluster, ClusterList, UnmatchedFiles
from picard.file import File
from picard.track import Track, NonAlbumTrack
from picard.util import encode_filename, icontheme, partial
from picard.config import Option, TextOption
from picard.plugin import ExtensionPoint
from picard.ui.ratingwidget import RatingWidget
from picard.ui.collectionmenu import CollectionMenu
class BaseAction(QtGui.QAction):
    """Base class for context-menu actions contributed by plugins.

    Subclasses override NAME (menu label), optionally MENU (sub-menu path)
    and callback(), which receives the currently selected objects.
    """
    NAME = "Unknown"
    MENU = []

    def __init__(self):
        QtGui.QAction.__init__(self, self.NAME, None)
        self.triggered.connect(self.__run)

    def __run(self):
        # Collect the current selection and hand it to the subclass hook.
        selection = self.tagger.window.panel.selected_objects()
        self.callback(selection)

    def callback(self, objs):
        raise NotImplementedError
# Plugin extension points: one registry per object type whose context
# menu can carry plugin-provided actions.
_album_actions = ExtensionPoint()
_cluster_actions = ExtensionPoint()
_clusterlist_actions = ExtensionPoint()
_track_actions = ExtensionPoint()
_file_actions = ExtensionPoint()

def register_album_action(action):
    # Register a BaseAction for the album context menu.
    _album_actions.register(action.__module__, action)

def register_cluster_action(action):
    # Register a BaseAction for the cluster context menu.
    _cluster_actions.register(action.__module__, action)

def register_clusterlist_action(action):
    # Register a BaseAction for the cluster-list context menu.
    _clusterlist_actions.register(action.__module__, action)

def register_track_action(action):
    # Register a BaseAction for the track context menu.
    _track_actions.register(action.__module__, action)

def register_file_action(action):
    # Register a BaseAction for the file context menu.
    _file_actions.register(action.__module__, action)
def get_match_color(similarity, basecolor):
    """Blend between a fixed red (223, 125, 125) and *basecolor*.

    similarity 0.0 yields the red; 1.0 yields *basecolor*; values in
    between interpolate linearly per RGB channel.
    """
    good = (basecolor.red(), basecolor.green(), basecolor.blue())
    bad = (223, 125, 125)
    channels = [b + (g - b) * similarity for g, b in zip(good, bad)]
    return QtGui.QColor(channels[0], channels[1], channels[2])
class MainPanel(QtGui.QSplitter):
    """Main window splitter holding the file view (left) and album view
    (right), and keeping their selections mutually exclusive."""

    options = [
        Option("persist", "splitter_state", QtCore.QByteArray(), QtCore.QVariant.toByteArray),
    ]

    # (translatable header label, metadata column key) per tree column.
    columns = [
        (N_('Title'), 'title'),
        (N_('Length'), '~length'),
        (N_('Artist'), 'artist'),
    ]

    def __init__(self, window, parent=None):
        QtGui.QSplitter.__init__(self, parent)
        self.window = window
        self.create_icons()
        self.views = [FileTreeView(window, self), AlbumTreeView(window, self)]
        self.views[0].itemSelectionChanged.connect(self.update_selection_0)
        self.views[1].itemSelectionChanged.connect(self.update_selection_1)
        self._selected_view = 0
        # Guard flag preventing the two views' selection handlers from
        # re-triggering each other while we clear one side.
        self._ignore_selection_changes = False
        self._selected_objects = set()
        # Share palette-derived colors with all tree items via class attrs.
        TreeItem.window = window
        TreeItem.base_color = self.palette().base().color()
        TreeItem.text_color = self.palette().text().color()
        TrackItem.track_colors = {
            File.NORMAL: self.config.setting["color_saved"],
            File.CHANGED: TreeItem.text_color,
            File.PENDING: self.config.setting["color_pending"],
            File.ERROR: self.config.setting["color_error"],
        }
        FileItem.file_colors = {
            File.NORMAL: TreeItem.text_color,
            File.CHANGED: self.config.setting["color_modified"],
            File.PENDING: self.config.setting["color_pending"],
            File.ERROR: self.config.setting["color_error"],
        }

    def selected_objects(self):
        # Snapshot of the currently selected model objects.
        return list(self._selected_objects)

    def save_state(self):
        # Persist splitter geometry and each view's column widths.
        self.config.persist["splitter_state"] = self.saveState()
        for view in self.views:
            view.save_state()

    def restore_state(self):
        self.restoreState(self.config.persist["splitter_state"])

    def create_icons(self):
        # SP_DirIcon may be missing on some Qt builds; fall back to theme.
        if hasattr(QtGui.QStyle, 'SP_DirIcon'):
            ClusterItem.icon_dir = self.style().standardIcon(QtGui.QStyle.SP_DirIcon)
        else:
            ClusterItem.icon_dir = icontheme.lookup('folder', icontheme.ICON_SIZE_MENU)
        AlbumItem.icon_cd = icontheme.lookup('media-optical', icontheme.ICON_SIZE_MENU)
        AlbumItem.icon_cd_saved = icontheme.lookup('media-optical-saved', icontheme.ICON_SIZE_MENU)
        TrackItem.icon_note = QtGui.QIcon(":/images/note.png")
        FileItem.icon_file = QtGui.QIcon(":/images/file.png")
        FileItem.icon_file_pending = QtGui.QIcon(":/images/file-pending.png")
        FileItem.icon_error = icontheme.lookup('dialog-error', icontheme.ICON_SIZE_MENU)
        FileItem.icon_saved = QtGui.QIcon(":/images/track-saved.png")
        # Match icons indexed by similarity bucket (50%..100%).
        FileItem.match_icons = [
            QtGui.QIcon(":/images/match-50.png"),
            QtGui.QIcon(":/images/match-60.png"),
            QtGui.QIcon(":/images/match-70.png"),
            QtGui.QIcon(":/images/match-80.png"),
            QtGui.QIcon(":/images/match-90.png"),
            QtGui.QIcon(":/images/match-100.png"),
        ]
        FileItem.match_pending_icons = [
            QtGui.QIcon(":/images/match-pending-50.png"),
            QtGui.QIcon(":/images/match-pending-60.png"),
            QtGui.QIcon(":/images/match-pending-70.png"),
            QtGui.QIcon(":/images/match-pending-80.png"),
            QtGui.QIcon(":/images/match-pending-90.png"),
            QtGui.QIcon(":/images/match-pending-100.png"),
        ]
        self.icon_plugins = icontheme.lookup('applications-system', icontheme.ICON_SIZE_MENU)

    def update_selection(self, i, j):
        # Make view i the active one, clearing view j's selection.
        self._selected_view = i
        self.views[j].clearSelection()
        self._selected_objects.clear()
        self._selected_objects.update(item.obj for item in self.views[i].selectedItems())
        self.window.update_selection(self.selected_objects())

    def update_selection_0(self):
        # Selection changed in the file view.
        if not self._ignore_selection_changes:
            self._ignore_selection_changes = True
            self.update_selection(0, 1)
            self._ignore_selection_changes = False

    def update_selection_1(self):
        # Selection changed in the album view.
        if not self._ignore_selection_changes:
            self._ignore_selection_changes = True
            self.update_selection(1, 0)
            self._ignore_selection_changes = False

    def update_current_view(self):
        self.update_selection(self._selected_view, abs(self._selected_view - 1))

    def remove(self, objects):
        # Remove objects from the tagger, then restore a sane selection.
        self._ignore_selection_changes = True
        self.tagger.remove(objects)
        self._ignore_selection_changes = False

        view = self.views[self._selected_view]
        index = view.currentIndex()
        if index.isValid():
            # select the current index
            view.setCurrentIndex(index)
        else:
            self.update_current_view()
class BaseTreeView(QtGui.QTreeWidget):
    """Shared behaviour of the file and album tree views: context menu,
    drag and drop, state persistence, and item activation."""

    options = [
        Option("setting", "color_modified", QtGui.QColor(QtGui.QPalette.WindowText), QtGui.QColor),
        Option("setting", "color_saved", QtGui.QColor(0, 128, 0), QtGui.QColor),
        Option("setting", "color_error", QtGui.QColor(200, 0, 0), QtGui.QColor),
        Option("setting", "color_pending", QtGui.QColor(128, 128, 128), QtGui.QColor),
    ]

    def __init__(self, window, parent=None):
        QtGui.QTreeWidget.__init__(self, parent)
        self.window = window
        self.panel = parent

        self.numHeaderSections = len(MainPanel.columns)
        self.setHeaderLabels([_(h) for h, n in MainPanel.columns])
        self.restore_state()

        self.setAcceptDrops(True)
        self.setDragEnabled(True)
        self.setDropIndicatorShown(True)
        self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)

        # enable sorting, but don't actually use it by default
        # XXX it would be nice to be able to go to the 'no sort' mode, but the
        # internal model that QTreeWidget uses doesn't support it
        self.header().setSortIndicator(-1, QtCore.Qt.AscendingOrder)
        self.setSortingEnabled(True)

        self.expand_all_action = QtGui.QAction(_("&Expand all"), self)
        self.expand_all_action.triggered.connect(self.expandAll)
        self.collapse_all_action = QtGui.QAction(_("&Collapse all"), self)
        self.collapse_all_action.triggered.connect(self.collapseAll)
        self.doubleClicked.connect(self.activate_item)

    def contextMenuEvent(self, event):
        # Build the context menu dynamically, based on the type of the
        # object under the cursor (Track, Cluster, File, Album, ...).
        item = self.itemAt(event.pos())
        if not item:
            return
        obj = item.obj
        plugin_actions = None
        can_view_info = self.window.view_info_action.isEnabled()
        menu = QtGui.QMenu(self)
        if isinstance(obj, Track):
            if can_view_info:
                menu.addAction(self.window.view_info_action)
            plugin_actions = list(_track_actions)
            if obj.num_linked_files == 1:
                menu.addAction(self.window.open_file_action)
                menu.addAction(self.window.open_folder_action)
                plugin_actions.extend(_file_actions)
            menu.addAction(self.window.browser_lookup_action)
            menu.addSeparator()
            if isinstance(obj, NonAlbumTrack):
                menu.addAction(self.window.refresh_action)
        elif isinstance(obj, Cluster):
            menu.addAction(self.window.browser_lookup_action)
            menu.addSeparator()
            menu.addAction(self.window.autotag_action)
            menu.addAction(self.window.analyze_action)
            if isinstance(obj, UnmatchedFiles):
                menu.addAction(self.window.cluster_action)
            plugin_actions = list(_cluster_actions)
        elif isinstance(obj, ClusterList):
            menu.addAction(self.window.autotag_action)
            menu.addAction(self.window.analyze_action)
            plugin_actions = list(_clusterlist_actions)
        elif isinstance(obj, File):
            if can_view_info:
                menu.addAction(self.window.view_info_action)
            menu.addAction(self.window.open_file_action)
            menu.addAction(self.window.open_folder_action)
            menu.addAction(self.window.browser_lookup_action)
            menu.addSeparator()
            menu.addAction(self.window.autotag_action)
            menu.addAction(self.window.analyze_action)
            plugin_actions = list(_file_actions)
        elif isinstance(obj, Album):
            if can_view_info:
                menu.addAction(self.window.view_info_action)
            menu.addAction(self.window.browser_lookup_action)
            menu.addSeparator()
            menu.addAction(self.window.refresh_action)
            plugin_actions = list(_album_actions)

        menu.addAction(self.window.save_action)
        menu.addAction(self.window.remove_action)

        bottom_separator = False

        # "Other versions" submenu: lazily loads the release group's
        # alternative releases the first time the menu is shown.
        if isinstance(obj, Album) and not isinstance(obj, NatAlbum) and obj.loaded:
            releases_menu = QtGui.QMenu(_("&Other versions"), menu)
            menu.addSeparator()
            menu.addMenu(releases_menu)
            loading = releases_menu.addAction(_('Loading...'))
            loading.setEnabled(False)
            bottom_separator = True

            if len(self.selectedIndexes()) == len(MainPanel.columns):
                def _add_other_versions():
                    releases_menu.removeAction(loading)
                    for version in obj.release_group.versions:
                        action = releases_menu.addAction(version["name"])
                        action.setCheckable(True)
                        if obj.id == version["id"]:
                            action.setChecked(True)
                        action.triggered.connect(partial(obj.switch_release_version, version["id"]))

                _add_other_versions() if obj.release_group.loaded else \
                    obj.release_group.load_versions(_add_other_versions)
                releases_menu.setEnabled(True)
            else:
                releases_menu.setEnabled(False)

        # Inline rating widget for a single selected track.
        if self.config.setting["enable_ratings"] and \
           len(self.window.selected_objects) == 1 and isinstance(obj, Track):
            menu.addSeparator()
            action = QtGui.QWidgetAction(menu)
            action.setDefaultWidget(RatingWidget(menu, obj))
            menu.addAction(action)
            menu.addSeparator()

        # Note: type(a) == Album deliberately excludes NatAlbum subclasses.
        selected_albums = [a for a in self.window.selected_objects if type(a) == Album]
        if selected_albums:
            if not bottom_separator:
                menu.addSeparator()
            menu.addMenu(CollectionMenu(selected_albums, _("Collections"), menu))

        if plugin_actions:
            plugin_menu = QtGui.QMenu(_("&Plugins"), menu)
            plugin_menu.setIcon(self.panel.icon_plugins)
            menu.addSeparator()
            menu.addMenu(plugin_menu)

            # Build nested sub-menus from each action's MENU path, caching
            # created menus by path prefix so siblings share them.
            plugin_menus = {}
            for action in plugin_actions:
                action_menu = plugin_menu
                for index in xrange(1, len(action.MENU)):
                    key = tuple(action.MENU[:index])
                    try:
                        action_menu = plugin_menus[key]
                    except KeyError:
                        action_menu = plugin_menus[key] = action_menu.addMenu(key[-1])
                action_menu.addAction(action)

        if isinstance(obj, Cluster) or isinstance(obj, ClusterList) or isinstance(obj, Album):
            menu.addSeparator()
            menu.addAction(self.expand_all_action)
            menu.addAction(self.collapse_all_action)

        menu.exec_(event.globalPos())
        event.accept()

    def restore_state(self):
        # Restore persisted column widths ("w1 w2 ..." string).
        sizes = self.config.persist[self.view_sizes.name]
        header = self.header()
        sizes = sizes.split(" ")
        try:
            for i in range(self.numHeaderSections - 1):
                header.resizeSection(i, int(sizes[i]))
        except IndexError:
            pass

    def save_state(self):
        cols = range(self.numHeaderSections - 1)
        sizes = " ".join(str(self.header().sectionSize(i)) for i in cols)
        self.config.persist[self.view_sizes.name] = sizes

    def supportedDropActions(self):
        return QtCore.Qt.CopyAction | QtCore.Qt.MoveAction

    def mimeTypes(self):
        """List of MIME types accepted by this view."""
        return ["text/uri-list", "application/picard.album-list"]

    def dragEnterEvent(self, event):
        if event.mimeData().hasUrls():
            event.setDropAction(QtCore.Qt.CopyAction)
            event.accept()
        else:
            event.acceptProposedAction()

    def startDrag(self, supportedActions):
        """Start drag, *without* using pixmap."""
        items = self.selectedItems()
        if items:
            drag = QtGui.QDrag(self)
            drag.setMimeData(self.mimeData(items))
            drag.start(supportedActions)

    def mimeData(self, items):
        """Return MIME data for specified items."""
        album_ids = []
        files = []
        url = QtCore.QUrl.fromLocalFile
        for item in items:
            obj = item.obj
            if isinstance(obj, Album):
                album_ids.append(str(obj.id))
            elif isinstance(obj, Track):
                files.extend(url(file.filename) for file in obj.linked_files)
            elif isinstance(obj, File):
                files.append(url(obj.filename))
            elif isinstance(obj, Cluster):
                files.extend(url(file.filename) for file in obj.files)
            elif isinstance(obj, ClusterList):
                files.extend(url(file.filename) for cluster in obj for file in cluster.files)
        mimeData = QtCore.QMimeData()
        mimeData.setData("application/picard.album-list", "\n".join(album_ids))
        if files:
            mimeData.setUrls(files)
        return mimeData

    @staticmethod
    def drop_urls(urls, target):
        # Dispatch dropped URLs: local paths become files/directories,
        # MusicBrainz release/recording URLs trigger lookups.
        files = []
        new_files = []
        for url in urls:
            if url.scheme() == "file" or not url.scheme():
                # Dropping a file from iTunes gives a filename with a NULL terminator
                filename = os.path.normpath(os.path.realpath(unicode(url.toLocalFile()).rstrip("\0")))
                file = BaseTreeView.tagger.files.get(filename)
                if file:
                    files.append(file)
                elif os.path.isdir(encode_filename(filename)):
                    BaseTreeView.tagger.add_directory(filename)
                else:
                    new_files.append(filename)
            elif url.scheme() in ("http", "https"):
                path = unicode(url.path())
                match = re.search(r"/(release|recording)/([0-9a-z\-]{36})", path)
                if match:
                    entity = match.group(1)
                    mbid = match.group(2)
                    if entity == "release":
                        BaseTreeView.tagger.load_album(mbid)
                    elif entity == "recording":
                        BaseTreeView.tagger.load_nat(mbid)
        if files:
            BaseTreeView.tagger.move_files(files, target)
        if new_files:
            BaseTreeView.tagger.add_files(new_files, target=target)

    def dropEvent(self, event):
        return QtGui.QTreeView.dropEvent(self, event)

    def dropMimeData(self, parent, index, data, action):
        # Resolve the drop target object, then handle the two MIME types.
        target = None
        if parent:
            if index == parent.childCount():
                item = parent
            else:
                item = parent.child(index)
            if item is not None:
                target = item.obj
        self.log.debug("Drop target = %r", target)
        handled = False
        # text/uri-list
        urls = data.urls()
        if urls:
            if target is None:
                target = self.tagger.unmatched_files
            self.drop_urls(urls, target)
            handled = True
        # application/picard.album-list
        albums = data.data("application/picard.album-list")
        if albums:
            if isinstance(self, FileTreeView) and target is None:
                target = self.tagger.unmatched_files
            albums = [self.tagger.load_album(id) for id in str(albums).split("\n")]
            self.tagger.move_files(self.tagger.get_files_from_objects(albums), target)
            handled = True
        return handled

    def activate_item(self, index):
        obj = self.itemFromIndex(index).obj
        # Double-clicking albums should expand them. The album info can be
        # viewed by using the toolbar button.
        if not isinstance(obj, Album) and obj.can_view_info():
            self.window.view_info()

    def add_cluster(self, cluster, parent_item=None):
        # Create an item for `cluster`, hidden when it is an empty
        # hide-if-empty cluster.
        if parent_item is None:
            parent_item = self.clusters
        cluster_item = ClusterItem(cluster, not cluster.special, parent_item)
        if cluster.hide_if_empty and not cluster.files:
            cluster_item.update()
            cluster_item.setHidden(True)
        else:
            cluster_item.add_files(cluster.files)

    def moveCursor(self, action, modifiers):
        # Keep keyboard navigation in sync with selection: ensure the
        # current item is selected before moving up/down.
        if action in (QtGui.QAbstractItemView.MoveUp, QtGui.QAbstractItemView.MoveDown):
            item = self.currentItem()
            if item and not item.isSelected():
                self.setCurrentItem(item)
        return QtGui.QTreeWidget.moveCursor(self, action, modifiers)
class FileTreeView(BaseTreeView):
    """Left-hand view: unmatched files and file clusters."""

    view_sizes = TextOption("persist", "file_view_sizes", "250 40 100")

    def __init__(self, window, parent=None):
        BaseTreeView.__init__(self, window, parent)
        # Two fixed top-level items: the unmatched-files cluster and the
        # container for all other clusters.
        self.unmatched_files = ClusterItem(self.tagger.unmatched_files, False, self)
        self.unmatched_files.update()
        self.setItemExpanded(self.unmatched_files, True)
        self.clusters = ClusterItem(self.tagger.clusters, False, self)
        self.clusters.setText(0, _(u"Clusters"))
        self.setItemExpanded(self.clusters, True)
        self.tagger.cluster_added.connect(self.add_cluster)
        self.tagger.cluster_removed.connect(self.remove_cluster)

    def remove_cluster(self, cluster):
        # Deselect before removal so the selection set stays consistent.
        cluster.item.setSelected(False)
        self.clusters.removeChild(cluster.item)
class AlbumTreeView(BaseTreeView):
    """Right-hand view: loaded albums with their tracks."""

    view_sizes = TextOption("persist", "album_view_sizes", "250 40 100")

    def __init__(self, window, parent=None):
        BaseTreeView.__init__(self, window, parent)
        self.tagger.album_added.connect(self.add_album)
        self.tagger.album_removed.connect(self.remove_album)

    def add_album(self, album):
        item = AlbumItem(album, True, self)
        item.setIcon(0, AlbumItem.icon_cd)
        # Album rows are rendered in bold.
        for i, column in enumerate(MainPanel.columns):
            font = item.font(i)
            font.setBold(True)
            item.setFont(i, font)
            item.setText(i, album.column(column[1]))
        self.add_cluster(album.unmatched_files, item)

    def remove_album(self, album):
        album.item.setSelected(False)
        self.takeTopLevelItem(self.indexOfTopLevelItem(album.item))
class TreeItem(QtGui.QTreeWidgetItem):
    """Tree item wrapping a model object (album/track/file/cluster).

    By default items compare as unsortable (__lt__ always False); items
    created with sortable=True get a real comparison bound per instance.
    """

    __lt__ = lambda self, other: False

    def __init__(self, obj, sortable, *args):
        QtGui.QTreeWidgetItem.__init__(self, *args)
        self.obj = obj
        if obj is not None:
            # Back-reference so the model object can update its item.
            obj.item = self
        if sortable:
            # NOTE(review): relies on Qt invoking item.__lt__ through the
            # instance attribute rather than the type slot -- confirm
            # against the PyQt version in use.
            self.__lt__ = self._lt

    def _lt(self, other):
        column = self.treeWidget().sortColumn()
        if column == 1:
            # Length column: compare numeric durations, treating None as 0.
            return (self.obj.metadata.length or 0) < (other.obj.metadata.length or 0)
        return self.text(column).toLower() < other.text(column).toLower()
class ClusterItem(TreeItem):
    """Tree item for a file cluster; manages its child FileItems."""

    def __init__(self, *args):
        TreeItem.__init__(self, *args)
        self.setIcon(0, ClusterItem.icon_dir)

    def update(self):
        for i, column in enumerate(MainPanel.columns):
            self.setText(i, self.obj.column(column[1]))
        # Special clusters (e.g. an album's unmatched files) refresh their
        # parent album's row too.
        album = self.obj.related_album
        if self.obj.special and album and album.loaded:
            album.item.update(update_tracks=False)
        if self.isSelected():
            TreeItem.window.update_selection()

    def add_file(self, file):
        self.add_files([file])

    def add_files(self, files):
        # Un-hide a hide-if-empty cluster as soon as it has files.
        if self.obj.hide_if_empty and self.obj.files:
            self.setHidden(False)
        self.update()
        # addChildren is a lot faster than addChild
        items = []
        for file in files:
            item = FileItem(file, True)
            item.update()
            items.append(item)
        self.addChildren(items)

    def remove_file(self, file):
        file.item.setSelected(False)
        self.removeChild(file.item)
        self.update()
        if self.obj.hide_if_empty and not self.obj.files:
            self.setHidden(True)
class AlbumItem(TreeItem):
    """Tree item for an album; children are TrackItems plus one cluster row."""
    def update(self, update_tracks=True):
        album = self.obj
        if update_tracks:
            # The extra child is the album's "unmatched files" cluster,
            # hence the -1 when counting track rows.
            oldnum = self.childCount() - 1
            newnum = len(album.tracks)
            if oldnum > newnum: # remove old items
                for i in xrange(oldnum - newnum):
                    self.takeChild(newnum - 1)
                oldnum = newnum
            # update existing items
            for i in xrange(oldnum):
                item = self.child(i)
                track = album.tracks[i]
                # Re-link row and track in both directions before refreshing.
                item.obj = track
                track.item = item
                item.update(update_album=False)
            if newnum > oldnum: # add new items
                items = []
                for i in xrange(newnum - 1, oldnum - 1, -1): # insertChildren is backwards
                    item = TrackItem(album.tracks[i], False)
                    item.setHidden(False) # Workaround to make sure the parent state gets updated
                    items.append(item)
                self.insertChildren(oldnum, items)
                for item in items: # Update after insertChildren so that setExpanded works
                    item.update(update_album=False)
        # Saved albums get the "saved" CD icon.
        self.setIcon(0, AlbumItem.icon_cd_saved if album.is_complete() else AlbumItem.icon_cd)
        for i, column in enumerate(MainPanel.columns):
            self.setText(i, album.column(column[1]))
        if self.isSelected():
            TreeItem.window.update_selection()
class TrackItem(TreeItem):
    """Tree item for a track; shows child FileItems when several files match."""
    def update(self, update_album=True):
        track = self.obj
        if track.num_linked_files == 1:
            # Exactly one matched file: render its state directly on the
            # track row and drop any child rows.
            file = track.linked_files[0]
            file.item = self
            color = TrackItem.track_colors[file.state]
            bgcolor = get_match_color(file.similarity, TreeItem.base_color)
            icon = FileItem.decide_file_icon(file)
            self.takeChildren()
        else:
            # Zero or several files: neutral row colours, files as children.
            color = TreeItem.text_color
            bgcolor = get_match_color(1, TreeItem.base_color)
            icon = TrackItem.icon_note
            oldnum = self.childCount()
            newnum = track.num_linked_files
            if oldnum > newnum: # remove old items
                for i in xrange(oldnum - newnum):
                    # Break the back-link of the removed row's file.
                    self.takeChild(newnum - 1).obj.item = None
                oldnum = newnum
            for i in xrange(oldnum): # update existing items
                item = self.child(i)
                file = track.linked_files[i]
                item.obj = file
                file.item = item
                item.update()
            if newnum > oldnum: # add new items
                items = []
                for i in xrange(newnum - 1, oldnum - 1, -1):
                    item = FileItem(track.linked_files[i], False)
                    item.update()
                    items.append(item)
                self.addChildren(items)
            self.setExpanded(True)
        self.setIcon(0, icon)
        for i, column in enumerate(MainPanel.columns):
            self.setText(i, track.column(column[1]))
            self.setForeground(i, color)
            self.setBackground(i, bgcolor)
        if self.isSelected():
            TreeItem.window.update_selection()
        if update_album:
            # Propagate the state change up to the album row.
            self.parent().update(update_tracks=False)
class FileItem(TreeItem):
    """Tree item for a single file row."""
    def update(self):
        file = self.obj
        self.setIcon(0, FileItem.decide_file_icon(file))
        # Foreground follows file state, background follows match quality.
        color = FileItem.file_colors[file.state]
        bgcolor = get_match_color(file.similarity, TreeItem.base_color)
        for i, column in enumerate(MainPanel.columns):
            self.setText(i, file.column(column[1]))
            self.setForeground(i, color)
            self.setBackground(i, bgcolor)
        if self.isSelected():
            TreeItem.window.update_selection()
    @staticmethod
    def decide_file_icon(file):
        # Choose a row icon from the file's state and, when it is matched
        # to a track, from its match similarity (quantised to 6 icons).
        if file.state == File.ERROR:
            return FileItem.icon_error
        elif isinstance(file.parent, Track):
            if file.state == File.NORMAL:
                return FileItem.icon_saved
            elif file.state == File.PENDING:
                # similarity in [0, 1] rounded to the nearest of 6 steps.
                return FileItem.match_pending_icons[int(file.similarity * 5 + 0.5)]
            else:
                return FileItem.match_icons[int(file.similarity * 5 + 0.5)]
        elif file.state == File.PENDING:
            return FileItem.icon_file_pending
        else:
            return FileItem.icon_file
| gpl-2.0 |
mglukhikh/intellij-community | python/testData/inspections/PyArgumentListInspection/typingNamedTupleReplace.py | 5 | 1593 | import typing
# Inspection-test fixture: expected "Unexpected argument" warnings for
# NamedTuple._replace(), for both the functional and the class-based form.
MyTup1 = typing.NamedTuple("MyTup1", bar=int, baz=int)
mt1 = MyTup1(1, 2)
# empty
mt1._replace()
# one
mt1._replace(bar=2)
mt1._replace(baz=1)
mt1._replace(<warning descr="Unexpected argument">foo=1</warning>)
mt1._replace(<warning descr="Unexpected argument">1</warning>)
# two
mt1._replace(bar=1, baz=2)
mt1._replace(baz=2, bar=1)
mt1._replace(baz=2, <warning descr="Unexpected argument">foo=1</warning>)
mt1._replace(<warning descr="Unexpected argument">2</warning>, <warning descr="Unexpected argument">1</warning>)
# three
mt1._replace(bar=1, baz=2, <warning descr="Unexpected argument">foo=3</warning>)
mt1._replace(<warning descr="Unexpected argument">1</warning>, <warning descr="Unexpected argument">2</warning>, <warning descr="Unexpected argument">3</warning>)
class MyTup2(typing.NamedTuple):
    bar: int
    baz: int
mt2 = MyTup2(1, 2)
# empty
mt2._replace()
# one
mt2._replace(bar=2)
mt2._replace(baz=1)
mt2._replace(<warning descr="Unexpected argument">foo=1</warning>)
mt2._replace(<warning descr="Unexpected argument">1</warning>)
# two
mt2._replace(bar=1, baz=2)
mt2._replace(baz=2, bar=1)
mt2._replace(baz=2, <warning descr="Unexpected argument">foo=1</warning>)
mt2._replace(<warning descr="Unexpected argument">2</warning>, <warning descr="Unexpected argument">1</warning>)
# three
mt2._replace(bar=1, baz=2, <warning descr="Unexpected argument">foo=3</warning>)
mt2._replace(<warning descr="Unexpected argument">1</warning>, <warning descr="Unexpected argument">2</warning>, <warning descr="Unexpected argument">3</warning>)
rubyinhell/brython | www/tests/test_re.py | 15 | 2078 | import re
# Self-checks for the `re` module: search vs. match, group extraction by
# index and by name, and a verbose-mode pattern (decimal's number parser).
# Temporaries are underscore-prefixed so `import *` still only exports `m`.

# search finds a match anywhere; match only anchors at the start.
_hit = re.search('world', 'hello world')
assert _hit is not None and _hit.string == 'hello world' and _hit.groups() == ()
assert re.match('world', 'hello world') is None
_hit = re.match('hello', 'hello world')
assert _hit is not None and _hit.string == 'hello world' and _hit.groups() == ()

# groups() and its `default` argument for non-participating groups.
assert re.match(r"(\d+)\.(\d+)", "24.1632").groups() == ('24', '1632')
_num = re.match(r"(\d+)\.?(\d+)?", "24")
assert _num.groups() == ('24', None) and _num.groups('0') == ('24', '0')
_num = re.match(r"(\d+)\.?(\d+)? (--)", "24 --")
assert _num.groups() == ('24', None, '--')
assert _num.groups('0') == ('24', '0', '--')

# group() by index, multiple indices, and repeated-group behaviour.
_names = re.match(r"(\w+) (\w+)", "Isaac Newton, physicist")
assert _names.group(0) == 'Isaac Newton'
assert (_names.group(1), _names.group(2)) == ('Isaac', 'Newton')
assert _names.group(1, 2) == ('Isaac', 'Newton')
m = re.match(r"(..)+", "a1b2c3")
assert m.group(0) == 'a1b2c3'
assert m.group(1) == 'c3'  # a repeated group keeps its last match

_parser = re.compile(r"""        # A numeric string consists of:
    \s*
    (?P<sign>[-+])?              # an optional sign, followed by either...
    (
        (?=\d|\.\d)              # ...a number (with at least one digit)
        (?P<int>\d*)             # having a (possibly empty) integer part
        (\.(?P<frac>\d*))?       # followed by an optional fractional part
        (E(?P<exp>[-+]?\d+))?    # followed by an optional exponent, or...
    |
        Inf(inity)?              # ...an infinity, or...
    |
        (?P<signal>s)?           # ...an (optionally signaling)
        NaN                      # NaN
        (?P<diag>\d*)            # with (possibly empty) diagnostic info.
    )
    \s*
    \Z
""", re.VERBOSE | re.IGNORECASE).match

# Named groups on the verbose parser.
assert _parser("3.0").group('int') == '3'
assert _parser("NaN").group('diag') is not None
_inf = _parser("Inf")
assert _inf.group('diag') is None and _inf.group('sign') is None
_neg = _parser("-Inf")
assert _neg.group('diag') is None and _neg.group('sign') == '-'
print('all tests ok..')
GitHublong/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/en_GB/formats.py | 117 | 2112 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y' # '25 Oct 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'j F' # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
# Bug fix: the week starts on Monday in the UK (0 = Sunday was the US
# convention); later Django releases ship en_GB with Monday as well.
FIRST_DAY_OF_WEEK = 1 # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
frankban/UbuntuPaste | ubuntupaste.py | 1 | 6417 | # This software is licensed under the GNU Affero General Public License
# version 3 (see the file LICENSE).
import itertools
import os
import pwd
import threading
import urllib
import urllib2
import webbrowser
import sublime
import sublime_plugin
class UserInterface(object):
    """User interface for this plugin.

    Wraps Sublime Text status-bar messaging for one command/view pair.
    (Python 2 / Sublime Text 2 API.)
    """
    def __init__(self, command_name, view):
        self.command_name = command_name.title()
        self.view = view
        # Monotonic counter driving the animated progress dots.
        self.count = itertools.count()
    def _get_content(self, contents):
        # Prefix every message with the title-cased command name.
        return '{0}: {1}'.format(self.command_name, ' '.join(contents))
    def message(self, *contents):
        """Display a message in the status bar."""
        sublime.status_message(self._get_content(contents))
    def status(self, *contents):
        """Add a status to the view, using contents as value."""
        self.view.set_status(self.command_name, self._get_content(contents))
    def progress(self, url):
        """Show pasting progress."""
        # Python 2 iterator API (.next); cycles through 0-3 dots.
        dots = '.' * (self.count.next() % 4)
        self.status('Pasting to', url, '[', dots.ljust(3), ']')
    def error(self, *contents):
        """Display an error in the status bar."""
        self.message('ERROR:', *contents)
    def success(self, result, copy_to_clipboard, open_in_browser):
        """Paste succeeded: report the URL and what was done with it."""
        contents = ['URL:', result, '|']
        if copy_to_clipboard:
            contents.append('Copied to your clipboard!')
        if open_in_browser:
            contents.append('Opened in your browser!')
        self.message(*contents)
    def done(self):
        """Erase the status messages."""
        self.view.erase_status(self.command_name)
class Settings(object):
    """Store and validate plugin settings."""
    def __init__(self, global_settings, local_settings):
        self._global_settings = global_settings
        self._local_settings = local_settings
        # Populated by are_valid(): error message on failure, the
        # options tuple on success.
        self.error = None
        self.options = ()
    def _get_poster(self):
        """Get the current system user name."""
        # Fall back to the password database when $USER is unset.
        return os.getenv('USER', pwd.getpwuid(os.geteuid()).pw_name)
    def _get_syntax(self, syntax_map, default):
        """Return the syntax to be used by the paster."""
        syntax_file = self._global_settings.get('syntax')
        if syntax_file is None:
            return default
        # Map the view's syntax file name (sans path/extension) through
        # the user-configurable syntax_map.
        syntax = os.path.splitext(os.path.basename(syntax_file))[0]
        return syntax_map.get(syntax.lower(), default)
    def are_valid(self):
        """Validate and set up options."""
        settings = self._local_settings
        url = settings.get('url')
        if url is None:
            self.error = 'Invalid URL.'
            return False
        copy_to_clipboard = settings.get('copy_to_clipboard', True)
        open_in_browser = settings.get('open_in_browser', False)
        if not (copy_to_clipboard or open_in_browser):
            # The resulting URL must go somewhere, otherwise it is lost.
            self.error = 'You need to either copy or open the URL.'
            return False
        poster = settings.get('poster')
        if not poster:
            poster = self._get_poster()
        sep = settings.get('sep', '\n\n # ---\n\n')
        syntax_default = settings.get('syntax_default', 'text')
        syntax_guess = settings.get('syntax_guess', True)
        if syntax_guess:
            syntax_map = settings.get('syntax_map', {})
            syntax = self._get_syntax(syntax_map, syntax_default)
        else:
            syntax = syntax_default
        self.options = (
            url, copy_to_clipboard, open_in_browser, poster, sep, syntax
        )
        return True
class Paster(threading.Thread):
    """Paste code snippets to ubuntu pastebin.

    Runs the HTTP POST in a background thread; the outcome is published
    via self.result (paste URL) or self.error (message). (Python 2 urllib2.)
    """
    def __init__(self, url, **kwargs):
        self.url = url
        # Remaining keyword arguments become the POST form fields.
        self.data = kwargs
        self.error = None
        self.result = None
        threading.Thread.__init__(self)
    def run(self):
        try:
            request = urllib2.Request(
                self.url, urllib.urlencode(self.data),
                headers={'User-Agent': 'SublimeText2'})
            response = urllib2.urlopen(request, timeout=5)
        except urllib2.HTTPError as err:
            self.error = 'HTTP error {0}.'.format(err.code)
        except urllib2.URLError as err:
            self.error = 'URL error {0}.'.format(err.reason)
        else:
            # The pastebin redirects to the new paste's page; its URL is
            # the result we report back.
            self.result = response.url
class UbuntupasteCommand(sublime_plugin.TextCommand):
    """Paste code snippets on http://pastebin.ubuntu.com/."""
    def __init__(self, *args, **kwargs):
        self.ui = None
        # Guards against concurrent pastes: False while one is in flight.
        self._is_enabled = True
        super(UbuntupasteCommand, self).__init__(*args, **kwargs)
    def is_enabled(self):
        return self._is_enabled
    def get_content(self, sep):
        """Return the contents of current selections joined by `sep`.
        If no region is selected, return all the text in the current view.
        """
        view = self.view
        regions = [i for i in view.sel() if not i.empty()]
        if not regions:
            regions = [sublime.Region(0, view.size())]
        return sep.join(view.substr(region) for region in regions)
    def run(self, edit):
        # Disable the command until this paste completes (see done()).
        self._is_enabled = False
        self.ui = UserInterface(self.name(), self.view)
        settings = Settings(
            self.view.settings(),
            sublime.load_settings('UbuntuPaste.sublime-settings'))
        if settings.are_valid():
            self.handle(*settings.options)
        else:
            self.ui.error(settings.error)
    def handle(
            self, url, copy_to_clipboard, open_in_browser, poster, sep, syntax):
        # Start the upload in a background thread and poll for completion.
        paster = Paster(
            url, content=self.get_content(sep), poster=poster, syntax=syntax)
        self.ui.progress(url)
        paster.start()
        self.wait(paster, copy_to_clipboard, open_in_browser)
    def wait(self, paster, *args):
        # Re-schedules itself on Sublime's timer (every 200ms) until the
        # worker thread finishes; keeps the progress animation going.
        if not paster.is_alive():
            return self.done(paster, *args)
        self.ui.progress(paster.url)
        sublime.set_timeout(lambda: self.wait(paster, *args), 200)
    def done(self, paster, copy_to_clipboard, open_in_browser):
        result = paster.result
        if result:
            if copy_to_clipboard:
                sublime.set_clipboard(result)
            if open_in_browser:
                webbrowser.open(result)
            self.ui.success(result, copy_to_clipboard, open_in_browser)
        else:
            self.ui.error(paster.error)
        self.ui.done()
        # Re-enable the command for the next paste.
        self._is_enabled = True
| agpl-3.0 |
MontrealCorpusTools/polyglot-server | iscan/annotator/models.py | 1 | 4133 | from django.db import models
from polyglotdb import CorpusContext
# Create your models here.
class Annotation(models.Model):
    """A user-defined subannotation type for a corpus.

    Each Annotation mirrors itself into the PolyglotDB hierarchy of its
    corpus as a subannotation of the chosen linguistic item type
    (utterance/word/syllable/phone).
    """
    ITEM_TYPE_CHOICES = (('U', 'Utterance'),
                         ('W', 'Word'),
                         ('Y', 'Syllable'),
                         ('P', 'Phone'))
    corpus = models.ForeignKey('iscan.Corpus', on_delete=models.CASCADE)
    item_type = models.CharField(max_length=1, choices=ITEM_TYPE_CHOICES, default='P')
    label = models.CharField(max_length=100)
    # When True, an extra 'user' (str) property is stored with each
    # subannotation to record who created it.
    save_user = models.BooleanField(default=False)

    def __str__(self):
        return '{}'.format(self.label)

    @staticmethod
    def _property_type(field):
        """Map a field's annotation_choice to the Python type stored in the corpus."""
        if field.annotation_choice == 'N':
            return float
        if field.annotation_choice == 'B':
            return bool
        # Choice ('C') and string ('S') fields are both stored as text.
        return str

    def check_hierarchy(self):
        """Ensure the corpus hierarchy has this subannotation type with all its fields."""
        a_type = self.get_item_type_display().lower()
        with CorpusContext(self.corpus.config) as c:
            if not c.hierarchy.has_subannotation_type(self.label):
                properties = [('user', str)] if self.save_user else []
                for field in self.fields.all():
                    properties.append((field.label, self._property_type(field)))
                c.hierarchy.add_subannotation_type(c, a_type, self.label, properties=properties)

    def add_property(self, field):
        """Register `field` as a property of this subannotation type in the corpus."""
        with CorpusContext(self.corpus.config) as c:
            c.hierarchy.add_subannotation_properties(
                c, self.label, [(field.label, self._property_type(field))])

    def remove_property(self, field):
        """Remove `field`'s property from this subannotation type in the corpus."""
        with CorpusContext(self.corpus.config) as c:
            c.hierarchy.remove_subannotation_properties(c, self.label, [field.label])

    def save(self, *args, **kwargs):
        """Create the subannotation type in the corpus, then save the row."""
        a_type = self.get_item_type_display().lower()
        s_type = self.label
        with CorpusContext(self.corpus.config) as c:
            if not c.hierarchy.has_subannotation_type(s_type):
                properties = [('user', str)] if self.save_user else []
                c.hierarchy.add_subannotation_type(c, a_type, s_type, properties=properties)
        super(Annotation, self).save(*args, **kwargs)

    def delete(self, using=None, keep_parents=False):
        """Remove the subannotation type from the corpus, then delete the row.

        Bug fix: ``using`` and ``keep_parents`` are now forwarded to the
        super() call instead of being silently replaced with defaults.
        """
        with CorpusContext(self.corpus.config) as c:
            c.hierarchy.remove_subannotation_type(c, self.label)
        super(Annotation, self).delete(using=using, keep_parents=keep_parents)
class AnnotationField(models.Model):
    """A single property attached to an Annotation's subannotation type."""
    # N -> float, B -> bool, C/S -> str in the corpus hierarchy.
    FIELD_CHOICES = (('C', 'Choice field'),
                     ('S', 'String'),
                     ('B', 'Boolean'),
                     ('N', 'Numeric'))
    annotation = models.ForeignKey(Annotation, on_delete=models.CASCADE, related_name='fields')
    annotation_choice = models.CharField(max_length=1, choices=FIELD_CHOICES, default='C')
    label = models.CharField(max_length=100)

    def __str__(self):
        return '{} {}'.format(self.annotation, self.label)

    def save(self, *args, **kwargs):
        """Save the row, then mirror the new property into the corpus hierarchy."""
        super(AnnotationField, self).save(*args, **kwargs)
        self.annotation.add_property(self)

    def delete(self, using=None, keep_parents=False):
        """Remove the mirrored property, then delete the row.

        Bug fix: ``using`` and ``keep_parents`` are now forwarded to the
        super() call instead of being silently replaced with defaults.
        """
        self.annotation.remove_property(self)
        super(AnnotationField, self).delete(using=using, keep_parents=keep_parents)
class AnnotationChoice(models.Model):
    """One selectable value for a choice-type AnnotationField."""
    annotation = models.ForeignKey(AnnotationField, on_delete=models.CASCADE, related_name='choices')
    choice = models.CharField(max_length=100)
    def __str__(self):
        # Rendered as "<field> = <value>".
        return '{} = {}'.format(self.annotation, self.choice)
| mit |
kkuunnddaannkk/vispy | examples/basics/gloo/multi_texture.py | 18 | 2149 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example demonstrating (and testing) multi-texturing.
We create two textures. One that shows a red, green and blue band in
the horizontal direction and one that does the same in the vertical
direction. In the fragment shader the colors from both textures are
added.
"""
import numpy as np
from vispy import gloo
from vispy import app
# Images to be displayed: im1 holds red/green/blue bands stacked
# *horizontally* (top to bottom), im2 the same bands *vertically*
# (left to right), as described in the module docstring.
W, H = 30, 30
im1 = np.zeros((W, H, 3), np.float32)
im2 = np.zeros((W, H, 3), np.float32)
im1[:10, :, 0] = 1.0
im1[10:20, :, 1] = 1.0
im1[20:, :, 2] = 1.0
im2[:, :10, 0] = 1.0
im2[:, 10:20, 1] = 1.0
# Bug fix: the vertical blue band was written into im1 instead of im2,
# leaving im2's last third black and corrupting im1.
im2[:, 20:, 2] = 1.0

# A simple texture quad (triangle strip covering the whole viewport),
# with texture coordinates slightly over-scanned in one direction (1.2).
data = np.zeros(4, dtype=[('a_position', np.float32, 2),
                          ('a_texcoord', np.float32, 2)])
data['a_position'] = np.array([[-1, -1], [+1, -1], [-1, +1], [+1, +1]])
data['a_texcoord'] = np.array([[1, 0], [1, 1.2], [0, 0], [0, 1.2]])
VERT_SHADER = """
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main (void)
{
v_texcoord = a_texcoord;
gl_Position = vec4(a_position, 0.0, 1.0);
}
"""
FRAG_SHADER = """
uniform sampler2D u_tex1;
uniform sampler2D u_tex2;
varying vec2 v_texcoord;
void main()
{
vec3 clr1 = texture2D(u_tex1, v_texcoord).rgb;
vec3 clr2 = texture2D(u_tex2, v_texcoord).rgb;
gl_FragColor.rgb = clr1 + clr2;
gl_FragColor.a = 1.0;
}
"""
class Canvas(app.Canvas):
    """Window drawing one quad shaded by the sum of the two textures."""
    def __init__(self):
        app.Canvas.__init__(self, size=(500, 500), keys='interactive')
        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        # One sampler per image; the fragment shader adds the two lookups.
        self.program['u_tex1'] = gloo.Texture2D(im1, interpolation='linear')
        self.program['u_tex2'] = gloo.Texture2D(im2, interpolation='linear')
        self.program.bind(gloo.VertexBuffer(data))
        gloo.set_clear_color('white')
        self.show()
    def on_resize(self, event):
        # Keep the GL viewport matched to the (physical) window size.
        width, height = event.physical_size
        gloo.set_viewport(0, 0, width, height)
    def on_draw(self, event):
        gloo.clear(color=True, depth=True)
        # Four vertices as a strip -> one full-window quad.
        self.program.draw('triangle_strip')
if __name__ == '__main__':
    # Create the window and enter the vispy event loop.
    c = Canvas()
    app.run()
| bsd-3-clause |
dednal/chromium.src | third_party/cython/src/Cython/Compiler/PyrexTypes.py | 87 | 136844 | #
# Cython/Python language types
#
from Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import copy
from Errors import error
class BaseType(object):
    #
    # Base class for all Cython types including pseudo-types.
    # List of attribute names of any subtypes
    subtypes = []
    def can_coerce_to_pyobject(self, env):
        # Whether a value of this type can be converted to a Python
        # object; overridden by coercible types.
        return False
    def cast_code(self, expr_code):
        # Return expr_code wrapped in a C cast to this type.
        return "((%s)%s)" % (self.declaration_code(""), expr_code)
    def specialization_name(self):
        # This is not entirely robust.
        # Mangle the C declaration into an identifier-safe name used to
        # label type specializations (unsafe chars become '_%x_').
        safe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789'
        all = []
        for c in self.declaration_code("").replace("unsigned ", "unsigned_").replace("long long", "long_long").replace(" ", "__"):
            if c in safe:
                all.append(c)
            else:
                all.append('_%x_' % ord(c))
        return ''.join(all)
    def base_declaration_code(self, base_code, entity_code):
        # Join a base type string with a declarator/entity string.
        if entity_code:
            return "%s %s" % (base_code, entity_code)
        else:
            return base_code
    def __deepcopy__(self, memo):
        """
        Types never need to be copied, if we do copy, Unfortunate Things
        Will Happen!
        """
        return self
    def get_fused_types(self, result=None, seen=None, subtypes=None):
        # Depth-first collection of all fused types reachable through the
        # attributes named in `subtypes`; returns None for leaf types.
        subtypes = subtypes or self.subtypes
        if subtypes:
            if result is None:
                result = []
                seen = set()
            for attr in subtypes:
                list_or_subtype = getattr(self, attr)
                if list_or_subtype:
                    if isinstance(list_or_subtype, BaseType):
                        list_or_subtype.get_fused_types(result, seen)
                    else:
                        for subtype in list_or_subtype:
                            subtype.get_fused_types(result, seen)
            return result
        return None
    def specialize_fused(self, env):
        # Specialize this type using the environment's current
        # fused-to-specific mapping, when one is active.
        if env.fused_to_specific:
            return self.specialize(env.fused_to_specific)
        return self
    def _get_fused_types(self):
        """
        Add this indirection for the is_fused property to allow overriding
        get_fused_types in subclasses.
        """
        return self.get_fused_types()
    is_fused = property(_get_fused_types, doc="Whether this type or any of its "
                        "subtypes is a fused type")
    def deduce_template_params(self, actual):
        """
        Deduce any template params in this (argument) type given the actual
        argument type.
        http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction
        """
        if self == actual:
            return {}
        else:
            return None
    def __lt__(self, other):
        """
        For sorting. The sorting order should correspond to the preference of
        conversion from Python types.
        Override to provide something sensible. This is only implemented so that
        python 3 doesn't trip
        """
        return id(type(self)) < id(type(other))
    def py_type_name(self):
        """
        Return the name of the Python type that can coerce to this type.
        """
    def typeof_name(self):
        """
        Return the string with which fused python functions can be indexed.
        """
        if self.is_builtin_type or self.py_type_name() == 'object':
            index_name = self.py_type_name()
        else:
            index_name = str(self)
        return index_name
    def check_for_null_code(self, cname):
        """
        Return the code for a NULL-check in case an UnboundLocalError should
        be raised if an entry of this type is referenced before assignment.
        Returns None if no check should be performed.
        """
        return None
    def invalid_value(self):
        """
        Returns the most invalid value an object of this type can assume as a
        C expression string. Returns None if no such value exists.
        """
class PyrexType(BaseType):
    #
    # Base class for all Cython types
    #
    # is_pyobject boolean Is a Python object type
    # is_extension_type boolean Is a Python extension type
    # is_final_type boolean Is a final extension type
    # is_numeric boolean Is a C numeric type
    # is_int boolean Is a C integer type
    # is_float boolean Is a C floating point type
    # is_complex boolean Is a C complex type
    # is_void boolean Is the C void type
    # is_array boolean Is a C array type
    # is_ptr boolean Is a C pointer type
    # is_null_ptr boolean Is the type of NULL
    # is_reference boolean Is a C reference type
    # is_const boolean Is a C const type.
    # is_cfunction boolean Is a C function type
    # is_struct_or_union boolean Is a C struct or union type
    # is_struct boolean Is a C struct type
    # is_enum boolean Is a C enum type
    # is_typedef boolean Is a typedef type
    # is_string boolean Is a C char * type
    # is_pyunicode_ptr boolean Is a C PyUNICODE * type
    # is_cpp_string boolean Is a C++ std::string type
    # is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE
    # is_returncode boolean Is used only to signal exceptions
    # is_error boolean Is the dummy error type
    # is_buffer boolean Is buffer access type
    # has_attributes boolean Has C dot-selectable attributes
    # default_value string Initial value
    # entry Entry The Entry for this type
    #
    # declaration_code(entity_code,
    # for_display = 0, dll_linkage = None, pyrex = 0)
    # Returns a code fragment for the declaration of an entity
    # of this type, given a code fragment for the entity.
    # * If for_display, this is for reading by a human in an error
    # message; otherwise it must be valid C code.
    # * If dll_linkage is not None, it must be 'DL_EXPORT' or
    # 'DL_IMPORT', and will be added to the base type part of
    # the declaration.
    # * If pyrex = 1, this is for use in a 'cdef extern'
    # statement of a Cython include file.
    #
    # assignable_from(src_type)
    # Tests whether a variable of this type can be
    # assigned a value of type src_type.
    #
    # same_as(other_type)
    # Tests whether this type represents the same type
    # as other_type.
    #
    # as_argument_type():
    # Coerces array and C function types into pointer type for use as
    # a formal argument type.
    #
    # Flag defaults; concrete subclasses flip the ones that apply to them.
    is_pyobject = 0
    is_unspecified = 0
    is_extension_type = 0
    is_final_type = 0
    is_builtin_type = 0
    is_numeric = 0
    is_int = 0
    is_float = 0
    is_complex = 0
    is_void = 0
    is_array = 0
    is_ptr = 0
    is_null_ptr = 0
    is_reference = 0
    is_const = 0
    is_cfunction = 0
    is_struct_or_union = 0
    is_cpp_class = 0
    is_cpp_string = 0
    is_struct = 0
    is_enum = 0
    is_typedef = 0
    is_string = 0
    is_pyunicode_ptr = 0
    is_unicode_char = 0
    is_returncode = 0
    is_error = 0
    is_buffer = 0
    is_memoryviewslice = 0
    has_attributes = 0
    default_value = ""
    def resolve(self):
        # If a typedef, returns the base type.
        return self
    def specialize(self, values):
        # TODO(danilo): Override wherever it makes sense.
        return self
    def literal_code(self, value):
        # Returns a C code fragment representing a literal
        # value of this type.
        return str(value)
    def __str__(self):
        return self.declaration_code("", for_display = 1).strip()
    def same_as(self, other_type, **kwds):
        return self.same_as_resolved_type(other_type.resolve(), **kwds)
    def same_as_resolved_type(self, other_type):
        return self == other_type or other_type is error_type
    def subtype_of(self, other_type):
        return self.subtype_of_resolved_type(other_type.resolve())
    def subtype_of_resolved_type(self, other_type):
        return self.same_as(other_type)
    def assignable_from(self, src_type):
        return self.assignable_from_resolved_type(src_type.resolve())
    def assignable_from_resolved_type(self, src_type):
        return self.same_as(src_type)
    def as_argument_type(self):
        return self
    def is_complete(self):
        # A type is incomplete if it is an unsized array,
        # a struct whose attributes are not defined, etc.
        return 1
    def is_simple_buffer_dtype(self):
        # Whether this type may be used directly as a buffer element type.
        return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or
                self.is_extension_type or self.is_ptr)
    def struct_nesting_depth(self):
        # Returns the number levels of nested structs. This is
        # used for constructing a stack for walking the run-time
        # type information of the struct.
        return 1
    def global_init_code(self, entry, code):
        # abstract
        pass
    def needs_nonecheck(self):
        return 0
def public_decl(base_code, dll_linkage):
    """Wrap base_code in a DLL import/export macro when one is requested."""
    return "%s(%s)" % (dll_linkage, base_code) if dll_linkage else base_code
def create_typedef_type(name, base_type, cname, is_external=0):
    """Wrap base_type in a CTypedefType.

    Complex and fused base types cannot be hidden behind a typedef: they
    are returned unchanged, and rejected outright when declared external.
    """
    if not (base_type.is_complex or base_type.is_fused):
        return CTypedefType(name, base_type, cname, is_external)
    if is_external:
        msg = "Fused" if base_type.is_fused else "Complex"
        raise ValueError("%s external typedefs not supported" % msg)
    return base_type
class CTypedefType(BaseType):
#
# Pseudo-type defined with a ctypedef statement in a
# 'cdef extern from' block.
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# qualified_name string
# typedef_name string
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
subtypes = ['typedef_base_type']
def __init__(self, name, base_type, cname, is_external=0):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
def invalid_value(self):
return self.typedef_base_type.invalid_value()
def resolve(self):
return self.typedef_base_type.resolve()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
# For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
else:
return BaseType.cast_code(self, expr_code)
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
def __str__(self):
return self.typedef_name
def _create_utility_code(self, template_utility_code,
template_function_name):
type_name = self.typedef_cname.replace(" ","_").replace("::","__")
utility_code = template_utility_code.specialize(
type = self.typedef_cname,
TypeName = type_name)
function_name = template_function_name % type_name
return utility_code, function_name
def create_to_py_utility_code(self, env):
if self.typedef_is_external:
if not self.to_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"TO_PY_FUNCTION": self.to_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
pass
if self.to_py_utility_code:
env.use_utility_code(self.to_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_to_py_utility_code(env)
def create_from_py_utility_code(self, env):
if self.typedef_is_external:
if not self.from_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
if self.from_py_utility_code:
env.use_utility_code(self.from_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_from_py_utility_code(env)
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.declaration_code("")
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
    def error_condition(self, result_code):
        """Return the C boolean expression that detects a conversion error
        in *result_code*, or delegate to the underlying base type.
        """
        if self.typedef_is_external:
            if self.exception_value:
                condition = "(%s == (%s)%s)" % (
                    result_code, self.typedef_cname, self.exception_value)
                if self.exception_check:
                    # The sentinel value is also a legal result; confirm with
                    # PyErr_Occurred() before treating it as an error.
                    condition += " && PyErr_Occurred()"
                return condition
        # delegation
        return self.typedef_base_type.error_condition(result_code)
    def __getattr__(self, name):
        # Fall back to the underlying base type for any attribute not
        # defined on the typedef itself (delegation pattern).
        return getattr(self.typedef_base_type, name)
    def py_type_name(self):
        # The Python-level type name comes from the underlying base type.
        return self.typedef_base_type.py_type_name()
    def can_coerce_to_pyobject(self, env):
        # Coercibility is decided by the underlying base type.
        return self.typedef_base_type.can_coerce_to_pyobject(env)
class MemoryViewSliceType(PyrexType):
    # A typed memoryview slice, e.g. "int[:, ::1]".

    is_memoryviewslice = 1
    has_attributes = 1
    scope = None

    # These are special cased in Defnode
    from_py_function = None
    to_py_function = None

    exception_value = None
    exception_check = True

    subtypes = ['dtype']

    def __init__(self, base_dtype, axes):
        """
        MemoryViewSliceType(base, axes)

        Base is the C base type; axes is a list of (access, packing) strings,
        where access is one of 'full', 'direct' or 'ptr' and packing is one of
        'contig', 'strided' or 'follow'.  There is one (access, packing) tuple
        for each dimension.

        the access specifiers determine whether the array data contains
        pointers that need to be dereferenced along that axis when
        retrieving/setting:

          'direct' -- No pointers stored in this dimension.
          'ptr' -- Pointer stored in this dimension.
          'full' -- Check along this dimension, don't assume either.

        the packing specifiers specify how the array elements are laid out
        in memory.

          'contig' -- The data are contiguous in memory along this dimension.
                At most one dimension may be specified as 'contig'.
          'strided' -- The data aren't contiguous along this dimension.
          'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
            has its stride automatically computed from extents of the other
            dimensions to ensure C or Fortran memory layout.

        C-contiguous memory has 'direct' as the access spec, 'contig' as the
        *last* axis' packing spec and 'follow' for all other packing specs.

        Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
        the *first* axis' packing spec and 'follow' for all other packing
        specs.
        """
        import MemoryView

        self.dtype = base_dtype
        self.axes = axes
        self.ndim = len(axes)
        self.flags = MemoryView.get_buf_flags(self.axes)

        self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
        assert not (self.is_c_contig and self.is_f_contig)

        self.mode = MemoryView.get_mode(axes)
        self.writable_needed = False

        # Fused dtypes have no single mangled name until specialized.
        if not self.dtype.is_fused:
            self.dtype_name = MemoryView.mangle_dtype_name(self.dtype)

    def same_as_resolved_type(self, other_type):
        # Same dtype AND identical axis specs; error_type compares equal
        # to everything to suppress cascading errors.
        return ((other_type.is_memoryviewslice and
            self.dtype.same_as(other_type.dtype) and
            self.axes == other_type.axes) or
            other_type is error_type)

    def needs_nonecheck(self):
        return True

    def is_complete(self):
        # incomplete since the underlying struct doesn't have a cython.memoryview object.
        return 0

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        # XXX: we put these guards in for now...
        assert not pyrex
        assert not dll_linkage
        import MemoryView
        return self.base_declaration_code(
                MemoryView.memviewslice_cname,
                entity_code)

    def attributes_known(self):
        # Lazily build (and cache) the attribute scope for this slice type.
        if self.scope is None:
            import Symtab

            self.scope = scope = Symtab.CClassScope(
                    'mvs_class_'+self.specialization_suffix(),
                    None,
                    visibility='extern')

            scope.parent_type = self
            scope.directives = {}

            scope.declare_var('_data', c_char_ptr_type, None,
                              cname='data', is_cdef=1)

        return True

    def declare_attribute(self, attribute, env, pos):
        """Declare one of the memoryview attributes (shape/strides/suboffsets,
        copy/copy_fortran, is_c_contig/is_f_contig) in this type's scope,
        loading any utility code the attribute needs.
        """
        import MemoryView, Options

        scope = self.scope

        if attribute == 'shape':
            scope.declare_var('shape',
                    c_array_type(c_py_ssize_t_type,
                                 Options.buffer_max_dims),
                    pos,
                    cname='shape',
                    is_cdef=1)

        elif attribute == 'strides':
            scope.declare_var('strides',
                    c_array_type(c_py_ssize_t_type,
                                 Options.buffer_max_dims),
                    pos,
                    cname='strides',
                    is_cdef=1)

        elif attribute == 'suboffsets':
            scope.declare_var('suboffsets',
                    c_array_type(c_py_ssize_t_type,
                                 Options.buffer_max_dims),
                    pos,
                    cname='suboffsets',
                    is_cdef=1)

        elif attribute in ("copy", "copy_fortran"):
            ndim = len(self.axes)

            # Target layouts: C-contiguous puts 'contig' last, Fortran first.
            to_axes_c = [('direct', 'contig')]
            to_axes_f = [('direct', 'contig')]
            if ndim - 1:
                to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c
                to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1)

            to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c)
            to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f)

            for to_memview, cython_name in [(to_memview_c, "copy"),
                                            (to_memview_f, "copy_fortran")]:
                entry = scope.declare_cfunction(cython_name,
                            CFuncType(self, [CFuncTypeArg("memviewslice", self, None)]),
                            pos=pos,
                            defining=1,
                            cname=MemoryView.copy_c_or_fortran_cname(to_memview))

                #entry.utility_code_definition = \
                env.use_utility_code(MemoryView.get_copy_new_utility(pos, self, to_memview))

            MemoryView.use_cython_array_utility_code(env)

        elif attribute in ("is_c_contig", "is_f_contig"):
            # is_c_contig and is_f_contig functions
            for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('f', 'is_f_contig')):

                is_contig_name = \
                        MemoryView.get_is_contig_func_name(c_or_f, self.ndim)

                cfunctype = CFuncType(
                        return_type=c_bint_type,
                        args=[CFuncTypeArg("memviewslice", self, None)],
                        exception_value="-1",
                )

                entry = scope.declare_cfunction(cython_name,
                            cfunctype,
                            pos=pos,
                            defining=1,
                            cname=is_contig_name)

                entry.utility_code_definition = MemoryView.get_is_contig_utility(
                                            attribute == 'is_c_contig', self.ndim)

        return True

    def specialization_suffix(self):
        # e.g. "d_c_double" -- axes abbreviation plus mangled dtype name.
        return "%s_%s" % (self.axes_to_name(), self.dtype_name)

    def can_coerce_to_pyobject(self, env):
        return True

    def check_for_null_code(self, cname):
        # A slice is "null" when its owning memoryview object pointer is NULL.
        return cname + '.memview'

    def create_from_py_utility_code(self, env):
        """Generate the object -> memoryview-slice conversion function and
        register the utility code it depends on.  Always returns True.
        """
        import MemoryView, Buffer

        # We don't have 'code', so use a LazyUtilityCode with a callback.
        # NOTE: the closure reads 'context', which is only assigned below --
        # safe because the callback runs later, at code-generation time.
        def lazy_utility_callback(code):
            context['dtype_typeinfo'] = Buffer.get_type_information_cname(
                                                          code, self.dtype)
            return TempitaUtilityCode.load(
                        "ObjectToMemviewSlice", "MemoryView_C.c", context=context)

        env.use_utility_code(Buffer.acquire_utility_code)
        env.use_utility_code(MemoryView.memviewslice_init_code)
        env.use_utility_code(LazyUtilityCode(lazy_utility_callback))

        if self.is_c_contig:
            c_or_f_flag = "__Pyx_IS_C_CONTIG"
        elif self.is_f_contig:
            c_or_f_flag = "__Pyx_IS_F_CONTIG"
        else:
            c_or_f_flag = "0"

        suffix = self.specialization_suffix()
        funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix

        context = dict(
            MemoryView.context,
            buf_flag = self.flags,
            ndim = self.ndim,
            axes_specs = ', '.join(self.axes_to_code()),
            dtype_typedecl = self.dtype.declaration_code(""),
            struct_nesting_depth = self.dtype.struct_nesting_depth(),
            c_or_f_flag = c_or_f_flag,
            funcname = funcname,
        )

        self.from_py_function = funcname
        return True

    def create_to_py_utility_code(self, env):
        # Conversion to a Python memoryview object is handled specially
        # elsewhere (see get_to_py_function); nothing to load here.
        return True

    def get_to_py_function(self, env, obj):
        """Return the C call that wraps slice *obj* in a new Python
        memoryview object, including per-dtype get/set conversion thunks.
        """
        to_py_func, from_py_func = self.dtype_object_conversion_funcs(env)
        to_py_func = "(PyObject *(*)(char *)) " + to_py_func
        from_py_func = "(int (*)(char *, PyObject *)) " + from_py_func

        tup = (obj.result(), self.ndim, to_py_func, from_py_func,
               self.dtype.is_pyobject)
        return "__pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup

    def dtype_object_conversion_funcs(self, env):
        """Return (get_function, set_function) C names converting a single
        dtype element to/from a Python object; "NULL" where unsupported.
        """
        get_function = "__pyx_memview_get_%s" % self.dtype_name
        set_function = "__pyx_memview_set_%s" % self.dtype_name

        context = dict(
            get_function = get_function,
            set_function = set_function,
        )

        if self.dtype.is_pyobject:
            utility_name = "MemviewObjectToObject"
        else:
            to_py = self.dtype.create_to_py_utility_code(env)
            from_py = self.dtype.create_from_py_utility_code(env)
            if not (to_py or from_py):
                # dtype cannot be converted in either direction.
                return "NULL", "NULL"

            if not self.dtype.to_py_function:
                get_function = "NULL"

            if not self.dtype.from_py_function:
                set_function = "NULL"

            utility_name = "MemviewDtypeToObject"
            error_condition = (self.dtype.error_condition('value') or
                               'PyErr_Occurred()')
            context.update(
                to_py_function = self.dtype.to_py_function,
                from_py_function = self.dtype.from_py_function,
                dtype = self.dtype.declaration_code(""),
                error_condition = error_condition,
            )

        utility = TempitaUtilityCode.load(
                utility_name, "MemoryView_C.c", context=context)
        env.use_utility_code(utility)
        return get_function, set_function

    def axes_to_code(self):
        """Return a list of code constants for each axis"""
        import MemoryView
        d = MemoryView._spec_to_const
        return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]

    def axes_to_name(self):
        """Return an abbreviated name for our axes"""
        import MemoryView
        d = MemoryView._spec_to_abbrev
        return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])

    def error_condition(self, result_code):
        # A NULL .memview pointer signals a failed conversion.
        return "!%s.memview" % result_code

    def __str__(self):
        import MemoryView

        axes_code_list = []
        for idx, (access, packing) in enumerate(self.axes):
            flag = MemoryView.get_memoryview_flag(access, packing)
            if flag == "strided":
                axes_code_list.append(":")
            else:
                if flag == 'contiguous':
                    # Render "::1" only when a neighbouring 'follow' axis (or
                    # ndim == 1) makes this the C/Fortran contiguous axis.
                    have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
                                   if p == 'follow']
                    if have_follow or self.ndim == 1:
                        flag = '1'

                axes_code_list.append("::" + flag)

        if self.dtype.is_pyobject:
            dtype_name = self.dtype.name
        else:
            dtype_name = self.dtype

        return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))

    def specialize(self, values):
        """This does not validate the base type!!"""
        dtype = self.dtype.specialize(values)
        if dtype is not self.dtype:
            return MemoryViewSliceType(dtype, self.axes)

        return self

    def cast_code(self, expr_code):
        # Slices are structs; no cast is ever applied.
        return expr_code
class BufferType(BaseType):
    """A buffer view over an object type (e.g. ``object[int, ndim=2]``).

    Delegates most attribute lookups to the wrapped base type: anything not
    defined here or on BaseType is resolved through ``__getattr__``.

    Attributes:
      dtype            -- PyrexType, the element type
      ndim             -- int, number of dimensions
      mode             -- str, buffer access mode
      negative_indices -- bool, whether negative indexing is allowed
      cast             -- bool, declared with cast=True
      is_buffer        -- bool
      writable         -- bool
    """

    is_buffer = 1
    writable = True

    subtypes = ['dtype']

    def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
        self.base = base
        self.dtype = dtype
        self.ndim = ndim
        self.buffer_ptr_type = CPtrType(dtype)
        self.mode = mode
        self.negative_indices = negative_indices
        self.cast = cast

    def as_argument_type(self):
        return self

    def specialize(self, values):
        # Only the element type participates in fused-type specialization.
        specialized_dtype = self.dtype.specialize(values)
        if specialized_dtype is self.dtype:
            return self
        return BufferType(self.base, specialized_dtype, self.ndim, self.mode,
                          self.negative_indices, self.cast)

    def __getattr__(self, name):
        # Delegate anything we don't define to the wrapped base type.
        return getattr(self.base, name)

    def __repr__(self):
        return "<BufferType %r>" % self.base

    def __str__(self):
        # avoid ', ', as fused functions split the signature string on ', '
        suffix = ',cast=True' if self.cast else ''
        return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
                                     suffix)

    def assignable_from(self, other_type):
        if not other_type.is_buffer:
            return self.base.assignable_from(other_type)
        return (self.same_as(other_type, compare_base=False) and
                self.base.assignable_from(other_type.base))

    def same_as(self, other_type, compare_base=True):
        if not other_type.is_buffer:
            return other_type.same_as(self.base)
        if not (self.dtype.same_as(other_type.dtype) and
                self.ndim == other_type.ndim and
                self.mode == other_type.mode and
                self.cast == other_type.cast):
            return False
        return (not compare_base) or self.base.same_as(other_type.base)
class PyObjectType(PyrexType):
    """Base class for all Python object types (reference-counted).

    buffer_defaults -- dict or None, default options for buffer access.
    """

    name = "object"
    is_pyobject = 1
    default_value = "0"
    buffer_defaults = None
    is_extern = False
    is_subclassed = False
    is_gc_simple = False

    def __str__(self):
        return "Python object"

    def __repr__(self):
        return "<PyObjectType>"

    def can_coerce_to_pyobject(self, env):
        return True

    def default_coerced_ctype(self):
        """The default C type that this Python type coerces to, or None."""
        return None

    def assignable_from(self, src_type):
        # Anything coerces to an object except non-string C pointers.
        if src_type.is_ptr:
            return src_type.is_string or src_type.is_pyunicode_ptr
        return True

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        # Pyrex/display mode shows "object"; C mode declares a PyObject*.
        if pyrex or for_display:
            return self.base_declaration_code("object", entity_code)
        return self.base_declaration_code(
            public_decl("PyObject", dll_linkage), "*%s" % entity_code)

    def as_pyobject(self, cname):
        needs_cast = self.is_extension_type or not self.is_complete()
        return "(PyObject *)" + cname if needs_cast else cname

    def py_type_name(self):
        return "object"

    def __lt__(self, other):
        """Sort 'object' highest: isinstance checks against py_type_name()
        ('object') always succeed, so it must come last in dispatch order.
        """
        return False

    def global_init_code(self, entry, code):
        # Module-level object variables start out as (borrowed) Py_None.
        code.put_init_var_to_py_none(entry, nanny=False)

    def check_for_null_code(self, cname):
        return cname
# Builtin types whose instances can never participate in reference cycles,
# so the GC-tracking support can be skipped for them.
# (Set literal instead of set([...]): no throwaway list, same contents.)
builtin_types_that_cannot_create_refcycles = {
    'bool', 'int', 'long', 'float', 'complex',
    'bytearray', 'bytes', 'unicode', 'str', 'basestring',
}
class BuiltinObjectType(PyObjectType):
    #  A Python builtin type (list, dict, ...) known to the compiler.
    #
    #  objstruct_cname  string  Name of PyObject struct

    is_builtin_type = 1
    has_attributes = 1
    base_type = None
    module_name = '__builtin__'

    # fields that let it look like an extension type
    vtabslot_cname = None
    vtabstruct_cname = None
    vtabptr_cname = None
    typedef_flag = True
    is_external = True

    def __init__(self, name, cname, objstruct_cname=None):
        self.name = name
        self.cname = cname
        # Builtin type objects are statically allocated, so take the address.
        self.typeptr_cname = "(&%s)" % cname
        self.objstruct_cname = objstruct_cname
        self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles

    def set_scope(self, scope):
        self.scope = scope
        if scope:
            scope.parent_type = self

    def __str__(self):
        return "%s object" % self.name

    def __repr__(self):
        return "<%s>"% self.cname

    def default_coerced_ctype(self):
        # Natural C type a value of this builtin type coerces to, if any.
        if self.name in ('bytes', 'bytearray'):
            return c_char_ptr_type
        elif self.name == 'bool':
            return c_bint_type
        elif self.name == 'float':
            return c_double_type
        return None

    def assignable_from(self, src_type):
        if isinstance(src_type, BuiltinObjectType):
            if self.name == 'basestring':
                return src_type.name in ('str', 'unicode', 'basestring')
            else:
                return src_type.name == self.name
        elif src_type.is_extension_type:
            # FIXME: This is an ugly special case that we currently
            # keep supporting.  It allows users to specify builtin
            # types as external extension types, while keeping them
            # compatible with the real builtin types.  We already
            # generate a warning for it.  Big TODO: remove!
            return (src_type.module_name == '__builtin__' and
                    src_type.name == self.name)
        else:
            return True

    def typeobj_is_available(self):
        return True

    def attributes_known(self):
        return True

    def subtype_of(self, type):
        return type.is_pyobject and type.assignable_from(self)

    def type_check_function(self, exact=True):
        """Return the C macro/function name that type-checks this builtin
        (e.g. "PyList_CheckExact").  exact=False allows subclasses.
        """
        type_name = self.name
        if type_name == 'str':
            type_check = 'PyString_Check'
        elif type_name == 'basestring':
            type_check = '__Pyx_PyBaseString_Check'
        elif type_name == 'bytearray':
            type_check = 'PyByteArray_Check'
        elif type_name == 'frozenset':
            type_check = 'PyFrozenSet_Check'
        else:
            type_check = 'Py%s_Check' % type_name.capitalize()
        if exact and type_name not in ('bool', 'slice'):
            # bool and slice have no *_CheckExact variant in the C API.
            type_check += 'Exact'
        return type_check

    def isinstance_code(self, arg):
        return '%s(%s)' % (self.type_check_function(exact=False), arg)

    def type_test_code(self, arg, notnone=False, exact=True):
        """Return a C expression that checks *arg* (and optionally accepts
        None), raising TypeError and evaluating to 0 on mismatch.
        """
        type_check = self.type_check_function(exact=exact)
        check = 'likely(%s(%s))' % (type_check, arg)
        if not notnone:
            check += '||((%s) == Py_None)' % arg
        if self.name == 'basestring':
            name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
            space_for_name = 16
        else:
            name = '"%s"' % self.name
            # avoid wasting too much space but limit number of different format strings
            space_for_name = (len(self.name) // 16 + 1) * 16
        error = '(PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % (
            space_for_name, name, arg)
        return check + '||' + error

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = self.name
        else:
            base_code = public_decl("PyObject", dll_linkage)
            entity_code = "*%s" % entity_code
        return self.base_declaration_code(base_code, entity_code)

    def cast_code(self, expr_code, to_object_struct = False):
        return "((%s*)%s)" % (
            to_object_struct and self.objstruct_cname or "PyObject", # self.objstruct_cname may be None
            expr_code)

    def py_type_name(self):
        return self.name
class PyExtensionType(PyObjectType):
    #
    #  A Python extension type.
    #
    #  name             string
    #  scope            CClassScope      Attribute namespace
    #  visibility       string
    #  typedef_flag     boolean
    #  base_type        PyExtensionType or None
    #  module_name      string or None   Qualified name of defining module
    #  objstruct_cname  string           Name of PyObject struct
    #  objtypedef_cname string           Name of PyObject struct typedef
    #  typeobj_cname    string or None   C code fragment referring to type object
    #  typeptr_cname    string or None   Name of pointer to external type object
    #  vtabslot_cname   string           Name of C method table member
    #  vtabstruct_cname string           Name of C method table struct
    #  vtabptr_cname    string           Name of pointer to C method table
    #  vtable_cname     string           Name of C method table definition
    #  defered_declarations [thunk]      Used to declare class hierarchies in order

    is_extension_type = 1
    has_attributes = 1

    objtypedef_cname = None

    def __init__(self, name, typedef_flag, base_type, is_external=0):
        self.name = name
        self.scope = None
        self.typedef_flag = typedef_flag
        if base_type is not None:
            base_type.is_subclassed = True
        self.base_type = base_type
        self.module_name = None
        self.objstruct_cname = None
        self.typeobj_cname = None
        self.typeptr_cname = None
        self.vtabslot_cname = None
        self.vtabstruct_cname = None
        self.vtabptr_cname = None
        self.vtable_cname = None
        self.is_external = is_external
        self.defered_declarations = []

    def set_scope(self, scope):
        self.scope = scope
        if scope:
            scope.parent_type = self

    def needs_nonecheck(self):
        return True

    def subtype_of_resolved_type(self, other_type):
        if other_type.is_extension_type or other_type.is_builtin_type:
            # Walk up the inheritance chain.
            return self is other_type or (
                self.base_type and self.base_type.subtype_of(other_type))
        else:
            return other_type is py_object_type

    def typeobj_is_available(self):
        # Do we have a pointer to the type object?
        return self.typeptr_cname

    def typeobj_is_imported(self):
        # If we don't know the C name of the type object but we do
        # know which module it's defined in, it will be imported.
        return self.typeobj_cname is None and self.module_name is not None

    def assignable_from(self, src_type):
        if self == src_type:
            return True
        if isinstance(src_type, PyExtensionType):
            if src_type.base_type is not None:
                # A subclass instance is assignable to a base-class slot.
                return self.assignable_from(src_type.base_type)
        if isinstance(src_type, BuiltinObjectType):
            # FIXME: This is an ugly special case that we currently
            # keep supporting.  It allows users to specify builtin
            # types as external extension types, while keeping them
            # compatible with the real builtin types.  We already
            # generate a warning for it.  Big TODO: remove!
            return (self.module_name == '__builtin__' and
                    self.name == src_type.name)
        return False

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
        if pyrex or for_display:
            base_code = self.name
        else:
            if self.typedef_flag:
                objstruct = self.objstruct_cname
            else:
                objstruct = "struct %s" % self.objstruct_cname
            base_code = public_decl(objstruct, dll_linkage)
            if deref:
                # Declaring the struct itself, not a pointer to it.
                assert not entity_code
            else:
                entity_code = "*%s" % entity_code
        return self.base_declaration_code(base_code, entity_code)

    def type_test_code(self, py_arg, notnone=False):
        # C expression validating *py_arg* is an instance of this type
        # (optionally also accepting None).
        none_check = "((%s) == Py_None)" % py_arg
        type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
            py_arg, self.typeptr_cname)
        if notnone:
            return type_check
        else:
            return "likely(%s || %s)" % (none_check, type_check)

    def attributes_known(self):
        return self.scope is not None

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<PyExtensionType %s%s>" % (self.scope.class_name,
            ("", " typedef")[self.typedef_flag])

    def py_type_name(self):
        if not self.module_name:
            return self.name

        return "__import__(%r, None, None, ['']).%s" % (self.module_name,
                                                        self.name)
class CType(PyrexType):
    #
    #  Base class for all C types (non-reference-counted).
    #
    #  to_py_function     string     C function for converting to Python object
    #  from_py_function   string     C function for constructing from Python object
    #

    to_py_function = None
    from_py_function = None
    exception_value = None
    exception_check = 1

    def create_to_py_utility_code(self, env):
        # Base behaviour: conversion exists iff a function name is set.
        return self.to_py_function is not None

    def create_from_py_utility_code(self, env):
        return self.from_py_function is not None

    def can_coerce_to_pyobject(self, env):
        return self.create_to_py_utility_code(env)

    def error_condition(self, result_code):
        """Return the C expression that detects a failed conversion in
        *result_code*, or the int 0 when no check is needed/possible.
        """
        conds = []
        if self.is_string or self.is_pyunicode_ptr:
            conds.append("(!%s)" % result_code)
        elif self.exception_value is not None:
            conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value))
            if self.exception_check:
                # Sentinel value may be legal -- confirm with PyErr_Occurred().
                conds.append("PyErr_Occurred()")
        if len(conds) > 0:
            return " && ".join(conds)
        else:
            return 0
class CConstType(BaseType):
    # A const-qualified wrapper around another C type; delegates almost
    # everything to const_base_type via __getattr__.

    is_const = 1

    def __init__(self, const_base_type):
        self.const_base_type = const_base_type
        if const_base_type.has_attributes and const_base_type.scope is not None:
            import Symtab
            self.scope = Symtab.CConstScope(const_base_type.scope)

    def __repr__(self):
        return "<CConstType %s>" % repr(self.const_base_type)

    def __str__(self):
        return self.declaration_code("", for_display=1)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        # Prepend "const" to the entity and let the base type format it.
        return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex)

    def specialize(self, values):
        base_type = self.const_base_type.specialize(values)
        if base_type == self.const_base_type:
            return self
        else:
            return CConstType(base_type)

    def deduce_template_params(self, actual):
        return self.const_base_type.deduce_template_params(actual)

    def create_to_py_utility_code(self, env):
        # NOTE: implicitly returns None (falsy) when the base type has no
        # to-Python conversion -- callers treat that the same as False.
        if self.const_base_type.create_to_py_utility_code(env):
            self.to_py_function = self.const_base_type.to_py_function
            return True

    def __getattr__(self, name):
        return getattr(self.const_base_type, name)
class FusedType(CType):
    """
    Represents a Fused Type. All it needs to do is keep track of the types
    it aggregates, as it will be replaced with its specific version wherever
    needed.

    See http://wiki.cython.org/enhancements/fusedtypes

    types           [PyrexType]             is the list of types to be fused
    name            str                     the name of the ctypedef
    """

    is_fused = 1
    exception_check = 0

    def __init__(self, types, name=None):
        self.types = types
        self.name = name

    def declaration_code(self, entity_code, for_display = 0,
                         dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            return self.name
        # A fused type must be specialized away before C code generation.
        raise Exception("This may never happen, please report a bug")

    def __repr__(self):
        return 'FusedType(name=%r)' % self.name

    def specialize(self, values):
        # Specialization simply looks up the concrete type chosen for self.
        return values[self]

    def get_fused_types(self, result=None, seen=None):
        # With no accumulator, a fused type is trivially its own result;
        # otherwise append self once (callers pass matching result/seen).
        if result is None:
            return [self]
        if self not in seen:
            result.append(self)
            seen.add(self)
class CVoidType(CType):
    """The C ``void`` type.  Incomplete by definition."""

    is_void = 1

    def __repr__(self):
        return "<CVoidType>"

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            return self.base_declaration_code("void", entity_code)
        return self.base_declaration_code(
            public_decl("void", dll_linkage), entity_code)

    def is_complete(self):
        # void is always an incomplete type in C.
        return 0
class InvisibleVoidType(CVoidType):
    """Return type for C++ constructors and destructors.

    Behaves like void, but emits no type name in generated declarations
    (C++ ctors/dtors are declared without a return type).
    """

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            return self.base_declaration_code("[void]", entity_code)
        return self.base_declaration_code(
            public_decl("", dll_linkage), entity_code)
class CNumericType(CType):
    #
    #   Base class for all C numeric types.
    #
    #   rank      integer     Relative size
    #   signed    integer     0 = unsigned, 1 = unspecified, 2 = explicitly signed
    #

    is_numeric = 1
    default_value = "0"
    has_attributes = True
    scope = None

    # Indexed by self.signed (0/1/2) to build the C type name.
    sign_words = ("unsigned ", "", "signed ")

    def __init__(self, rank, signed = 1):
        self.rank = rank
        self.signed = signed

    def sign_and_name(self):
        # e.g. "unsigned long" -- sign word plus the rank's base name.
        s = self.sign_words[self.signed]
        n = rank_to_type_name[self.rank]
        return s + n

    def __repr__(self):
        return "<CNumericType %s>" % self.sign_and_name()

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        type_name = self.sign_and_name()
        if pyrex or for_display:
            base_code = type_name.replace('PY_LONG_LONG', 'long long')
        else:
            base_code = public_decl(type_name, dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def attributes_known(self):
        # Lazily create a scope exposing the 'conjugate' pseudo-method
        # (numeric scalars support conjugate() like Python numbers do).
        if self.scope is None:
            import Symtab
            self.scope = scope = Symtab.CClassScope(
                    '',
                    None,
                    visibility="extern")
            scope.parent_type = self
            scope.directives = {}
            scope.declare_cfunction(
                    "conjugate",
                    CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
                    pos=None,
                    defining=1,
                    cname=" ")  # deliberate blank cname: call is a no-op on reals
        return True

    def __lt__(self, other):
        """Sort based on rank, preferring signed over unsigned"""
        if other.is_numeric:
            return self.rank > other.rank and self.signed >= other.signed
        # Prefer numeric types over others
        return True

    def py_type_name(self):
        # Ranks <= 4 fit in a Python int/long; larger become float.
        if self.rank <= 4:
            return "(int, long)"
        return "float"
class ForbidUseClass:
    """Sentinel whose stringification is always a hard error.

    Used for values that must never leak into generated code or messages:
    any repr()/str() of the sentinel raises immediately, making accidental
    use fail loudly instead of silently producing garbage output.
    """

    def __repr__(self):
        # Deliberate: formatting this object is always a programming error.
        raise RuntimeError()

    def __str__(self):
        raise RuntimeError()

# The shared sentinel instance.
ForbidUse = ForbidUseClass()
class CIntType(CNumericType):
    # A plain C integer type of some rank/signedness.

    is_int = 1
    typedef_flag = 0
    to_py_function = None
    from_py_function = None
    exception_value = -1

    def create_to_py_utility_code(self, env):
        # Check the *class* attribute: subclasses with a fixed conversion
        # function (e.g. bint) skip generation; plain ints generate one.
        if type(self).to_py_function is None:
            self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
            env.use_utility_code(TempitaUtilityCode.load(
                "CIntToPy", "TypeConversion.c",
                context={"TYPE": self.declaration_code(''),
                         "TO_PY_FUNCTION": self.to_py_function}))
        return True

    def create_from_py_utility_code(self, env):
        if type(self).from_py_function is None:
            self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
            env.use_utility_code(TempitaUtilityCode.load(
                "CIntFromPy", "TypeConversion.c",
                context={"TYPE": self.declaration_code(''),
                         "FROM_PY_FUNCTION": self.from_py_function}))
        return True

    def get_to_py_type_conversion(self):
        """Return the CPython C-API function that boxes this int type."""
        if self.rank < list(rank_to_type_name).index('int'):
            # This assumes sizeof(short) < sizeof(int)
            return "PyInt_FromLong"
        else:
            # Py{Int|Long}_From[Unsigned]Long[Long]
            Prefix = "Int"
            SignWord = ""
            TypeName = "Long"
            if not self.signed:
                Prefix = "Long"
                SignWord = "Unsigned"
            if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
                Prefix = "Long"
                TypeName = "LongLong"
            return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)

    def get_from_py_type_conversion(self):
        """Specialize and return the unboxing helper for this int type."""
        type_name = rank_to_type_name[self.rank]
        type_name = type_name.replace("PY_LONG_LONG", "long long")
        TypeName = type_name.title().replace(" ", "")
        SignWord = self.sign_words[self.signed].strip().title()

        if self.rank >= list(rank_to_type_name).index('long'):
            utility_code = c_long_from_py_function
        else:
            utility_code = c_int_from_py_function
        utility_code.specialize(self,
                                SignWord=SignWord,
                                TypeName=TypeName)
        func_name = "__Pyx_PyInt_As%s%s" % (SignWord, TypeName)
        return func_name

    def assignable_from_resolved_type(self, src_type):
        return src_type.is_int or src_type.is_enum or src_type is error_type

    def invalid_value(self):
        if rank_to_type_name[int(self.rank)] == 'char':
            return "'?'"
        else:
            # We do not really know the size of the type, so return
            # a 32-bit literal and rely on casting to final type. It will
            # be negative for signed ints, which is good.
            return "0xbad0bad0"

    def overflow_check_binop(self, binop, env, const_rhs=False):
        """Load overflow-checking helpers for *binop* on this int type and
        return the name of the check function (or the no-overflow variant
        for types promoted to int by C arithmetic).
        """
        env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
        type = self.declaration_code("")
        name = self.specialization_name()
        if binop == "lshift":
            env.use_utility_code(TempitaUtilityCode.load(
                "LeftShift", "Overflow.c",
                context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
        else:
            if const_rhs:
                binop += "_const"
            if type in ('int', 'long', 'long long'):
                env.use_utility_code(TempitaUtilityCode.load(
                    "BaseCaseSigned", "Overflow.c",
                    context={'INT': type, 'NAME': name}))
            elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
                env.use_utility_code(TempitaUtilityCode.load(
                    "BaseCaseUnsigned", "Overflow.c",
                    context={'UINT': type, 'NAME': name}))
            elif self.rank <= 1:
                # sizeof(short) < sizeof(int): arithmetic promotes to int,
                # so these operations can never overflow the promoted type.
                return "__Pyx_%s_%s_no_overflow" % (binop, name)
            else:
                _load_overflow_base(env)
                env.use_utility_code(TempitaUtilityCode.load(
                    "SizeCheck", "Overflow.c",
                    context={'TYPE': type, 'NAME': name}))
                env.use_utility_code(TempitaUtilityCode.load(
                    "Binop", "Overflow.c",
                    context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
        return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def _load_overflow_base(env):
    """Load the shared Overflow.c helpers plus the base-case templates for
    every plain signed and unsigned C int type.

    Used for types whose size is not known at code-generation time, so the
    generated check dispatches to one of these base cases by size.
    """
    env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
    base_cases = (
        ("BaseCaseSigned", 'INT',
         ('int', 'long', 'long long')),
        ("BaseCaseUnsigned", 'UINT',
         ('unsigned int', 'unsigned long', 'unsigned long long')),
    )
    for template_name, placeholder, c_types in base_cases:
        for c_type in c_types:
            env.use_utility_code(TempitaUtilityCode.load(
                template_name, "Overflow.c",
                context={placeholder: c_type,
                         'NAME': c_type.replace(' ', '_')}))
class CAnonEnumType(CIntType):
    # An anonymous C enum: behaves as a plain 'int'.

    is_enum = 1

    def sign_and_name(self):
        return 'int'
class CReturnCodeType(CIntType):
    # An int used purely as an error/status return code: converting it to
    # Python just yields None, and no exception-value check is performed.

    to_py_function = "__Pyx_Owned_Py_None"

    is_returncode = True
    exception_check = False
class CBIntType(CIntType):
    """Cython's ``bint``: a C int carrying Python boolean semantics."""

    to_py_function = "__Pyx_PyBool_FromLong"
    from_py_function = "__Pyx_PyObject_IsTrue"
    exception_check = 1 # for C++ bool

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        # Shown as 'bool' in Pyrex/display mode, declared as 'int' in C.
        display = pyrex or for_display
        base_code = 'bool' if display else public_decl('int', dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def __repr__(self):
        return "<CNumericType bint>"

    def __str__(self):
        return 'bint'

    def py_type_name(self):
        return "bool"
class CPyUCS4IntType(CIntType):
    # Py_UCS4

    is_unicode_char = True

    # Py_UCS4 coerces from and to single character unicode strings (or
    # at most two characters on 16bit Unicode builds), but we also
    # allow Python integers as input.  The value range for Py_UCS4
    # is 0..1114111, which is checked when converting from an integer
    # value.

    to_py_function = "PyUnicode_FromOrdinal"
    from_py_function = "__Pyx_PyObject_AsPy_UCS4"

    def create_from_py_utility_code(self, env):
        # Range/length checking helper for object -> Py_UCS4 conversion.
        env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
        return True

    def sign_and_name(self):
        return "Py_UCS4"
class CPyUnicodeIntType(CIntType):
    # Py_UNICODE

    is_unicode_char = True

    # Py_UNICODE coerces from and to single character unicode strings,
    # but we also allow Python integers as input.  The value range for
    # Py_UNICODE is 0..1114111, which is checked when converting from
    # an integer value.

    to_py_function = "PyUnicode_FromOrdinal"
    from_py_function = "__Pyx_PyObject_AsPy_UNICODE"

    def create_from_py_utility_code(self, env):
        # Range/length checking helper for object -> Py_UNICODE conversion.
        env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
        return True

    def sign_and_name(self):
        return "Py_UNICODE"
class CPyHashTType(CIntType):
    # Py_hash_t (the C type of Python hash values).

    to_py_function = "__Pyx_PyInt_FromHash_t"
    from_py_function = "__Pyx_PyInt_AsHash_t"

    def sign_and_name(self):
        return "Py_hash_t"
class CPySSizeTType(CIntType):
    # Py_ssize_t, converted via __index__ (accepts any index-like object).

    to_py_function = "PyInt_FromSsize_t"
    from_py_function = "__Pyx_PyIndex_AsSsize_t"

    def sign_and_name(self):
        return "Py_ssize_t"
class CSSizeTType(CIntType):
    # Py_ssize_t converted strictly as an integer (no __index__ protocol).

    to_py_function = "PyInt_FromSsize_t"
    from_py_function = "PyInt_AsSsize_t"

    def sign_and_name(self):
        return "Py_ssize_t"
class CSizeTType(CIntType):
    # size_t; from-Python conversion is generated by CIntType machinery.

    to_py_function = "__Pyx_PyInt_FromSize_t"

    def sign_and_name(self):
        return "size_t"
class CPtrdiffTType(CIntType):
    # ptrdiff_t; both conversions generated by CIntType machinery.

    def sign_and_name(self):
        return "ptrdiff_t"
class CFloatType(CNumericType):
    # A C floating point type (float/double/long double).

    is_float = 1
    to_py_function = "PyFloat_FromDouble"
    from_py_function = "__pyx_PyFloat_AsDouble"

    exception_value = -1

    def __init__(self, rank, math_h_modifier = ''):
        # math_h_modifier: suffix for math.h functions ('f' for float,
        # 'l' for long double, '' for double).
        CNumericType.__init__(self, rank, 1)
        self.math_h_modifier = math_h_modifier
        if rank == RANK_FLOAT:
            # Single-precision uses its own converter to round correctly.
            self.from_py_function = "__pyx_PyFloat_AsFloat"

    def assignable_from_resolved_type(self, src_type):
        return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type

    def invalid_value(self):
        return Naming.PYX_NAN
class CComplexType(CNumericType):
    # C-level complex number type, parameterised by the C type of its
    # real/imaginary components.  Maps operators onto generated C helper
    # functions via lookup_op()/complex_ops.

    is_complex = 1
    to_py_function = "__pyx_PyComplex_FromComplex"
    has_attributes = 1
    scope = None  # lazily created attribute scope, see attributes_known()

    def __init__(self, real_type):
        # Strip internal typedefs so that equivalent component types share
        # one specialisation (and thus one set of helper functions).
        while real_type.is_typedef and not real_type.typedef_is_external:
            real_type = real_type.typedef_base_type
        if real_type.is_typedef and real_type.typedef_is_external:
            # The below is not actually used: Coercions are currently disabled
            # so that complex types of external types can not be created
            self.funcsuffix = "_%s" % real_type.specialization_name()
        elif hasattr(real_type, 'math_h_modifier'):
            self.funcsuffix = real_type.math_h_modifier
        else:
            self.funcsuffix = "_%s" % real_type.specialization_name()

        self.real_type = real_type
        # rank + 0.5 sorts each complex type just above its component type
        CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
        self.binops = {}  # cache for lookup_op()
        self.from_parts = "%s_from_parts" % self.specialization_name()
        self.default_value = "%s(0, 0)" % self.from_parts

    def __eq__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type == other.real_type
        else:
            return False

    def __ne__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type != other.real_type
        else:
            return True

    def __lt__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type < other.real_type
        else:
            # this is arbitrary, but it makes sure we always have
            # *some* kind of order
            return False

    def __hash__(self):
        return ~hash(self.real_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
            base_code = "%s complex" % real_code
        else:
            base_code = public_decl(self.sign_and_name(), dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def sign_and_name(self):
        real_type_name = self.real_type.specialization_name()
        real_type_name = real_type_name.replace('long__double','long_double')
        real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
        return Naming.type_prefix + real_type_name + "_complex"

    def assignable_from(self, src_type):
        # Temporary hack/feature disabling, see #441
        if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
                and src_type.typedef_is_external):
            return False
        else:
            return super(CComplexType, self).assignable_from(src_type)

    def assignable_from_resolved_type(self, src_type):
        return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
                    or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
                    or src_type is error_type)

    def attributes_known(self):
        # Build the 'real'/'imag'/'conjugate' attribute scope on first use.
        if self.scope is None:
            import Symtab
            self.scope = scope = Symtab.CClassScope(
                    '',
                    None,
                    visibility="extern")
            scope.parent_type = self
            scope.directives = {}
            scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
            scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
            scope.declare_cfunction(
                    "conjugate",
                    CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
                    pos=None,
                    defining=1,
                    cname="__Pyx_c_conj%s" % self.funcsuffix)
        return True

    def create_declaration_utility_code(self, env):
        # This must always be run, because a single CComplexType instance can be shared
        # across multiple compilations (the one created in the module scope)
        env.use_utility_code(complex_header_utility_code)
        env.use_utility_code(complex_real_imag_utility_code)
        for utility_code in (complex_type_utility_code,
                             complex_from_parts_utility_code,
                             complex_arithmetic_utility_code):
            env.use_utility_code(
                utility_code.specialize(
                    self,
                    real_type = self.real_type.declaration_code(''),
                    m = self.funcsuffix,
                    is_float = self.real_type.is_float))
        return True

    def create_to_py_utility_code(self, env):
        env.use_utility_code(complex_real_imag_utility_code)
        env.use_utility_code(complex_to_py_utility_code)
        return True

    def create_from_py_utility_code(self, env):
        self.real_type.create_from_py_utility_code(env)
        for utility_code in (complex_from_parts_utility_code,
                             complex_from_py_utility_code):
            env.use_utility_code(
                utility_code.specialize(
                    self,
                    real_type = self.real_type.declaration_code(''),
                    m = self.funcsuffix,
                    is_float = self.real_type.is_float))
        self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
        return True

    def lookup_op(self, nargs, op):
        # Return the C helper function name for (arity, operator), caching
        # the result; None if the operator is unsupported on complex values.
        try:
            return self.binops[nargs, op]
        except KeyError:
            pass
        try:
            op_name = complex_ops[nargs, op]
            self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
            return func_name
        except KeyError:
            return None

    def unary_op(self, op):
        return self.lookup_op(1, op)

    def binary_op(self, op):
        return self.lookup_op(2, op)

    def py_type_name(self):
        return "complex"

    def cast_code(self, expr_code):
        return expr_code
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
(2, '+'): 'sum',
(2, '-'): 'diff',
(2, '*'): 'prod',
(2, '/'): 'quot',
(2, '=='): 'eq',
}
complex_header_utility_code = UtilityCode(
proto_block='h_code',
proto="""
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
""")
complex_real_imag_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
""")
complex_type_utility_code = UtilityCode(
proto_block='complex_type_declarations',
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< %(real_type)s > %(type_name)s;
#else
typedef %(real_type)s _Complex %(type_name)s;
#endif
#else
typedef struct { %(real_type)s real, imag; } %(type_name)s;
#endif
""")
complex_from_parts_utility_code = UtilityCode(
proto_block='utility_code_proto',
proto="""
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s, %(real_type)s);
""",
impl="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return ::std::complex< %(real_type)s >(x, y);
}
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return x + y*(%(type)s)_Complex_I;
}
#endif
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
%(type)s z;
z.real = x;
z.imag = y;
return z;
}
#endif
""")
complex_to_py_utility_code = UtilityCode(
proto="""
#define __pyx_PyComplex_FromComplex(z) \\
PyComplex_FromDoubles((double)__Pyx_CREAL(z), \\
(double)__Pyx_CIMAG(z))
""")
complex_from_py_utility_code = UtilityCode(
proto="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject*);
""",
impl="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject* o) {
Py_complex cval;
#if CYTHON_COMPILING_IN_CPYTHON
if (PyComplex_CheckExact(o))
cval = ((PyComplexObject *)o)->cval;
else
#endif
cval = PyComplex_AsCComplex(o);
return %(type_name)s_from_parts(
(%(real_type)s)cval.real,
(%(real_type)s)cval.imag);
}
""")
complex_arithmetic_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq%(m)s(a, b) ((a)==(b))
#define __Pyx_c_sum%(m)s(a, b) ((a)+(b))
#define __Pyx_c_diff%(m)s(a, b) ((a)-(b))
#define __Pyx_c_prod%(m)s(a, b) ((a)*(b))
#define __Pyx_c_quot%(m)s(a, b) ((a)/(b))
#define __Pyx_c_neg%(m)s(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero%(m)s(z) ((z)==(%(real_type)s)0)
#define __Pyx_c_conj%(m)s(z) (::std::conj(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (::std::abs(z))
#define __Pyx_c_pow%(m)s(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero%(m)s(z) ((z)==0)
#define __Pyx_c_conj%(m)s(z) (conj%(m)s(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (cabs%(m)s(z))
#define __Pyx_c_pow%(m)s(a, b) (cpow%(m)s(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s);
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s);
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s, %(type)s);
#endif
#endif
""",
impl="""
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s a, %(type)s b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s a) {
%(type)s z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s a) {
%(type)s z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt%(m)s(z.real*z.real + z.imag*z.imag);
#else
return hypot%(m)s(z.real, z.imag);
#endif
}
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
%(real_type)s denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(a, a);
case 3:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, a);
case 4:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs%(m)s(a);
theta = atan2%(m)s(a.imag, a.real);
}
lnr = log%(m)s(r);
z_r = exp%(m)s(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos%(m)s(z_theta);
z.imag = z_r * sin%(m)s(z_theta);
return z;
}
#endif
#endif
""")
class CPointerBaseType(CType):
    # common base type for pointer/array types
    #
    #  base_type     CType    Reference type

    subtypes = ['base_type']

    def __init__(self, base_type):
        self.base_type = base_type
        # Classify char*-like (string) and Py_UNICODE* types, which get
        # automatic conversions to/from Python objects.
        for char_type in (c_char_type, c_uchar_type, c_schar_type):
            if base_type.same_as(char_type):
                self.is_string = 1
                break
        else:
            # for-else: only reached when no char type matched
            if base_type.same_as(c_py_unicode_type):
                self.is_pyunicode_ptr = 1

        if self.is_string and not base_type.is_error:
            if base_type.signed:
                self.to_py_function = "__Pyx_PyObject_FromString"
                if self.is_ptr:
                    # signed == 2 marks an explicitly 'signed char' base
                    if base_type.signed == 2:
                        self.from_py_function = "__Pyx_PyObject_AsSString"
                    else:
                        self.from_py_function = "__Pyx_PyObject_AsString"
            else:
                self.to_py_function = "__Pyx_PyObject_FromUString"
                if self.is_ptr:
                    self.from_py_function = "__Pyx_PyObject_AsUString"
            self.exception_value = "NULL"
        elif self.is_pyunicode_ptr and not base_type.is_error:
            self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
            if self.is_ptr:
                self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
            self.exception_value = "NULL"

    def py_type_name(self):
        if self.is_string:
            return "bytes"
        elif self.is_pyunicode_ptr:
            return "unicode"
        else:
            return super(CPointerBaseType, self).py_type_name()

    def literal_code(self, value):
        # Only string literals are handled here; other cases implicitly
        # return None.
        if self.is_string:
            assert isinstance(value, str)
            return '"%s"' % StringEncoding.escape_byte_string(value)
class CArrayType(CPointerBaseType):
    #  base_type     CType              Element type
    #  size          integer or None    Number of elements

    is_array = 1

    def __init__(self, base_type, size):
        super(CArrayType, self).__init__(base_type)
        self.size = size

    def __eq__(self, other):
        if isinstance(other, CType) and other.is_array and self.size == other.size:
            return self.base_type.same_as(other.base_type)
        return False

    def __ne__(self, other):
        # Keep '!=' consistent with '==' (Python 2 does not derive it);
        # mirrors CPtrType.__ne__.
        return not (self == other)

    def __hash__(self):
        return hash(self.base_type) + 28  # arbitrarily chosen offset

    def __repr__(self):
        return "<CArrayType %s %s>" % (self.size, repr(self.base_type))

    def same_as_resolved_type(self, other_type):
        return ((other_type.is_array and
            self.base_type.same_as(other_type.base_type))
                or other_type is error_type)

    def assignable_from_resolved_type(self, src_type):
        # Can't assign to a variable of an array type
        return 0

    def element_ptr_type(self):
        # Pointer type to which this array decays.
        return c_ptr_type(self.base_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if self.size is not None:
            dimension_code = self.size
        else:
            dimension_code = ""
        if entity_code.startswith("*"):
            entity_code = "(%s)" % entity_code
        return self.base_type.declaration_code(
            "%s[%s]" % (entity_code, dimension_code),
            for_display, dll_linkage, pyrex)

    def as_argument_type(self):
        # Arrays decay to element pointers when passed as arguments.
        return c_ptr_type(self.base_type)

    def is_complete(self):
        return self.size is not None

    def specialize(self, values):
        base_type = self.base_type.specialize(values)
        if base_type == self.base_type:
            return self
        else:
            # Fix: preserve the array size when specializing — __init__
            # requires (base_type, size); omitting it raised a TypeError.
            return CArrayType(base_type, self.size)

    def deduce_template_params(self, actual):
        if isinstance(actual, CArrayType):
            return self.base_type.deduce_template_params(actual.base_type)
        else:
            return None
class CPtrType(CPointerBaseType):
    #  base_type     CType    Reference type

    is_ptr = 1
    default_value = "0"

    def __hash__(self):
        return hash(self.base_type) + 27  # arbitrarily chosen offset

    def __eq__(self, other):
        if isinstance(other, CType) and other.is_ptr:
            return self.base_type.same_as(other.base_type)
        return False

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "<CPtrType %s>" % repr(self.base_type)

    def same_as_resolved_type(self, other_type):
        return ((other_type.is_ptr and
            self.base_type.same_as(other_type.base_type))
                or other_type is error_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        #print "CPtrType.declaration_code: pointer to", self.base_type ###
        return self.base_type.declaration_code(
            "*%s" % entity_code,
            for_display, dll_linkage, pyrex)

    def assignable_from_resolved_type(self, other_type):
        if other_type is error_type:
            return 1
        if other_type.is_null_ptr:
            return 1
        if self.base_type.is_const:
            # 'const T*' accepts anything a plain 'T*' would
            self = CPtrType(self.base_type.const_base_type)
        if self.base_type.is_cfunction:
            if other_type.is_ptr:
                other_type = other_type.base_type.resolve()
            if other_type.is_cfunction:
                return self.base_type.pointer_assignable_from_resolved_type(other_type)
            else:
                return 0
        if (self.base_type.is_cpp_class and other_type.is_ptr
                and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
            # implicit upcast of a derived-class pointer
            return 1
        if other_type.is_array or other_type.is_ptr:
            return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
        return 0

    def specialize(self, values):
        base_type = self.base_type.specialize(values)
        if base_type == self.base_type:
            return self
        else:
            return CPtrType(base_type)

    def deduce_template_params(self, actual):
        if isinstance(actual, CPtrType):
            return self.base_type.deduce_template_params(actual.base_type)
        else:
            return None

    def invalid_value(self):
        return "1"

    def find_cpp_operation_type(self, operator, operand_type=None):
        # Delegate operator lookup to a C++ class base type, if any.
        if self.base_type.is_cpp_class:
            return self.base_type.find_cpp_operation_type(operator, operand_type)
        return None
class CNullPtrType(CPtrType):
    # Type of the NULL pointer literal; CPtrType.assignable_from_resolved_type
    # accepts it for any pointer target.

    is_null_ptr = 1
class CReferenceType(BaseType):
    # C++ reference type ("T &").  Mostly transparent: attribute lookups
    # not found here are delegated to the referenced type via __getattr__.

    is_reference = 1

    def __init__(self, base_type):
        self.ref_base_type = base_type

    def __repr__(self):
        return "<CReferenceType %s>" % repr(self.ref_base_type)

    def __str__(self):
        return "%s &" % self.ref_base_type

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        #print "CReferenceType.declaration_code: pointer to", self.base_type ###
        return self.ref_base_type.declaration_code(
            "&%s" % entity_code,
            for_display, dll_linkage, pyrex)

    def specialize(self, values):
        base_type = self.ref_base_type.specialize(values)
        if base_type == self.ref_base_type:
            return self
        else:
            return CReferenceType(base_type)

    def deduce_template_params(self, actual):
        return self.ref_base_type.deduce_template_params(actual)

    def __getattr__(self, name):
        # Fall back to the referenced base type for all other attributes.
        return getattr(self.ref_base_type, name)
class CFuncType(CType):
    # Type of a C function (signature: return type, argument list, calling
    # convention, exception behaviour, GIL handling, fused-type templates).
    #
    #  return_type      CType
    #  args             [CFuncTypeArg]
    #  has_varargs      boolean
    #  exception_value  string
    #  exception_check  boolean    True if PyErr_Occurred check needed
    #  calling_convention  string  Function calling convention
    #  nogil            boolean    Can be called without gil
    #  with_gil         boolean    Acquire gil around function body
    #  templates        [string] or None
    #  cached_specialized_types [CFuncType]   cached specialized versions of the CFuncType if defined in a pxd
    #  from_fused       boolean    Indicates whether this is a specialized
    #                              C function
    #  is_strict_signature boolean  function refuses to accept coerced arguments
    #                               (used for optimisation overrides)
    #  is_const_method  boolean

    is_cfunction = 1
    original_sig = None
    cached_specialized_types = None
    from_fused = False
    is_const_method = False

    subtypes = ['return_type', 'args']

    def __init__(self, return_type, args, has_varargs = 0,
            exception_value = None, exception_check = 0, calling_convention = "",
            nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
            is_const_method = False, templates = None, is_strict_signature = False):
        self.return_type = return_type
        self.args = args
        self.has_varargs = has_varargs
        self.optional_arg_count = optional_arg_count
        self.exception_value = exception_value
        self.exception_check = exception_check
        self.calling_convention = calling_convention
        self.nogil = nogil
        self.with_gil = with_gil
        self.is_overridable = is_overridable
        self.is_const_method = is_const_method
        self.templates = templates
        self.is_strict_signature = is_strict_signature

    def __repr__(self):
        arg_reprs = map(repr, self.args)
        if self.has_varargs:
            arg_reprs.append("...")
        if self.exception_value:
            except_clause = " %r" % self.exception_value
        else:
            except_clause = ""
        if self.exception_check:
            except_clause += "?"
        return "<CFuncType %s %s[%s]%s>" % (
            repr(self.return_type),
            self.calling_convention_prefix(),
            ",".join(arg_reprs),
            except_clause)

    def calling_convention_prefix(self):
        cc = self.calling_convention
        if cc:
            return cc + " "
        else:
            return ""

    def as_argument_type(self):
        # Function types decay to function pointers in argument position.
        return c_ptr_type(self)

    def same_c_signature_as(self, other_type, as_cmethod = 0):
        return self.same_c_signature_as_resolved_type(
            other_type.resolve(), as_cmethod)

    def same_c_signature_as_resolved_type(self, other_type, as_cmethod = 0):
        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if self.is_overridable != other_type.is_overridable:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.same_as(
                other_type.args[i].type):
                    return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if not self.return_type.same_as(other_type.return_type):
            return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        return 1

    def compatible_signature_with(self, other_type, as_cmethod = 0):
        return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)

    def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if not self.is_overridable and other_type.is_overridable:
            return 0
        nargs = len(self.args)
        # allow covariance in the optional-argument count
        if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
            return 0
        if self.optional_arg_count < other_type.optional_arg_count:
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, len(other_type.args)):
            if not self.args[i].type.same_as(
                other_type.args[i].type):
                return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        if self.nogil != other_type.nogil:
            return 0
        # remember the widest compatible signature we matched against
        self.original_sig = other_type.original_sig or other_type
        return 1

    def narrower_c_signature_than(self, other_type, as_cmethod = 0):
        return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)

    def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
                return 0
            else:
                # arguments narrowed below the declared type need a
                # runtime type test
                self.args[i].needs_type_test = other_type.args[i].needs_type_test \
                        or not self.args[i].type.same_as(other_type.args[i].type)
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        return 1

    def same_calling_convention_as(self, other):
        ## XXX Under discussion ...
        ## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
        ## cs1 = self.calling_convention
        ## cs2 = other.calling_convention
        ## if (cs1 in callspec_words or
        ##     cs2 in callspec_words):
        ##     return cs1 == cs2
        ## else:
        ##     return True
        sc1 = self.calling_convention == '__stdcall'
        sc2 = other.calling_convention == '__stdcall'
        return sc1 == sc2

    def same_exception_signature_as(self, other_type):
        return self.same_exception_signature_as_resolved_type(
            other_type.resolve())

    def same_exception_signature_as_resolved_type(self, other_type):
        return self.exception_value == other_type.exception_value \
            and self.exception_check == other_type.exception_check

    def same_as_resolved_type(self, other_type, as_cmethod = 0):
        return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
            and self.same_exception_signature_as_resolved_type(other_type) \
            and self.nogil == other_type.nogil

    def pointer_assignable_from_resolved_type(self, other_type):
        # a nogil function pointer may not receive a gil-requiring function
        return self.same_c_signature_as_resolved_type(other_type) \
            and self.same_exception_signature_as_resolved_type(other_type) \
            and not (self.nogil and not other_type.nogil)

    def declaration_code(self, entity_code,
                         for_display = 0, dll_linkage = None, pyrex = 0,
                         with_calling_convention = 1):
        arg_decl_list = []
        # optional arguments are passed through a separate struct argument
        for arg in self.args[:len(self.args)-self.optional_arg_count]:
            arg_decl_list.append(
                arg.type.declaration_code("", for_display, pyrex = pyrex))
        if self.is_overridable:
            arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
        if self.optional_arg_count:
            arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
        if self.has_varargs:
            arg_decl_list.append("...")
        arg_decl_code = ", ".join(arg_decl_list)
        if not arg_decl_code and not pyrex:
            arg_decl_code = "void"
        trailer = ""
        if (pyrex or for_display) and not self.return_type.is_pyobject:
            if self.exception_value and self.exception_check:
                trailer = " except? %s" % self.exception_value
            elif self.exception_value:
                trailer = " except %s" % self.exception_value
            elif self.exception_check == '+':
                trailer = " except +"
            else:
                " except *" # ignored
            if self.nogil:
                trailer += " nogil"
        if not with_calling_convention:
            cc = ''
        else:
            cc = self.calling_convention_prefix()
            if (not entity_code and cc) or entity_code.startswith("*"):
                entity_code = "(%s%s)" % (cc, entity_code)
                cc = ""
        if self.is_const_method:
            trailer += " const"
        return self.return_type.declaration_code(
            "%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
            for_display, dll_linkage, pyrex)

    def function_header_code(self, func_name, arg_code):
        if self.is_const_method:
            trailer = " const"
        else:
            trailer = ""
        return "%s%s(%s)%s" % (self.calling_convention_prefix(),
            func_name, arg_code, trailer)

    def signature_string(self):
        s = self.declaration_code("")
        return s

    def signature_cast_string(self):
        s = self.declaration_code("(*)", with_calling_convention=False)
        return '(%s)' % s

    def specialize(self, values):
        result = CFuncType(self.return_type.specialize(values),
                           [arg.specialize(values) for arg in self.args],
                           has_varargs = self.has_varargs,
                           exception_value = self.exception_value,
                           exception_check = self.exception_check,
                           calling_convention = self.calling_convention,
                           nogil = self.nogil,
                           with_gil = self.with_gil,
                           is_overridable = self.is_overridable,
                           optional_arg_count = self.optional_arg_count,
                           is_const_method = self.is_const_method,
                           templates = self.templates)
        result.from_fused = self.is_fused
        return result

    def opt_arg_cname(self, arg_name):
        return self.op_arg_struct.base_type.scope.lookup(arg_name).cname

    # Methods that deal with Fused Types
    # All but map_with_specific_entries should be called only on functions
    # with fused types (and not on their corresponding specific versions).

    def get_all_specialized_permutations(self, fused_types=None):
        """
        Permute all the types. For every specific instance of a fused type, we
        want all other specific instances of all other fused types.

        It returns an iterable of two-tuples of the cname that should prefix
        the cname of the function, and a dict mapping any fused types to their
        respective specific types.
        """
        assert self.is_fused

        if fused_types is None:
            fused_types = self.get_fused_types()

        return get_all_specialized_permutations(fused_types)

    def get_all_specialized_function_types(self):
        """
        Get all the specific function types of this one.
        """
        assert self.is_fused

        if self.entry.fused_cfunction:
            return [n.type for n in self.entry.fused_cfunction.nodes]
        elif self.cached_specialized_types is not None:
            return self.cached_specialized_types

        # replace the fused entry by its specializations in the scope
        cfunc_entries = self.entry.scope.cfunc_entries
        cfunc_entries.remove(self.entry)

        result = []
        permutations = self.get_all_specialized_permutations()

        for cname, fused_to_specific in permutations:
            new_func_type = self.entry.type.specialize(fused_to_specific)

            if self.optional_arg_count:
                # Remember, this method is set by CFuncDeclaratorNode
                self.declare_opt_arg_struct(new_func_type, cname)

            new_entry = copy.deepcopy(self.entry)
            new_func_type.specialize_entry(new_entry, cname)

            new_entry.type = new_func_type
            new_func_type.entry = new_entry
            result.append(new_func_type)

            cfunc_entries.append(new_entry)

        self.cached_specialized_types = result

        return result

    def get_fused_types(self, result=None, seen=None, subtypes=None):
        """Return fused types in the order they appear as parameter types"""
        return super(CFuncType, self).get_fused_types(result, seen,
                                                      subtypes=['args'])

    def specialize_entry(self, entry, cname):
        assert not self.is_fused
        specialize_entry(entry, cname)
def specialize_entry(entry, cname):
    """
    Specialize an entry of a copied fused function or method
    """
    entry.is_fused_specialized = True
    entry.name = get_fused_cname(cname, entry.name)

    if entry.is_cmethod:
        entry.cname = entry.name
        if entry.is_inherited:
            # inherited methods are reached through the base object's field
            entry.cname = StringEncoding.EncodedString(
                    "%s.%s" % (Naming.obj_base_cname, entry.cname))
    else:
        entry.cname = get_fused_cname(cname, entry.cname)

    if entry.func_cname:
        entry.func_cname = get_fused_cname(cname, entry.func_cname)
def get_fused_cname(fused_cname, orig_cname):
    """
    Given the fused cname id and an original cname, return a specialized cname
    """
    assert fused_cname and orig_cname
    parts = (Naming.fused_func_prefix, fused_cname, orig_cname)
    return StringEncoding.EncodedString('%s%s%s' % parts)
def unique(somelist):
    """Return a list with duplicates removed, preserving first-seen order."""
    seen = set()
    deduped = []
    for item in somelist:
        if item in seen:
            continue
        seen.add(item)
        deduped.append(item)
    return deduped
def get_all_specialized_permutations(fused_types):
    # Deduplicate first so each distinct fused type is permuted only once.
    return _get_all_specialized_permutations(unique(fused_types))
def _get_all_specialized_permutations(fused_types, id="", f2s=()):
    # Recursively build the cartesian product of specific types over all
    # fused types; 'id' accumulates the underscore-joined cname prefix and
    # 'f2s' the fused-type -> specific-type mapping built so far.
    fused_type, = fused_types[0].get_fused_types()
    result = []

    for newid, specific_type in enumerate(fused_type.types):
        # f2s = dict(f2s, **{ fused_type: specific_type })
        f2s = dict(f2s)
        f2s.update({ fused_type: specific_type })

        if id:
            cname = '%s_%s' % (id, newid)
        else:
            cname = str(newid)

        if len(fused_types) > 1:
            result.extend(_get_all_specialized_permutations(
                                            fused_types[1:], cname, f2s))
        else:
            result.append((cname, f2s))

    return result
def specialization_signature_string(fused_compound_type, fused_to_specific):
    """
    Return the signature for a specialization of a fused type. e.g.

        floating[:] ->
            'float' or 'double'

        cdef fused ft:
            float[:]
            double[:]

        ft ->
            'float[:]' or 'double[:]'

        integral func(floating) ->
            'int (*func)(float)' or ...
    """
    fused_types = fused_compound_type.get_fused_types()

    # A single underlying fused type is reported by itself; compound types
    # (memoryviews, function types, ...) are reported as a whole.
    if len(fused_types) == 1:
        target = fused_types[0]
    else:
        target = fused_compound_type

    return target.specialize(fused_to_specific).typeof_name()
def get_specialized_types(type):
    """
    Return a list of specialized types sorted in reverse order in accordance
    with their preference in runtime fused-type dispatch
    """
    assert type.is_fused

    if isinstance(type, FusedType):
        result = type.types
        for specialized_type in result:
            specialized_type.specialization_string = specialized_type.typeof_name()
    else:
        result = []
        for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
            specialized_type = type.specialize(f2s)
            specialized_type.specialization_string = (
                            specialization_signature_string(type, f2s))
            result.append(specialized_type)

    return sorted(result)
class CFuncTypeArg(BaseType):
    """One formal argument of a CFuncType (name, C name, type, position)."""

    # FIXME: is this the right setup? should None be allowed here?
    not_none = False
    or_none = False
    accept_none = True
    accept_builtin_subtypes = False

    subtypes = ['type']

    def __init__(self, name, type, pos, cname=None):
        self.name = name
        if cname is None:
            cname = Naming.var_prefix + name
        self.cname = cname
        self.type = type
        self.pos = pos
        self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?

    def __repr__(self):
        return "%s:%s" % (self.name, repr(self.type))

    def declaration_code(self, for_display=0):
        return self.type.declaration_code(self.cname, for_display)

    def specialize(self, values):
        return CFuncTypeArg(self.name, self.type.specialize(values),
                            self.pos, self.cname)
class ToPyStructUtilityCode(object):
    # Emits the C helper that converts a C struct value into a Python dict
    # mapping field names to converted field values.

    requires = None

    def __init__(self, type, forward_decl):
        self.type = type
        self.header = "static PyObject* %s(%s)" % (type.to_py_function,
                                                   type.declaration_code('s'))
        self.forward_decl = forward_decl

    def __eq__(self, other):
        return isinstance(other, ToPyStructUtilityCode) and self.header == other.header

    def __hash__(self):
        return hash(self.header)

    def get_tree(self):
        pass

    def put_code(self, output):
        code = output['utility_code_def']
        proto = output['utility_code_proto']

        code.putln("%s {" % self.header)
        code.putln("PyObject* res;")
        code.putln("PyObject* member;")
        code.putln("res = PyDict_New(); if (res == NULL) return NULL;")
        for member in self.type.scope.var_entries:
            nameconst_cname = code.get_py_string_const(member.name, identifier=True)
            code.putln("member = %s(s.%s); if (member == NULL) goto bad;" % (
                member.type.to_py_function, member.cname))
            code.putln("if (PyDict_SetItem(res, %s, member) < 0) goto bad;" % nameconst_cname)
            code.putln("Py_DECREF(member);")
        code.putln("return res;")
        code.putln("bad:")
        code.putln("Py_XDECREF(member);")
        code.putln("Py_DECREF(res);")
        code.putln("return NULL;")
        code.putln("}")

        # This is a bit of a hack, we need a forward declaration
        # due to the way things are ordered in the module...
        if self.forward_decl:
            proto.putln(self.type.declaration_code('') + ';')
        proto.putln(self.header + ";")

    def inject_tree_and_scope_into(self, module_node):
        pass
class CStructOrUnionType(CType):
    """A C struct or union type.

    Attributes:
      name          -- string, Cython-level name
      cname         -- string, C-level name
      kind          -- string, "struct" or "union"
      scope         -- StructOrUnionScope, or None if incomplete
      typedef_flag  -- boolean, True if declared via ctypedef
      packed        -- boolean
      entry         -- Entry
    """

    is_struct_or_union = 1
    has_attributes = 1
    exception_check = True

    def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
        self.name = name
        self.cname = cname
        self.kind = kind
        self.scope = scope
        self.typedef_flag = typedef_flag
        self.is_struct = kind == 'struct'
        if self.is_struct:
            # Only plain structs get automatic Python <-> C conversion helpers.
            self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
            self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
            self.exception_check = True
        # Tri-state caches: None = not tried, False = known impossible,
        # otherwise the utility-code object.
        self._convert_to_py_code = None
        self._convert_from_py_code = None
        self.packed = packed

    def create_to_py_utility_code(self, env):
        # Generate (once) the struct -> dict conversion helper; returns a
        # false-ish value if any member type cannot be converted.
        if env.outer_scope is None:
            return False

        if self._convert_to_py_code is False:
            return None # tri-state-ish

        if self._convert_to_py_code is None:
            for member in self.scope.var_entries:
                if not member.type.create_to_py_utility_code(env):
                    self.to_py_function = None
                    self._convert_to_py_code = False
                    return False

            forward_decl = (self.entry.visibility != 'extern')
            self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl)

        env.use_utility_code(self._convert_to_py_code)
        return True

    def create_from_py_utility_code(self, env):
        # Generate (once) the dict -> struct conversion helper; mirrors
        # create_to_py_utility_code() above.
        if env.outer_scope is None:
            return False

        if self._convert_from_py_code is False:
            return None # tri-state-ish

        if self._convert_from_py_code is None:
            for member in self.scope.var_entries:
                if not member.type.create_from_py_utility_code(env):
                    self.from_py_function = None
                    self._convert_from_py_code = False
                    return False

            context = dict(
                struct_type_decl=self.declaration_code(""),
                var_entries=self.scope.var_entries,
                funcname=self.from_py_function,
            )
            self._convert_from_py_code = TempitaUtilityCode.load(
                "FromPyStructUtility", "TypeConversion.c", context=context)

        env.use_utility_code(self._convert_from_py_code)
        return True

    def __repr__(self):
        return "<CStructOrUnionType %s %s%s>" % (
            self.name, self.cname,
            ("", " typedef")[self.typedef_flag])

    def declaration_code(self, entity_code,
                         for_display=0, dll_linkage=None, pyrex=0):
        if pyrex or for_display:
            base_code = self.name
        else:
            if self.typedef_flag:
                base_code = self.cname
            else:
                base_code = "%s %s" % (self.kind, self.cname)
            base_code = public_decl(base_code, dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def __eq__(self, other):
        try:
            # Equality by Cython-level name only.
            return (isinstance(other, CStructOrUnionType) and
                    self.name == other.name)
        except AttributeError:
            return False

    def __lt__(self, other):
        try:
            return self.name < other.name
        except AttributeError:
            # this is arbitrary, but it makes sure we always have
            # *some* kind of order
            return False

    def __hash__(self):
        return hash(self.cname) ^ hash(self.kind)

    def is_complete(self):
        return self.scope is not None

    def attributes_known(self):
        return self.is_complete()

    def can_be_complex(self):
        # Does the struct consist of exactly two identical floats?
        fields = self.scope.var_entries
        if len(fields) != 2: return False
        a, b = fields
        return (a.type.is_float and b.type.is_float and
                a.type.declaration_code("") ==
                b.type.declaration_code(""))

    def struct_nesting_depth(self):
        # 1 + deepest nesting depth among the members.
        child_depths = [x.type.struct_nesting_depth()
                        for x in self.scope.var_entries]
        return max(child_depths) + 1

    def cast_code(self, expr_code):
        # C does not allow casting to a struct type; leave the expression alone.
        if self.is_struct:
            return expr_code
        return super(CStructOrUnionType, self).cast_code(expr_code)
# C++ standard-library class templates for which Cython ships automatic
# Python <-> C++ conversion utility code (see
# CppClassType.create_from_py_utility_code / create_to_py_utility_code).
builtin_cpp_conversions = ("std::string",
                           "std::pair",
                           "std::vector", "std::list",
                           "std::set", "std::unordered_set",
                           "std::map", "std::unordered_map")
class CppClassType(CType):
    """A C++ class (or class template / template specialization) type.

    Attributes:
      name       -- string, Cython-level name
      cname      -- string, C++-level name
      scope      -- CppClassScope
      templates  -- list of template parameter types, or None
    """

    is_cpp_class = 1
    has_attributes = 1
    exception_check = True
    namespace = None

    # For struct-like declaration.
    kind = "struct"
    packed = False
    typedef_flag = False

    subtypes = ['templates']

    def __init__(self, name, scope, cname, base_classes, templates = None, template_type = None):
        self.name = name
        self.cname = cname
        self.scope = scope
        self.base_classes = base_classes
        self.operators = []
        self.templates = templates
        # The generic template this type is a specialization of (if any).
        self.template_type = template_type
        # Cache of specializations keyed by the substitution items.
        self.specializations = {}
        self.is_cpp_string = cname == 'std::string'

    def use_conversion_utility(self, from_or_to):
        pass

    def maybe_unordered(self):
        # Template-context helper: 'unordered_' prefix for the unordered
        # std:: containers, empty string otherwise.
        if 'unordered' in self.cname:
            return 'unordered_'
        else:
            return ''

    def create_from_py_utility_code(self, env):
        # Build (once) a Python -> C++ conversion function for the supported
        # std:: containers, declaring each template parameter under a
        # single-letter alias (X, Y, Z, ...) for the utility .pyx code.
        if self.from_py_function is not None:
            return True
        if self.cname in builtin_cpp_conversions:
            X = "XYZABC"
            tags = []
            declarations = ["cdef extern from *:"]
            for ix, T in enumerate(self.templates or []):
                if T.is_pyobject or not T.create_from_py_utility_code(env):
                    return False
                tags.append(T.specialization_name())
                if T.exception_value is not None:
                    except_clause = T.exception_value
                    if T.exception_check:
                        except_clause = "? %s" % except_clause
                    declarations.append(
                        " ctypedef %s %s '%s'" % (
                            T.declaration_code("", for_display=True), X[ix], T.declaration_code("")))
                else:
                    except_clause = "*"
                    declarations.append(
                        " ctypedef struct %s '%s':\n pass" % (
                            X[ix], T.declaration_code("")))
                declarations.append(
                    " cdef %s %s_from_py '%s' (object) except %s" % (
                        X[ix], X[ix], T.from_py_function, except_clause))
            # Strip the 'std::' prefix to obtain the utility-code name.
            cls = self.cname[5:]
            cname = '__pyx_convert_%s_from_py_%s' % (cls, '____'.join(tags))
            context = {
                'template_type_declarations': '\n'.join(declarations),
                'cname': cname,
                'maybe_unordered': self.maybe_unordered(),
            }
            from UtilityCode import CythonUtilityCode
            env.use_utility_code(CythonUtilityCode.load(cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", context=context))
            self.from_py_function = cname
            return True

    def create_to_py_utility_code(self, env):
        # C++ -> Python counterpart of create_from_py_utility_code().
        if self.to_py_function is not None:
            return True
        if self.cname in builtin_cpp_conversions:
            X = "XYZABC"
            tags = []
            declarations = ["cdef extern from *:"]
            for ix, T in enumerate(self.templates or []):
                if not T.create_to_py_utility_code(env):
                    return False
                tags.append(T.specialization_name())
                declarations.append(
                    " ctypedef struct %s '%s':\n pass" % (
                        X[ix], T.declaration_code("")))
                declarations.append(
                    " cdef object %s_to_py '%s' (%s)" % (
                        X[ix], T.to_py_function, X[ix]))
            cls = self.cname[5:]
            cname = "__pyx_convert_%s_to_py_%s" % (cls, "____".join(tags))
            context = {
                'template_type_declarations': '\n'.join(declarations),
                'cname': cname,
                'maybe_unordered': self.maybe_unordered(),
            }
            from UtilityCode import CythonUtilityCode
            env.use_utility_code(CythonUtilityCode.load(cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", context=context))
            self.to_py_function = cname
            return True

    def specialize_here(self, pos, template_values = None):
        # User-level specialization with error reporting at 'pos'.
        if self.templates is None:
            error(pos, "'%s' type is not a template" % self)
            return error_type
        if len(self.templates) != len(template_values):
            error(pos, "%s templated type receives %d arguments, got %d" %
                  (self.name, len(self.templates), len(template_values)))
            return error_type
        has_object_template_param = False
        for value in template_values:
            if value.is_pyobject:
                has_object_template_param = True
                error(pos,
                      "Python object type '%s' cannot be used as a template argument" % value)
        if has_object_template_param:
            return error_type
        return self.specialize(dict(zip(self.templates, template_values)))

    def specialize(self, values):
        # Substitute 'values' into the template parameters, with caching.
        if not self.templates and not self.namespace:
            return self
        if self.templates is None:
            self.templates = []
        key = tuple(values.items())
        if key in self.specializations:
            return self.specializations[key]
        template_values = [t.specialize(values) for t in self.templates]
        specialized = self.specializations[key] = \
            CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
        # Need to do these *after* self.specializations[key] is set
        # to avoid infinite recursion on circular references.
        specialized.base_classes = [b.specialize(values) for b in self.base_classes]
        specialized.scope = self.scope.specialize(values)
        if self.namespace is not None:
            specialized.namespace = self.namespace.specialize(values)
        return specialized

    def deduce_template_params(self, actual):
        # Deduce template-parameter bindings by matching 'actual' against
        # this (possibly generic) template pattern; None if no match.
        if self == actual:
            return {}
        # TODO(robertwb): Actual type equality.
        elif self.declaration_code("") == actual.template_type.declaration_code(""):
            return reduce(
                merge_template_deductions,
                [formal_param.deduce_template_params(actual_param) for (formal_param, actual_param) in zip(self.templates, actual.templates)],
                {})
        else:
            return None

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if self.templates:
            template_strings = [param.declaration_code('', for_display, None, pyrex)
                                for param in self.templates]
            if for_display:
                brackets = "[%s]"
            else:
                brackets = "<%s>"
            templates = brackets % ",".join(template_strings)
            # Split '>>' so pre-C++11 compilers do not see a right-shift token.
            if templates[-2:] == ">>":
                templates = templates[:-2] + "> >"
        else:
            templates = ""
        if pyrex or for_display:
            base_code = "%s%s" % (self.name, templates)
        else:
            base_code = "%s%s" % (self.cname, templates)
            if self.namespace is not None:
                base_code = "%s::%s" % (self.namespace.declaration_code(''), base_code)
            base_code = public_decl(base_code, dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def is_subclass(self, other_type):
        # Reflexive, transitive subclass test over the C++ base-class graph.
        if self.same_as_resolved_type(other_type):
            return 1
        for base_class in self.base_classes:
            if base_class.is_subclass(other_type):
                return 1
        return 0

    def same_as_resolved_type(self, other_type):
        if other_type.is_cpp_class:
            if self == other_type:
                return 1
            elif (self.cname == other_type.cname and
                  self.template_type and other_type.template_type):
                if self.templates == other_type.templates:
                    return 1
                for t1, t2 in zip(self.templates, other_type.templates):
                    if not t1.same_as_resolved_type(t2):
                        return 0
                return 1
        return 0

    def assignable_from_resolved_type(self, other_type):
        # TODO: handle operator=(...) here?
        if other_type is error_type:
            return True
        return other_type.is_cpp_class and other_type.is_subclass(self)

    def attributes_known(self):
        return self.scope is not None

    def find_cpp_operation_type(self, operator, operand_type=None):
        # Resolve an overloaded C++ operator for self (and an optional second
        # operand); returns the operator's return type or None.
        operands = [self]
        if operand_type is not None:
            operands.append(operand_type)
        # pos == None => no errors
        operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
        if not operator_entry:
            return None
        func_type = operator_entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        return func_type.return_type

    def check_nullary_constructor(self, pos, msg="stack allocated"):
        # Error out if the class has constructors but none callable with
        # zero arguments (needed e.g. for stack allocation).
        constructor = self.scope.lookup(u'<init>')
        if constructor is not None and best_match([], constructor.all_alternatives()) is None:
            error(pos, "C++ class must have a nullary constructor to be %s" % msg)
class TemplatePlaceholderType(CType):
    """A C++ template parameter (e.g. the 'T' in 'vector[T]') standing in
    for a concrete type until specialization substitutes one."""

    def __init__(self, name):
        self.name = name

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        # Template parameters are declared simply by their bare name.
        if not entity_code:
            return self.name
        return self.name + " " + entity_code

    def specialize(self, values):
        # Substitute the concrete type if one is known for this placeholder.
        return values[self] if self in values else self

    def deduce_template_params(self, actual):
        # A bare placeholder matches any type and binds itself to it.
        return {self: actual}

    def same_as_resolved_type(self, other_type):
        if not isinstance(other_type, TemplatePlaceholderType):
            return 0
        return self.name == other_type.name

    def __hash__(self):
        return hash(self.name)

    def __cmp__(self, other):
        # Python 2 ordering: placeholders sort by name among themselves,
        # otherwise fall back to ordering on the types.
        if isinstance(other, TemplatePlaceholderType):
            return cmp(self.name, other.name)
        return cmp(type(self), type(other))

    def __eq__(self, other):
        if isinstance(other, TemplatePlaceholderType):
            return self.name == other.name
        return False
class CEnumType(CType):
    """A C enum type (optionally typedef'ed), converted to/from Python ints.

    Attributes:
      name          -- string, Cython-level name
      cname         -- string or None, C-level name
      typedef_flag  -- boolean
    """

    is_enum = 1
    signed = 1
    rank = -1 # Ranks below any integer type
    to_py_function = "PyInt_FromLong"
    from_py_function = "PyInt_AsLong"

    def __init__(self, name, cname, typedef_flag):
        self.name = name
        self.cname = cname
        self.values = []  # enumerator entries, filled in during analysis
        self.typedef_flag = typedef_flag

    def __str__(self):
        return self.name

    def __repr__(self):
        suffix = " typedef" if self.typedef_flag else ""
        return "<CEnumType %s %s%s>" % (self.name, self.cname, suffix)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = self.name
        else:
            base_code = self.cname if self.typedef_flag else "enum %s" % self.cname
            base_code = public_decl(base_code, dll_linkage)
        return self.base_declaration_code(base_code, entity_code)
class UnspecifiedType(PyrexType):
    # Used as a placeholder until the type can be determined
    # by type inference; matches no other type, not even itself.

    is_unspecified = 1

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        return "<unspecified>"

    def same_as_resolved_type(self, other_type):
        return False
class ErrorType(PyrexType):
    # Used to prevent propagation of error messages: once an expression
    # has this type, it pretends to support every operation/conversion so
    # a single problem does not trigger an avalanche of follow-on errors.

    is_error = 1
    exception_value = "0"
    exception_check = 0
    to_py_function = "dummy"
    from_py_function = "dummy"

    def create_to_py_utility_code(self, env):
        return True

    def create_from_py_utility_code(self, env):
        return True

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        return "<error>"

    def same_as_resolved_type(self, other_type):
        # Compare equal to everything to suppress cascaded errors.
        return 1

    def error_condition(self, result_code):
        return "dummy"
# C numeric-type names indexed by the 'rank' attribute of CIntType/CFloatType.
rank_to_type_name = (
    "char", # 0
    "short", # 1
    "int", # 2
    "long", # 3
    "PY_LONG_LONG", # 4
    "float", # 5
    "double", # 6
    "long double", # 7
)

_rank_to_type_name = list(rank_to_type_name)
RANK_INT = _rank_to_type_name.index('int')
RANK_LONG = _rank_to_type_name.index('long')
RANK_FLOAT = _rank_to_type_name.index('float')
# Signedness codes used throughout: 0 = unsigned, 1 = plain, 2 = explicitly signed.
UNSIGNED = 0
SIGNED = 2

# Singleton instances for all the simple C and Python types.
error_type = ErrorType()
unspecified_type = UnspecifiedType()

py_object_type = PyObjectType()

c_void_type = CVoidType()

c_uchar_type = CIntType(0, UNSIGNED)
c_ushort_type = CIntType(1, UNSIGNED)
c_uint_type = CIntType(2, UNSIGNED)
c_ulong_type = CIntType(3, UNSIGNED)
c_ulonglong_type = CIntType(4, UNSIGNED)

c_char_type = CIntType(0)
c_short_type = CIntType(1)
c_int_type = CIntType(2)
c_long_type = CIntType(3)
c_longlong_type = CIntType(4)

c_schar_type = CIntType(0, SIGNED)
c_sshort_type = CIntType(1, SIGNED)
c_sint_type = CIntType(2, SIGNED)
c_slong_type = CIntType(3, SIGNED)
c_slonglong_type = CIntType(4, SIGNED)

c_float_type = CFloatType(5, math_h_modifier='f')
c_double_type = CFloatType(6)
c_longdouble_type = CFloatType(7, math_h_modifier='l')

c_float_complex_type = CComplexType(c_float_type)
c_double_complex_type = CComplexType(c_double_type)
c_longdouble_complex_type = CComplexType(c_longdouble_type)

c_anon_enum_type = CAnonEnumType(-1)
c_returncode_type = CReturnCodeType(RANK_INT)
c_bint_type = CBIntType(RANK_INT)
# Fractional ranks slot these platform-dependent integer types between the
# plain C integer ranks for widening/comparison purposes.
c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)

c_null_ptr_type = CNullPtrType(c_void_type)
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
c_char_ptr_type = CPtrType(c_char_type)
c_uchar_ptr_type = CPtrType(c_uchar_type)
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)
c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
c_size_t_ptr_type = CPtrType(c_size_t_type)

# GIL state
c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)

# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)

# Predefined fused types for use in Cython source code.
# Not sure whether the unsigned versions and 'long long' should be in there
# long long requires C99 and might be slow, and would always get preferred
# when specialization happens through calling and not indexing
cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
                             name="integral")
# Omitting long double as it might be slow
cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
cy_numeric_type = FusedType([c_short_type,
                             c_int_type,
                             c_long_type,
                             c_float_type,
                             c_double_type,
                             c_float_complex_type,
                             c_double_complex_type], name="numeric")

# buffer-related structs
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
                                        None, 1, "__Pyx_Buf_DimInfo")
c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
                                          None, 1, "__Pyx_LocalBuf_ND")

cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
                                            None, 0, "__pyx_memoryview_obj")

memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
                                          None, 1, "__Pyx_memviewslice")

# Lookup table used by simple_c_type() / parse_basic_type().
modifiers_and_name_to_type = {
    #(signed, longness, name) : type
    (0, 0, "char"): c_uchar_type,
    (1, 0, "char"): c_char_type,
    (2, 0, "char"): c_schar_type,

    (0, -1, "int"): c_ushort_type,
    (0, 0, "int"): c_uint_type,
    (0, 1, "int"): c_ulong_type,
    (0, 2, "int"): c_ulonglong_type,

    (1, -1, "int"): c_short_type,
    (1, 0, "int"): c_int_type,
    (1, 1, "int"): c_long_type,
    (1, 2, "int"): c_longlong_type,

    (2, -1, "int"): c_sshort_type,
    (2, 0, "int"): c_sint_type,
    (2, 1, "int"): c_slong_type,
    (2, 2, "int"): c_slonglong_type,

    (1, 0, "float"): c_float_type,
    (1, 0, "double"): c_double_type,
    (1, 1, "double"): c_longdouble_type,

    (1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins
    (1, 0, "floatcomplex"): c_float_complex_type,
    (1, 0, "doublecomplex"): c_double_complex_type,
    (1, 1, "doublecomplex"): c_longdouble_complex_type,

    #
    (1, 0, "void"): c_void_type,

    (1, 0, "bint"): c_bint_type,
    (0, 0, "Py_UNICODE"): c_py_unicode_type,
    (0, 0, "Py_UCS4"): c_py_ucs4_type,
    (2, 0, "Py_hash_t"): c_py_hash_t_type,
    (2, 0, "Py_ssize_t"): c_py_ssize_t_type,
    (2, 0, "ssize_t") : c_ssize_t_type,
    (0, 0, "size_t") : c_size_t_type,
    (2, 0, "ptrdiff_t") : c_ptrdiff_t_type,

    (1, 0, "object"): py_object_type,
}
def is_promotion(src_type, dst_type):
    """Return whether converting src_type to dst_type counts as a C
    'promotion' (to int or to double).

    It's hard to find a hard definition of promotion, but empirical
    evidence suggests that this is all that's allowed.
    """
    if not src_type.is_numeric:
        return False
    if dst_type.same_as(c_int_type):
        if src_type.is_enum:
            return True
        # Unsigned types effectively need one extra rank step to fit.
        unsigned_penalty = (not src_type.signed)
        return src_type.is_int and unsigned_penalty + src_type.rank < dst_type.rank
    if dst_type.same_as(c_double_type):
        return src_type.is_float and src_type.rank <= dst_type.rank
    return False
def best_match(args, functions, pos=None, env=None):
    """
    Given a list args of arguments and a list of functions, choose one
    to call which seems to be the "best" fit for this list of arguments.
    This function is used, e.g., when deciding which overloaded method
    to dispatch for C++ classes.

    We first eliminate functions based on arity, and if only one
    function has the correct arity, we return it. Otherwise, we weight
    functions based on how much work must be done to convert the
    arguments, with the following priorities:
      * identical types or pointers to identical types
      * promotions
      * non-Python types

    That is, we prefer functions where no arguments need converted,
    and failing that, functions where only promotions are required, and
    so on.

    If no function is deemed a good fit, or if two or more functions have
    the same weight, we return None (as there is no best match). If pos
    is not None, we also generate an error.
    """
    # TODO: args should be a list of types, not a list of Nodes.
    actual_nargs = len(args)

    candidates = []
    errors = []
    for func in functions:
        error_mesg = ""
        func_type = func.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        # Check function type
        if not func_type.is_cfunction:
            if not func_type.is_error and pos is not None:
                error_mesg = "Calling non-function type '%s'" % func_type
            errors.append((func, error_mesg))
            continue
        # Check no. of args
        max_nargs = len(func_type.args)
        min_nargs = max_nargs - func_type.optional_arg_count
        if actual_nargs < min_nargs or \
            (not func_type.has_varargs and actual_nargs > max_nargs):
            if max_nargs == min_nargs and not func_type.has_varargs:
                expectation = max_nargs
            elif actual_nargs < min_nargs:
                expectation = "at least %s" % min_nargs
            else:
                expectation = "at most %s" % max_nargs
            error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \
                         % (expectation, actual_nargs)
            errors.append((func, error_mesg))
            continue
        if func_type.templates:
            # Deduce template parameters from the actual argument types and,
            # if successful, add a synthetic specialized entry as candidate.
            arg_types = [arg.type for arg in args]
            deductions = reduce(
                merge_template_deductions,
                [pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
                {})
            if deductions is None:
                errors.append((func, "Unable to deduce type parameters"))
            elif len(deductions) < len(func_type.templates):
                errors.append((func, "Unable to deduce type parameter %s" % (
                    ", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
            else:
                type_list = [deductions[param] for param in func_type.templates]
                from Symtab import Entry
                specialization = Entry(
                    name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
                    cname = func.cname + "<%s>" % ",".join([t.declaration_code("") for t in type_list]),
                    type = func_type.specialize(deductions),
                    pos = func.pos)
                candidates.append((specialization, specialization.type))
        else:
            candidates.append((func, func_type))

    # Optimize the most common case of no overloading...
    if len(candidates) == 1:
        return candidates[0][0]
    elif len(candidates) == 0:
        if pos is not None:
            func, errmsg = errors[0]
            # NOTE(review): this condition is truthy whenever *any* recorded
            # error matches the first one, so the first message is almost
            # always reported -- confirm whether "all errors equal" was meant.
            if len(errors) == 1 or [1 for func, e in errors if e == errmsg]:
                error(pos, errmsg)
            else:
                error(pos, "no suitable method found")
        return None

    possibilities = []
    bad_types = []
    needed_coercions = {}

    # Score each candidate: score[0] counts Python-object conversions,
    # score[1] other non-trivial C conversions, score[2] promotions and
    # integer/float widenings (weighted by rank distance).  Lower is better.
    for index, (func, func_type) in enumerate(candidates):
        score = [0,0,0,0]
        for i in range(min(len(args), len(func_type.args))):
            src_type = args[i].type
            dst_type = func_type.args[i].type

            assignable = dst_type.assignable_from(src_type)

            # Now take care of normal string literals. So when you call a cdef
            # function that takes a char *, the coercion will mean that the
            # type will simply become bytes. We need to do this coercion
            # manually for overloaded and fused functions
            if not assignable and src_type.is_pyobject:
                if (src_type.is_builtin_type and src_type.name == 'str' and
                        dst_type.resolve() is c_char_ptr_type):
                    c_src_type = c_char_ptr_type
                else:
                    c_src_type = src_type.default_coerced_ctype()

                if c_src_type:
                    assignable = dst_type.assignable_from(c_src_type)
                    if assignable:
                        src_type = c_src_type
                        needed_coercions[func] = i, dst_type

            if assignable:
                if src_type == dst_type or dst_type.same_as(src_type):
                    pass # score 0
                elif func_type.is_strict_signature:
                    break # exact match requested but not found
                elif is_promotion(src_type, dst_type):
                    score[2] += 1
                elif ((src_type.is_int and dst_type.is_int) or
                      (src_type.is_float and dst_type.is_float)):
                    score[2] += abs(dst_type.rank + (not dst_type.signed) -
                                    (src_type.rank + (not src_type.signed))) + 1
                elif not src_type.is_pyobject:
                    score[1] += 1
                else:
                    score[0] += 1
            else:
                error_mesg = "Invalid conversion from '%s' to '%s'"%(src_type,
                                                                     dst_type)
                bad_types.append((func, error_mesg))
                break
        else:
            possibilities.append((score, index, func)) # so we can sort it

    if possibilities:
        possibilities.sort()
        if len(possibilities) > 1:
            score1 = possibilities[0][0]
            score2 = possibilities[1][0]
            if score1 == score2:
                if pos is not None:
                    error(pos, "ambiguous overloaded method")
                return None

        function = possibilities[0][-1]
        # Apply any string-literal coercion recorded for the winner.
        if function in needed_coercions and env:
            arg_i, coerce_to_type = needed_coercions[function]
            args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env)

        return function

    if pos is not None:
        if len(bad_types) == 1:
            error(pos, bad_types[0][1])
        else:
            error(pos, "no suitable method found")

    return None
def merge_template_deductions(a, b):
    """Merge two template-parameter deduction dicts.

    Returns the merged mapping (reusing and mutating ``a``, as callers
    fold with ``reduce(..., {})``), or None when either input is None or
    the two deductions disagree on any parameter.
    """
    if a is None or b is None:
        return None
    # 'merged' aliases 'a' on purpose; renamed from 'all', which shadowed
    # the builtin.  dict.items() (not Py2-only iteritems()) works on both
    # Python 2 and 3.
    merged = a
    for param, value in b.items():
        if param in merged:
            if merged[param] != value:
                return None
        else:
            merged[param] = value
    return merged
def widest_numeric_type(type1, type2):
    """Return the narrowest numeric type encompassing both arguments."""
    if type1 == type2:
        return type1
    if type1.is_complex or type2.is_complex:
        # Widen the real components, then re-wrap as a complex type.
        def real_component(ntype):
            return ntype.real_type if ntype.is_complex else ntype
        return CComplexType(
            widest_numeric_type(real_component(type1), real_component(type2)))
    if type1.is_enum and type2.is_enum:
        return c_int_type
    if type1.rank != type2.rank:
        # Different ranks: the higher-ranked type wins.
        return type2 if type1.rank < type2.rank else type1
    # Equal ranks: prefer the less-signed variant, defaulting to type2.
    if type1.signed < type2.signed:
        return type1
    return type2
def independent_spanning_type(type1, type2):
    """Return a type assignable independently from both type1 and type2.

    Unlike spanning_type(), no interoperability between the two operand
    types is required.  E.g. for "True * 2" an integer result is safe (and
    spanning_type() does the right thing), whereas "x = True or 2" must
    evaluate to a type that can hold both a boolean and an integer, which
    is what this function provides.
    """
    if type1 == type2:
        return type1
    if ((type1 is c_bint_type or type2 is c_bint_type)
            and type1.is_numeric and type2.is_numeric):
        # A bint mixed with another C numeric type must not collapse to a
        # numeric span, or we lose the ability to coerce to a Python bool.
        return py_object_type
    spanned = _spanning_type(type1, type2)
    return error_type if spanned is None else spanned
def spanning_type(type1, type2):
    """Return a type assignable from both type1 and type2, falling back to
    py_object_type when nothing better exists.

    The caller is assumed to attempt a coercion afterwards, which will
    fail if the types cannot actually coerce to a py_object_type.
    """
    if type1 == type2:
        return type1
    if type1 is py_object_type or type2 is py_object_type:
        return py_object_type
    if type1 is c_py_unicode_type or type2 is c_py_unicode_type:
        # Py_UNICODE behaves more like a string than an int
        return py_object_type
    spanned = _spanning_type(type1, type2)
    return py_object_type if spanned is None else spanned
def _spanning_type(type1, type2):
    """Shared worker for spanning_type()/independent_spanning_type();
    returns None when no span better than py_object_type exists."""
    if type1.is_numeric and type2.is_numeric:
        return widest_numeric_type(type1, type2)
    if type1.is_builtin_type and type1.name == 'float' and type2.is_numeric:
        # Python float spans like a C double.
        return widest_numeric_type(c_double_type, type2)
    if type2.is_builtin_type and type2.name == 'float' and type1.is_numeric:
        return widest_numeric_type(type1, c_double_type)
    if type1.is_extension_type and type2.is_extension_type:
        return widest_extension_type(type1, type2)
    if type1.is_pyobject or type2.is_pyobject:
        return py_object_type
    # Fall back to direct assignability, preferring type1 as target.
    for target, source in ((type1, type2), (type2, type1)):
        if target.assignable_from(source):
            if target.is_extension_type and target.typeobj_is_imported():
                # external types are unsafe, so we use PyObject instead
                return py_object_type
            return target
    return None
def widest_extension_type(type1, type2):
    """Return the closest common base extension type of the two arguments,
    or py_object_type if none exists (or either type is imported)."""
    if type1.typeobj_is_imported() or type2.typeobj_is_imported():
        # External extension types are unsafe to span directly.
        return py_object_type
    a, b = type1, type2
    while a is not None and b is not None:
        if a.subtype_of(b):
            return b
        if b.subtype_of(a):
            return a
        # Walk both inheritance chains upwards in lockstep.
        a, b = a.base_type, b.base_type
    return py_object_type
def simple_c_type(signed, longness, name):
    """Look up the type descriptor for a simple type given its name and
    sign/length modifiers; returns None when no such combination exists."""
    key = (signed, longness, name)
    return modifiers_and_name_to_type.get(key)
def parse_basic_type(name):
    """Parse a simple C type name -- optionally carrying pointer markers
    ('p_'/'p' prefix or '*' suffix) and sign/length modifiers -- into a
    type object; returns None if the name is not recognized."""
    # Pointer forms recurse on the pointee name.
    base = None
    if name.startswith('p_'):
        base = parse_basic_type(name[2:])
    elif name.startswith('p'):
        base = parse_basic_type(name[1:])
    elif name.endswith('*'):
        base = parse_basic_type(name[:-1])
    if base:
        return CPtrType(base)

    # Fast path: the name maps directly with default modifiers.
    direct = simple_c_type(1, 0, name)
    if direct:
        return direct

    # Special-cased names carry a fixed signedness and no length modifiers.
    special_signedness = {
        'Py_UNICODE': 0, 'Py_UCS4': 0, 'size_t': 0,
        'Py_hash_t': 2, 'Py_ssize_t': 2, 'ssize_t': 2,
    }
    longness = 0
    if name in special_signedness:
        signed = special_signedness[name]
    else:
        signed = 1
        # Leading 'u'/'s' selects unsigned/explicitly-signed variants
        # ('short' starts with 's' but is a length word, not a sign).
        if name.startswith('u'):
            name = name[1:]
            signed = 0
        elif name.startswith('s') and not name.startswith('short'):
            name = name[1:]
            signed = 2
        # Each 'short' decrements, each 'long' increments the longness.
        while name.startswith('short'):
            name = name.replace('short', '', 1).strip()
            longness -= 1
        while name.startswith('long'):
            name = name.replace('long', '', 1).strip()
            longness += 1
        # Bare 'long'/'short' imply 'int'.
        if longness != 0 and not name:
            name = 'int'
    return simple_c_type(signed, longness, name)
def c_array_type(base_type, size):
    """Construct a C array type over base_type, propagating error_type."""
    if base_type is error_type:
        return error_type
    return CArrayType(base_type, size)
def c_ptr_type(base_type):
    """Construct a C pointer type over base_type, propagating error_type."""
    if base_type is error_type:
        return error_type
    return CPtrType(base_type)
def c_ref_type(base_type):
    """Construct a C reference type over base_type, propagating error_type."""
    if base_type is error_type:
        return error_type
    return CReferenceType(base_type)
def c_const_type(base_type):
    """Construct a C const type over base_type, propagating error_type."""
    if base_type is error_type:
        return error_type
    return CConstType(base_type)
def same_type(type1, type2):
    # Convenience wrapper: structural type equality via PyrexType.same_as().
    return type1.same_as(type2)
def assignable_from(type1, type2):
    # Convenience wrapper: can a value of type2 be assigned to type1?
    return type1.assignable_from(type2)
def typecast(to_type, from_type, expr_code):
    """Return expr_code cast to a C type which can be assigned to to_type,
    assuming its existing C type is from_type; the cast is omitted when it
    is unnecessary."""
    directly_assignable = (to_type is from_type or
        (not to_type.is_pyobject and assignable_from(to_type, from_type)))
    if directly_assignable:
        return expr_code
    if (to_type is py_object_type and from_type and
            from_type.is_builtin_type and from_type.name != 'type'):
        # no cast needed, builtins are PyObject* already
        return expr_code
    return to_type.cast_code(expr_code)
| bsd-3-clause |
AustereCuriosity/numpy | numpy/distutils/npy_pkg_config.py | 30 | 13241 | from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser, NoOptionError
else:
from configparser import RawConfigParser, NoOptionError
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
    """Raised when a configuration file cannot be parsed correctly."""

    def __init__(self, msg):
        # Keep the message on an attribute; __str__ echoes it verbatim.
        self.msg = msg

    def __str__(self):
        return self.msg
class PkgNotFound(IOError):
    """Raised when a package cannot be located."""

    def __init__(self, msg):
        # Keep the message on an attribute; __str__ echoes it verbatim.
        self.msg = msg

    def __str__(self):
        return self.msg
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'
    """
    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
         'macros': [], 'ignored': []}
    # Split on ' -' so every token is one flag with its leading '-' stripped.
    flags = (' ' + line).split(' -')
    for flag in flags:
        # Skip empty tokens (e.g. the one produced by the leading ' -').
        # Previously the dead check `len(flag) > 0` (always true after
        # prepending '-') let a spurious '-' end up in 'ignored'.
        if not flag.strip():
            continue
        flag = '-' + flag
        if flag.startswith('-I'):
            d['include_dirs'].append(flag[2:].strip())
        elif flag.startswith('-L'):
            d['library_dirs'].append(flag[2:].strip())
        elif flag.startswith('-l'):
            d['libraries'].append(flag[2:].strip())
        elif flag.startswith('-D'):
            d['macros'].append(flag[2:].strip())
        else:
            d['ignored'].append(flag)
    return d
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.
    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        if requires:
            self.requires = requires
        else:
            self.requires = []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Returns
        -------
        keys : list of str
            The list of section headers.
        """
        return list(self._sections.keys())

    def cflags(self, section="default"):
        # Interpolate ${var} references, then escape backslashes so the
        # value survives being substituted into generated files.
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        # Bug fix: the condition was inverted -- the required libraries were
        # only joined into the output when the list was empty.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)
        return "\n".join(m)
class VariableSet(object):
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.
    """
    def __init__(self, d):
        self._raw_data = dict(d)
        self._re = {}
        self._re_sub = {}
        self._init_parse()

    def _init_parse(self):
        # Pre-compile one '${name}' pattern per variable.
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        # Brute force: keep substituting until no '${var}' remains, or until
        # a pass leaves the string unchanged (unknown variable left over).
        def _interpolate(value):
            for k in self._re:
                value = self._re[k].sub(self._re_sub[k], value)
            return value
        while _VAR.search(value):
            nvalue = _interpolate(value)
            if nvalue == value:
                break
            value = nvalue
        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.
        """
        return list(self._raw_data)

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        self._raw_data[name] = value
        self._init_parse_var(name, value)
def parse_meta(config):
    """Parse the [meta] section of *config*.

    Returns a dict of its options; 'requires' defaults to an empty list.
    Raises FormatError when the section or one of the mandatory options
    ('name', 'description', 'version') is missing.
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")
    d = dict(config.items('meta'))
    for k in ['name', 'description', 'version']:
        if k not in d:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                              "but not found" % k)
    # 'requires' is optional in the file but always present in the result.
    d.setdefault('requires', [])
    return d
def parse_variables(config):
    """Build a VariableSet from the [variables] section of *config*.

    Raises FormatError when the section is missing.
    """
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")
    return VariableSet(dict(config.items("variables")))
def parse_sections(config):
    """Parse every non-meta/variables section of *config*.

    Returns a ``(sections, requires)`` pair: ``sections`` maps each section
    name to a dict of its options, and ``requires`` maps section names to
    their 'requires' option when present (mirroring the logic used inside
    parse_config).

    Note: this function previously returned the undefined names
    ``meta_d, r`` and raised NameError whenever it was called.
    """
    sections = {}
    requires = {}
    for s in config.sections():
        if s in ('meta', 'variables'):
            continue
        d = {}
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        for name, value in config.items(s):
            d[name] = value
        sections[s] = d
    return sections, requires
def pkg_to_filename(pkg_name):
    """Map a package name to the name of its .ini configuration file."""
    return pkg_name + ".ini"
def parse_config(filename, dirs=None):
    """Parse one .ini config file.

    Looks for *filename* in each directory of *dirs* (or as-is when *dirs*
    is None) and returns the tuple ``(meta, vars, sections, requires)``.
    Raises PkgNotFound when no candidate file can be read.
    """
    if dirs:
        filenames = [os.path.join(d, filename) for d in dirs]
    else:
        filenames = [filename]
    config = RawConfigParser()
    # config.read returns the list of files successfully parsed.
    n = config.read(filenames)
    if not len(n) >= 1:
        raise PkgNotFound("Could not find file(s) %s" % str(filenames))
    # Parse meta and variables sections
    meta = parse_meta(config)
    vars = {}
    if config.has_section('variables'):
        for name, value in config.items("variables"):
            # Escape backslashes so Windows paths survive interpolation.
            vars[name] = _escape_backslash(value)
    # Parse "normal" sections
    secs = [s for s in config.sections() if not s in ['meta', 'variables']]
    sections = {}
    requires = {}
    for s in secs:
        d = {}
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        for name, value in config.items(s):
            d[name] = value
        sections[s] = d
    return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
    """Read a config file and all the configs it requires, recursively,
    and merge them into a single LibraryInfo instance."""
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
            # Update var dict for variables not in 'top' config file
            for k, v in nvars.items():
                if not k in vars:
                    vars[k] = v
            # Update sec dict
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    # Append the required library's option text to ours.
                    sections[rname][oname] += ' %s' % ovalue
        return meta, vars, sections, reqs
    meta, vars, sections, reqs = _read_config(filenames)
    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    if not 'pkgdir' in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if not pkgname in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))
        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
    return LibraryInfo(name=meta["name"], description=meta["description"],
            version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
_CACHE = {}
def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info

    Examples
    --------
    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
    >>> type(npymath_info)
    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
    >>> print(npymath_info)
    Name: npymath
    Description: Portable, core math library implementing C99 standard
    Requires:
    Version: 0.1  #random
    """
    try:
        return _CACHE[pkgname]
    except KeyError:
        # First request for this package: parse and memoize.
        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
        _CACHE[pkgname] = v
        return v
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
    # pkg-config style command line tool, useful for debugging.
    import sys
    from optparse import OptionParser
    import glob
    import os

    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    # Bug fix: this help string was a copy-paste of "Minimal version".
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      help="list all packages in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")

    (options, args) = parser.parse_args(sys.argv)
    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")

    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            # Bug fix: read_config expects the bare package name; passing the
            # .ini filename made it look for 'foo.ini.ini'.
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))

    pkg_name = args[1]
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])

    if options.section:
        section = options.section
    else:
        section = "default"

    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of " \
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value

    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        print(info.version >= options.min_version)
| bsd-3-clause |
40223101/w16b_test | static/Brython3.1.1-20150328-091302/Lib/site-packages/highlight.py | 617 | 2518 | import keyword
import _jsre as re
from browser import html
# Characters allowed in identifiers: ASCII letters plus underscore.
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'_'
digits = '0123456789'
# Names of the Python built-in functions, highlighted like keywords.
builtin_funcs = ("abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
    "eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
    "binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
    "float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
    "chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
    "cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
    "__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" +
    "buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern")
# Anchored alternations matching exactly one keyword / builtin name.
kw_pattern = '^('+'|'.join(keyword.kwlist)+')$'
bf_pattern = '^('+builtin_funcs+')$'
def highlight(txt, string_color="blue", comment_color="green",
        keyword_color="purple"):
    """Return an html.PRE element containing *txt* syntax-highlighted.

    Strings, comments and keywords/builtins are wrapped in colored SPAN
    elements; all other characters are inserted verbatim.  The ``<=``
    operator is Brython's DOM child-insertion operator.
    """
    res = html.PRE()
    i = 0
    name = ''  # identifier currently being accumulated
    while i<len(txt):
        car = txt[i]
        if car in ["'",'"']:
            # String literal: scan ahead for a closing quote of same kind.
            k = i+1
            while k<len(txt):
                if txt[k]==car:
                    # Count the backslashes immediately before the quote:
                    # an even count means the quote is not escaped.
                    nb_as = 0
                    j = k-1
                    while True:
                        if txt[j]=='\\':
                            nb_as+=1
                            j -= 1
                        else:
                            break
                    if nb_as % 2 == 0:
                        res <= html.SPAN(txt[i:k+1],
                            style=dict(color=string_color))
                        i = k
                        break
                k += 1
        elif car == '#': # comment
            # Comment runs to the end of the line (or end of text).
            end = txt.find('\n', i)
            if end== -1:
                res <= html.SPAN(txt[i:],style=dict(color=comment_color))
                break
            else:
                res <= html.SPAN(txt[i:end],style=dict(color=comment_color))
                i = end-1
        elif car in letters:
            name += car
        elif car in digits and name:
            # Digits may appear inside an identifier, but not start one.
            name += car
        else:
            if name:
                # Flush the pending identifier, colored when it is a
                # keyword or a builtin function name.
                if re.search(kw_pattern,name):
                    res <= html.SPAN(name,style=dict(color=keyword_color))
                elif re.search(bf_pattern,name):
                    res <= html.SPAN(name,style=dict(color=keyword_color))
                else:
                    res <= name
                name = ''
            res <= car
        i += 1
    # Flush any identifier left pending at end of text.
    res <= name
    return res
ningirsu/stepmania-server | smserver/smutils/smpacket/smpacket.py | 1 | 34459 | """
The ```SMpacket`` module
========================
Provide easy utilisation of the stepmania protocol.
:Example:
>>> from smserver.smutils.smpacket import smcommand
>>> from smserver.smutils.smpacket import smpacket
>>> # Create a new packet instance
>>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="test")
>>> print(packet)
<SMPacketServerNSCCM message="test">
>>> # Binary encode your packet
>>> packet.binary
b'\\x00\\x00\\x00\\x06\\x87test\\x00'
>>> # Decode binary data
>>> packet2 = SMPacket.from_("binary", packet.binary)
>>> print(packet2)
<SMPacketServerNSCCM message="test">
>>> packet = SMPacket.new(smcommand.SMServerCommand.NSCPing)
>>> # JSON encode your packet
>>> packet.json
'{"_command": 128}'
>>> # Decode JSON data
>>> packet2 = SMPacket.from_("json", packet.json)
>>> print(packet2)
<SMPacketServerNSCPing >
"""
import json
from smserver.smutils.smpacket import smcommand
from smserver.smutils.smpacket import smencoder
class _SMPacketMetaclass(type):
    """Metaclass that implements PEP 487 protocol"""
    def __init__(cls, name, bases, attrs, **kw):
        super().__init__(name, bases, attrs, **kw)
        parent_class = super(cls, cls)
        # Emulate __init_subclass__ (PEP 487): notify the parent class each
        # time a new subclass is created, so packets can self-register.
        if hasattr(parent_class, '__init_subclass_custom__'):
            parent_class.__init_subclass_custom__(cls, **kw) #pylint: disable=no-member
class SMPacket(metaclass=_SMPacketMetaclass):
    """ Main class for declare/parse packet """
    _command_type = smcommand.SMCommand
    _payload = []       # wire layout: list of (payload_type, name, opt) tuples
    _subclasses = {}    # registry: command -> concrete packet class
    command = None
    def __init__(self, **kwargs):
        # Bind the class-level command on the instance; remaining keyword
        # arguments become the packet options.
        self.command = self.command
        if "_command" in kwargs:
            kwargs.pop("_command")
        self.opts = kwargs
    def __init_subclass_custom__(cls, **_kwargs): #pylint: disable=no-self-argument
        # Register every subclass that declares a command (called via the
        # metaclass each time a packet class is defined).
        command = cls.command
        if not command:
            return
        if command in cls._subclasses:
            raise ValueError("Command already defined")
        cls._subclasses[command] = cls
    def __len__(self):
        # 1 byte for the command + the encoded payload length.
        return 1 + len(self.payload)
    def __str__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join(['%s="%s"' % (k, v) for k, v in self.opts.items()]))
    def __repr__(self):
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join(['%s="%s"' % (k, v) for k, v in self.opts.items()]))
    def __getitem__(self, value):
        return self.opts[value]
    def __setitem__(self, key, value):
        self.opts[key] = value
    def get(self, value, default=None):
        # Dict-style access to the packet options.
        return self.opts.get(value, default)
    @classmethod
    def new(cls, command, **kwargs):
        """
        Return an instance with the corresponding command.

        If no command is found, return None

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> print(SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg"))
        <SMPacketServerNSCCM message="msg">
        """
        if command not in cls._subclasses:
            return None
        return cls._subclasses[command](**kwargs)
    @classmethod
    def get_class(cls, command):
        """
        Get the class which have the corresponding command

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> print(SMPacket.get_class(smcommand.SMServerCommand.NSCCM))
        <class 'smserver.smutils.smpacket.smpacket.SMPacketServerNSCCM'>
        """
        return cls._subclasses.get(command, None)
    @property
    def binarycommand(self):
        """
        Return the command in a binary string

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg")
        >>> print(packet.binarycommand)
        b'\\x87'
        """
        return self.command.value.to_bytes(1, byteorder='big')
    @property
    def binarysize(self):
        """
        Return the size of the packet in a 4 bytes string.

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg")
        >>> print(packet.binarysize)
        b'\\x00\\x00\\x00\\x05'
        """
        return len(self).to_bytes(4, byteorder='big')
    @property
    def data(self):
        """
        Return the command + payload in a binary string

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg")
        >>> print(packet.data)
        b'\\x87msg\\x00'
        """
        return self.binarycommand + self.payload
    @property
    def binary(self):
        """
        Return the full binary encoded packet (size + command + payload)

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg")
        >>> print(packet.binary)
        b'\\x00\\x00\\x00\\x05\\x87msg\\x00'
        """
        return self.binarysize + self.data
    @property
    def payload(self):
        """
        Return the payload encoded in binary

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCCM, message="msg")
        >>> print(packet.payload)
        b'msg\\x00'
        """
        return smencoder.BinaryEncoder.encode(self.opts, self._payload)
    @property
    def json(self):
        """
        Return the JSON encoded packet

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> packet = SMPacket.new(smcommand.SMServerCommand.NSCPing)
        >>> print(packet.json)
        {"_command": 128}
        """
        return smencoder.JSONEncoder.encode(self.opts, self._payload, command=self.command.value)
    @classmethod
    def from_payload(cls, payload):
        """
        Decode the given binary payload

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> payload_data = b'msg\\x00'
        >>> print(SMPacketServerNSCCM.from_payload(payload_data))
        <SMPacketServerNSCCM message="msg">
        """
        return cls(
            **smencoder.BinaryEncoder.decode(payload, cls._payload)[1]
        )
    @classmethod
    def from_json(cls, payload):
        """
        Decode a JSON encoded packet

        :Example:

        >>> from smserver.smutils.smpacket import *
        >>> json_data = '{"message": "msg"}'
        >>> print(SMPacketServerNSCCM.from_json(json_data))
        <SMPacketServerNSCCM message="msg">
        """
        return cls(
            **smencoder.JSONEncoder.decode(payload, cls._payload)
        )
    def to_(self, encoding):
        """
        Encode the packet to the specified format (json or binary)
        """
        return {
            "json": self.json,
            "binary": self.binary
        }[encoding]
    @classmethod
    def from_(cls, encoding, data):
        """
        Decode the packet from the specified format (json or binary)
        """
        return {
            "json": cls.parse_json,
            "binary": cls.parse_binary
        }[encoding](data)
    @classmethod
    def parse_json(cls, data):
        """ Parse a JSON packet """
        try:
            opts = json.loads(data)
        except ValueError:
            # Not valid JSON: not a packet we understand.
            return None
        command = cls._command_type.get(opts.get("_command", -1))
        if not command:
            return None
        return cls.get_class(command).from_json(data)
    @classmethod
    def parse_data(cls, data):
        """ Parse a binary packet (command byte + payload, without size) """
        if not data:
            return None
        command = cls._command_type.get(data[0])
        if not command:
            return None
        return cls.get_class(command).from_payload(data[1:])
    @classmethod
    def parse_binary(cls, binary):
        """ Parse a full binary packet (4-byte size prefix + data) """
        if len(binary) < 4:
            return None
        return cls.parse_data(binary[4:])
class SMOPacketClient(SMPacket):
    # Base class for SMOnline packets sent by the client.
    _command_type = smcommand.SMOClientCommand
class SMOPacketServer(SMPacket):
    # Base class for SMOnline packets sent by the server.
    _command_type = smcommand.SMOServerCommand
class SMOPacketClientLogin(SMOPacketClient):
    """SMOnline client login request (player number, encryption mode, credentials)."""
    command = smcommand.SMOClientCommand.LOGIN
    _payload = [
        (smencoder.SMPayloadType.INT, "player_number", None),
        (smencoder.SMPayloadType.INT, "encryption", None),
        (smencoder.SMPayloadType.NT, "username", None),
        (smencoder.SMPayloadType.NT, "password", None)
    ]
class SMOPacketClientEnterRoom(SMOPacketClient):
    """SMOnline request to enter or leave a room (see the `enter` flag)."""
    command = smcommand.SMOClientCommand.ENTERROOM
    _payload = [
        (smencoder.SMPayloadType.INT, "enter", None),
        (smencoder.SMPayloadType.NT, "room", None),
        (smencoder.SMPayloadType.NT, "password", None)
    ]
class SMOPacketClientCreateRoom(SMOPacketClient):
    """SMOnline request to create a new room."""
    command = smcommand.SMOClientCommand.CREATEROOM
    _payload = [
        (smencoder.SMPayloadType.INT, "type", None),
        (smencoder.SMPayloadType.NT, "title", None),
        (smencoder.SMPayloadType.NT, "description", None),
        (smencoder.SMPayloadType.NT, "password", None)
    ]
class SMOPacketClientRoomInfo(SMOPacketClient):
    """SMOnline request for information about the given room."""
    command = smcommand.SMOClientCommand.ROOMINFO
    _payload = [
        (smencoder.SMPayloadType.NT, "room", None)
    ]
class SMOPacketServerLogin(SMOPacketServer):
    """SMOnline server response to a login attempt."""
    command = smcommand.SMOServerCommand.LOGIN
    _payload = [
        (smencoder.SMPayloadType.INT, "approval", None),
        (smencoder.SMPayloadType.NT, "text", None)
    ]
class SMOPacketServerRoomUpdate(SMOPacketServer):
    """SMOnline room update.

    The payload layout depends on ``type``: each MAP entry is only
    encoded for the ``type`` value it is keyed on (0: a single room's
    details, 1: the whole room list).
    """
    command = smcommand.SMOServerCommand.ROOMUPDATE
    _payload = [
        (smencoder.SMPayloadType.INT, "type", None),
        (smencoder.SMPayloadType.MAP, "room_title", ("type", {
            0: (smencoder.SMPayloadType.NT, None, None),
        })),
        (smencoder.SMPayloadType.MAP, "room_description", ("type", {
            0: (smencoder.SMPayloadType.NT, None, None),
        })),
        (smencoder.SMPayloadType.MAP, "room_type", ("type", {
            0: (smencoder.SMPayloadType.INT, None, 1),
        })),
        (smencoder.SMPayloadType.MAP, "subroom", ("type", {
            0: (smencoder.SMPayloadType.INT, None, 1),
        })),
        (smencoder.SMPayloadType.MAP, "nb_rooms", ("type", {
            1: (smencoder.SMPayloadType.INT, None, 1),
        })),
        (smencoder.SMPayloadType.MAP, "rooms", ("type", {
            1: (smencoder.SMPayloadType.LIST, None, ("nb_rooms", [
                (smencoder.SMPayloadType.NT, "title", None),
                (smencoder.SMPayloadType.NT, "description", None),
            ])),
        })),
        (smencoder.SMPayloadType.MAP, "room_status", ("type", {
            1: (smencoder.SMPayloadType.INTLIST, None, (1, "nb_rooms")),
        })),
        (smencoder.SMPayloadType.MAP, "room_flags", ("type", {
            1: (smencoder.SMPayloadType.INTLIST, None, (1, "nb_rooms")),
        })),
    ]
class SMOPacketServerGeneralInfo(SMOPacketServer):
    """SMOnline general server information."""
    command = smcommand.SMOServerCommand.GENERALINFO
    _payload = [
        (smencoder.SMPayloadType.INT, "format", None),
    ]
class SMOPacketServerRoomInfo(SMOPacketServer):
    """SMOnline detailed room information (current song and player list)."""
    command = smcommand.SMOServerCommand.ROOMINFO
    _payload = [
        (smencoder.SMPayloadType.NT, "song_title", None),
        (smencoder.SMPayloadType.NT, "song_subtitle", None),
        (smencoder.SMPayloadType.NT, "song_artist", None),
        (smencoder.SMPayloadType.INT, "num_players", None),
        (smencoder.SMPayloadType.INT, "max_players", None),
        (smencoder.SMPayloadType.NTLIST, "players", "num_players"),
    ]
class SMPacketClientNSCPing(SMPacket):
    """
    Client command 000. (Ping)

    This command will cause server to respond with a PingR Command

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCPing()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x00'
    """
    command = smcommand.SMClientCommand.NSCPing
    # No payload: the packet is just the one-byte command.
    _payload = []
class SMPacketClientNSCPingR(SMPacket):
    """
    Client command 001. (Ping response)

    This command is used to respond to Ping Command.

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCPingR()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x01'
    """
    command = smcommand.SMClientCommand.NSCPingR
    _payload = []
class SMPacketClientNSCHello(SMPacket):
    """
    Client command 002. (Hello)

    This is the first packet from a client to server.

    :param int version: Client protocol version
    :param str name: Name of the stepmania build

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCHello(
    ...     name="stepmania",
    ...     version=128
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x0c\\x02\\x80stepmania\\x00'
    """
    command = smcommand.SMClientCommand.NSCHello
    # Wire order: one version byte, then the NUL-terminated build name.
    _payload = [
        (smencoder.SMPayloadType.INT, "version", None),
        (smencoder.SMPayloadType.NT, "name", None)
    ]
class SMPacketClientNSCGSR(SMPacket):
    """
    Client command 003 (Game Start Request)

    This command is called once after most loading is done, and again
    immediately before the sound starts.

    The server has to respond with a SMPacketServerNSCGSR, if not the
    client will freeze.

    :param int first_player_feet: Primary player feet (0 for no player)
    :param int second_player_feet: Secondary player feet (0 for no player)
    :param int first_player_difficulty: Primary player difficulty (0=Beginner, 1=easy, etc.)
    :param int second_player_difficulty: Secondary player difficulty (0=Beginner, 1=easy, etc.)
    :param int start_position: (0 is pre-sync, 1 is for sync)
    :param int reserved: ignored
    :param str song_title: Title of the song to play
    :param str song_subtitle: Subtitle of the song to play
    :param str song_artist: Artist of the song to play
    :param str course_title: Course Title
    :param str song_options: Song option in string format
    :param str first_player_options: Primary player's option
    :param str second_player_options: Secondary player's option
    """
    command = smcommand.SMClientCommand.NSCGSR
    # MSN/LSN pairs presumably share one byte (most/least significant
    # nibble) -- see smencoder; TODO confirm against the encoder.
    _payload = [
        (smencoder.SMPayloadType.MSN, "first_player_feet", None),
        (smencoder.SMPayloadType.LSN, "second_player_feet", None),
        (smencoder.SMPayloadType.MSN, "first_player_difficulty", None),
        (smencoder.SMPayloadType.LSN, "second_player_difficulty", None),
        (smencoder.SMPayloadType.MSN, "start_position", None),
        (smencoder.SMPayloadType.LSN, "reserved", None),
        (smencoder.SMPayloadType.NT, "song_title", None),
        (smencoder.SMPayloadType.NT, "song_subtitle", None),
        (smencoder.SMPayloadType.NT, "song_artist", None),
        (smencoder.SMPayloadType.NT, "course_title", None),
        (smencoder.SMPayloadType.NT, "song_options", None),
        (smencoder.SMPayloadType.NT, "first_player_options", None),
        (smencoder.SMPayloadType.NT, "second_player_options", None),
    ]
class SMPacketClientNSCGON(SMPacket):
    """
    Client command 004 (Game Over Notice)

    This command is sent when end of game is encounter.

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCGON()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x04'
    """
    command = smcommand.SMClientCommand.NSCGON
class SMPacketClientNSCGSU(SMPacket):
    """
    Client command 005 (Game Status update)

    Update game info for each step in the game

    :param int player_id: player # (0 or 1)
    :param int step_id: (1: hitMine, 2: AvoidMine, ...)
    :param int grade: Projected Grade (0: AAAA, 1: AAA, ...)
    :param int reserved: ignored
    :param int score: Actual score
    :param int combo: Actual combo
    :param int health: Actual health
    :param int offset: Offset from the note (32767=miss)
    """
    command = smcommand.SMClientCommand.NSCGSU
    _payload = [
        (smencoder.SMPayloadType.MSN, "player_id", None),
        (smencoder.SMPayloadType.LSN, "step_id", None),
        (smencoder.SMPayloadType.MSN, "grade", None),
        (smencoder.SMPayloadType.LSN, "reserved", None),
        (smencoder.SMPayloadType.INT, "score", 4),
        (smencoder.SMPayloadType.INT, "combo", 2),
        (smencoder.SMPayloadType.INT, "health", 2),
        (smencoder.SMPayloadType.INT, "offset", 2)
    ]
class SMPacketClientNSCSU(SMPacket):
    """
    Client command 006 (Style Update)

    This is sent when a profile is chosen. It also indicates the number
    of players in the local client. (1 or 2)

    :param int nb_players: Number of players in the client (1 or 2)
    :param int player_id: Player ID (0 or 1)
    :param str player_name: Player name

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCSU(
    ...     nb_players=2,
    ...     player_id=0,
    ...     player_name="profile1",
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x0c\\x06\\x02\\x00profile1\\x00'
    """
    command = smcommand.SMClientCommand.NSCSU
    _payload = [
        (smencoder.SMPayloadType.INT, "nb_players", None),
        (smencoder.SMPayloadType.INT, "player_id", None),
        (smencoder.SMPayloadType.NT, "player_name", None),
    ]
class SMPacketClientNSCCM(SMPacket):
    """
    Client command 007 (Chat Message)

    The user typed a message for general chat.

    :param str message: The message sent by the client.

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCCM(message="Client message")
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x10\\x07Client message\\x00'
    """
    command = smcommand.SMClientCommand.NSCCM
    _payload = [
        (smencoder.SMPayloadType.NT, "message", None),
    ]
class SMPacketClientNSCRSG(SMPacket):
    """
    Client command 008 (Request Start Game)

    Request Start Game and Tell server existence/non-existence of song:
    The user selected a song on a Net-enabled selection

    :param int usage: Usage for this message
    :param str song_title: Song title
    :param str song_subtitle: Song subtitle
    :param str song_artist: Song artist

    :Example:

    >>> # Client select the song ('Title', by 'Artist').
    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketClientNSCRSG(
    ...     usage=2,
    ...     song_title="Title",
    ...     song_artist="Artist",
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x10\\x08\\x02Title\\x00Artist\\x00\\x00'
    """
    command = smcommand.SMClientCommand.NSCRSG
    # NOTE: wire order is usage, title, ARTIST, subtitle (artist precedes
    # subtitle on the wire, unlike the usual title/subtitle/artist order).
    _payload = [
        (smencoder.SMPayloadType.INT, "usage", 1),
        (smencoder.SMPayloadType.NT, "song_title", None),
        (smencoder.SMPayloadType.NT, "song_artist", None),
        (smencoder.SMPayloadType.NT, "song_subtitle", None),
    ]
class SMPacketClientNSCCUUL(SMPacket):
    """
    Client command 009 (reserved)
    """
    command = smcommand.SMClientCommand.NSCCUUL
class SMPacketClientNSSCSMS(SMPacket):
    """
    Client command 010 (User status)

    Indicate where the user is

    :param int action: Int enum indicating where the user is

    Action available:

    * 0: exited ScreenNetSelectMusic
    * 1: entered ScreenNetSelectMusic
    * 2: Not Sent
    * 3: entered options screen
    * 4: exited the evaluation screen
    * 5: entered evaluation screen
    * 6: exited ScreenNetRoom
    * 7: entered ScreenNetRoom

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> # Client enter in room selection
    >>> packet = smpacket.SMPacketClientNSSCSMS(
    ...     action=7,
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x02\\n\\x07'
    """
    command = smcommand.SMClientCommand.NSSCSMS
    _payload = [
        (smencoder.SMPayloadType.INT, "action", None),
    ]
class SMPacketClientNSCUOpts(SMPacket):
    """
    Client command 011 (User options)

    User has changed player's options

    :param str player_0: Player 0 options
    :param str player_1: Player 1 options
    """
    command = smcommand.SMClientCommand.NSCUOpts
    _payload = [
        (smencoder.SMPayloadType.NT, "player_0", None),
        (smencoder.SMPayloadType.NT, "player_1", None),
    ]
class SMPacketClientNSSMONL(SMPacket):
    """
    Client command 012 (SMOnline Packet)

    The SMLan packet 12 is a wrapper for the SMOnline packet.

    :param packet: The SMOPacket to include
    :type packet: SMOPacketClient
    """
    command = smcommand.SMClientCommand.NSSMONL
    # The nested SMOnline packet is encoded inline as this packet's payload.
    _payload = [
        (smencoder.SMPayloadType.PACKET, "packet", SMOPacketClient)
    ]
class SMPacketClientNSCFormatted(SMPacket):
    """
    Client command 013 (reserved)
    """
    command = smcommand.SMClientCommand.NSCFormatted
class SMPacketClientNSCAttack(SMPacket):
    """
    Client command 014 (reserved)
    """
    command = smcommand.SMClientCommand.NSCAttack
class SMPacketClientXMLPacket(SMPacket):
    """
    Client command 15 (XMLPacket)

    This packet contains data in XML format.

    :param str xml: XML string
    """
    command = smcommand.SMClientCommand.XMLPacket
    _payload = [
        (smencoder.SMPayloadType.NT, "xml", None),
    ]
class SMPacketServerNSCPing(SMPacket):
    """
    Server command 128 (Ping)

    This command will cause client to respond with a PingR command

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCPing()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x80'
    """
    command = smcommand.SMServerCommand.NSCPing
class SMPacketServerNSCPingR(SMPacket):
    """
    Server command 129 (PingR)

    This command is used to respond to a Ping command.

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCPingR()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x81'
    """
    command = smcommand.SMServerCommand.NSCPingR
class SMPacketServerNSCHello(SMPacket):
    """
    Server command 130 (Hello)

    This command introduces the server. (In response of Client Hello
    command)

    :param str version: The server protocol version (always 128)
    :param str name: Name of the server
    :param int key: Random key, used for hash password

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCHello(
    ...     version=128,
    ...     name="MyServer",
    ...     key=999999999
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x0f\\x82\\x80MyServer\\x00;\\x9a\\xc9\\xff'
    """
    command = smcommand.SMServerCommand.NSCHello
    # Wire order: version byte, NUL-terminated name, 4-byte key.
    _payload = [
        (smencoder.SMPayloadType.INT, "version", None),
        (smencoder.SMPayloadType.NT, "name", None),
        (smencoder.SMPayloadType.INT, "key", 4)
    ]
class SMPacketServerNSCGSR(SMPacket):
    """
    Server command 131 (Allow Start)

    This will cause the client to start the game

    :Example:

    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCGSR()
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x01\\x83'
    """
    command = smcommand.SMServerCommand.NSCGSR
class SMPacketServerNSCGON(SMPacket):
    """
    Server command 132 (Game over stats)

    This packet is send in response to the game over packet. It
    contains information regarding how well each player did.

    :param int nb_players: NB of players stats in this packet (size of the next list)
    :param list ids: Player's ID (calculate from the SMPacketServerNSCUUL)
    :param list score: Player's score
    :param list grade: Player's grade
    :param list difficulty: Player's difficulty
    :param list flawless: NB of flawless note
    :param list perfect: NB of perfect note
    :param list great: NB of great note
    :param list good: NB of good note
    :param list bad: NB of bad note
    :param list miss: NB of miss note
    :param list held: NB of held note
    :param list max_combo: Player's max combo
    :param list options: Player's options
    """
    command = smcommand.SMServerCommand.NSCGON
    # Each INTLIST is (bytes-per-entry, length-field): one entry per player.
    _payload = [
        (smencoder.SMPayloadType.INT, "nb_players", 1),
        (smencoder.SMPayloadType.INTLIST, "ids", (1, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "score", (4, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "grade", (1, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "difficulty", (1, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "flawless", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "perfect", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "great", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "good", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "bad", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "miss", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "held", (2, "nb_players")),
        (smencoder.SMPayloadType.INTLIST, "max_combo", (2, "nb_players")),
        (smencoder.SMPayloadType.NTLIST, "options", "nb_players"),
    ]
class SMPacketServerNSCGSU(SMPacket):
    """
    Server command 133 (Scoreboard update)
    This will update the client's scoreboard.
    :param int section: Which section to update (0: names, 1: combos, 2: grades)
    :param int nb_players: Nb of players in this packet
    :param list options: Int list containing names, combos or grades
    :Example:
    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCGSU(
    ...     section=1, # Update the actual combo
    ...     nb_players=2, # 2 users in this packet
    ...     options=[12, 5] # List containing the combos
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x07\\x85\\x01\\x02\\x00\\x0c\\x00\\x05'
    """
    command = smcommand.SMServerCommand.NSCGSU
    # Entry width in ``options`` depends on ``section``: names (0) and
    # grades (2) are 1 byte each, combos (1) are 2 bytes each.
    _payload = [
        (smencoder.SMPayloadType.INT, "section", 1),
        (smencoder.SMPayloadType.INT, "nb_players", 1),
        (smencoder.SMPayloadType.MAP, "options", ("section", {
            0: (smencoder.SMPayloadType.INTLIST, None, (1, "nb_players")),
            1: (smencoder.SMPayloadType.INTLIST, None, (2, "nb_players")),
            2: (smencoder.SMPayloadType.INTLIST, None, (1, "nb_players")),
        }))
    ]
class SMPacketServerNSCSU(SMPacket):
"""
Server command 134 (System Message)
Send a system message to user
:param str message: The message to send
:Example:
>>> from smserver.smutils.smpacket import smpacket
>>> packet = smpacket.SMPacketServerNSCSU(message="System message")
>>> print(packet.binary)
b'\\x00\\x00\\x00\\x10\\x86System message\\x00'
"""
command = smcommand.SMServerCommand.NSCSU
_payload = [
(smencoder.SMPayloadType.NT, "message", None)
]
class SMPacketServerNSCCM(SMPacket):
    """
    Server command 135 (Chat Message)
    Add a chat message to the chat window on some StepMania screens.
    :param str message: The message to add
    :Example:
    >>> from smserver.smutils.smpacket import smpacket
    >>> packet = smpacket.SMPacketServerNSCCM(message="Client message")
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x10\\x87Client message\\x00'
    """
    command = smcommand.SMServerCommand.NSCCM
    _payload = [
        (smencoder.SMPayloadType.NT, "message", None)
    ]
class SMPacketServerNSCRSG(SMPacket):
"""
Server command 136 (Request Start Game)
Tell client to start song/ask if client has song
:param int usage: Usage of this message
:param str song_title: Song title
:param str song_artist: Song artist
:param str song_subtitle: Song subtitle
Usage available:
* 0: See if client has song
* 1: See if client has song, if so, scroll to song
* 2: See if client has song, if so, scroll to song, and play that song
* 3: Blindly start song
:Example:
>>> from smserver.smutils.smpacket import smpacket
>>> packet = smpacket.SMPacketServerNSCRSG(
... usage=0, # Check song presence
... song_title="title",
... song_artist="artist",
... song_subtitle="subtitle",
... )
>>> print(packet.binary)
b'\\x00\\x00\\x00\\x18\\x88\\x00title\\x00artist\\x00subtitle\\x00'
"""
command = smcommand.SMServerCommand.NSCRSG
_payload = [
(smencoder.SMPayloadType.INT, "usage", 1),
(smencoder.SMPayloadType.NT, "song_title", None),
(smencoder.SMPayloadType.NT, "song_artist", None),
(smencoder.SMPayloadType.NT, "song_subtitle", None),
]
class SMPacketServerNSCCUUL(SMPacket):
"""
Server command 137 (Update user list)
This sends all the users currently connected
:param int max_players: NB max of players (max 255)
:param int nb_players: NB of player's in this packet
:param list players: List containing status and name for each user
"""
command = smcommand.SMServerCommand.NSCCUUL
_payload = [
(smencoder.SMPayloadType.INT, "max_players", 1),
(smencoder.SMPayloadType.INT, "nb_players", 1),
(smencoder.SMPayloadType.LIST, "players", ("nb_players", [
(smencoder.SMPayloadType.INT, "status", 1),
(smencoder.SMPayloadType.NT, "name", None),
])
)
]
class SMPacketServerNSSCSMS(SMPacket):
"""
Server command 138
Force change to Networking select music screen.
:param str gametype: Set specified gametype
:param str style: Set specified style
"""
command = smcommand.SMServerCommand.NSSCSMS
_payload = [
(smencoder.SMPayloadType.NT, "gametype", None),
(smencoder.SMPayloadType.NT, "style", None),
]
class SMPacketServerNSCUOpts(SMPacket):
"""
Server command 139 (reserved)
"""
command = smcommand.SMServerCommand.NSCUOpts
class SMPacketServerNSSMONL(SMPacket):
"""
Server command 140 (SMOnline Packet)
The SMLan packet 140 is a wrapper for the SMOnline packet.
:param packet: The SMOPacket to include
:type packet: SMOPacketServer
"""
command = smcommand.SMServerCommand.NSSMONL
_payload = [
(smencoder.SMPayloadType.PACKET, "packet", SMOPacketServer)
]
class SMPacketServerNSCFormatted(SMPacket):
"""
Server command 141 (Formatted information packet)
Send formatted information regarding the server back to the player.
:param str server_name: Server name
:param int server_port: Port the server is listening on
:param int nb_players: Number of players connected
"""
command = smcommand.SMServerCommand.NSCFormatted
_payload = [
(smencoder.SMPayloadType.NT, "server_name", None),
(smencoder.SMPayloadType.INT, "server_port", 2),
(smencoder.SMPayloadType.INT, "nb_players", 2),
]
class SMPacketServerNSCAttack(SMPacket):
    """
    Server command 142 (Attack Client)
    :param int player: Player number (0 or 1)
    :param int time: Duration of the attack (in ms)
    :param attack: Text describing modifiers
    :type attack: str or smserver.smutils.smattack.SMAttack
    List of attack available are in smattack module.
    :Example:
    >>> from smserver.smutils.smpacket import smpacket
    >>> from smserver.smutils import smattack
    >>> packet = smpacket.SMPacketServerNSCAttack(
    ...     player=0, # Send the attack to the player 0
    ...     time=1000, # The attack will last 1 second
    ...     attack='drunk', #Send a drunk attack
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x0c\\x8e\\x00\\x00\\x00\\x03\\xe8drunk\\x00'
    >>> packet = smpacket.SMPacketServerNSCAttack(
    ...     player=0,
    ...     time=1000,
    ...     attack=smattack.SMAttack.Drunk, # Use an Enum value
    ... )
    >>> print(packet.binary)
    b'\\x00\\x00\\x00\\x0c\\x8e\\x00\\x00\\x00\\x03\\xe8drunk\\x00'
    """

    def __init__(self, player=0, time=1000, attack=None):
        # Accept either a plain string or an SMAttack enum member.
        # Bug fix: the original unconditionally read ``attack.value`` for
        # non-str inputs, which raised AttributeError when constructed with
        # the default ``attack=None``.
        if attack is not None and not isinstance(attack, str):
            attack = attack.value
        SMPacket.__init__(self, player=player, time=time, attack=attack)

    command = smcommand.SMServerCommand.NSCAttack
    _payload = [
        (smencoder.SMPayloadType.INT, "player", 1),
        (smencoder.SMPayloadType.INT, "time", 4),
        (smencoder.SMPayloadType.NT, "attack", None),
    ]
class SMPacketServerXMLPacket(SMPacket):
"""
Server command 143 (XMLPacket)
This packet contains data in XML format.
:param str xml: XML string
"""
command = smcommand.SMServerCommand.XMLPacket
_payload = [
(smencoder.SMPayloadType.NT, "xml", None),
]
| mit |
bobvanderlinden/machinekit | configs/sim/axis/orphans/pysubs/remap.py | 28 | 7604 | import os
import signal
from interpreter import *
import emccanon
#
# to remap Tx (prepare) to an NGC file 'prepare.ngc', incantate like so:
#
# REMAP=T prolog=prepare_prolog epilog=prepare_epilog ngc=prepare
# This means:
#
# prolog=prepare_prolog
# before calling prepare.ngc, execute the Python function 'prepare_prolog'
# ngc=prepare
# your O-word procedure goes to prepare.ngc
# epilog=prepare_epilog
# after calling prepare.ngc, execute the Python function 'prepare_epilog'
#
def prepare_prolog(self, userdata,**words):
    """Prolog for the remapped Tx (tool prepare) command.

    Resolves the pocket for the requested tool number and exposes #<tool>
    and #<pocket> as local named parameters to the prepare.ngc O-word sub.
    Returns INTERP_OK, or an error status if the tool is not in the table.
    """
    self.params[5599] = 1 # turn on DEBUG, output
    self._tool = int(words['t'])
    if self._tool:
        (status,self._pocket) = self.find_tool_pocket(self._tool)
        if status != INTERP_OK:
            self.set_errormsg("T%d: pocket not found" % (self._tool))
            return status
    else:
        self._pocket = -1 # this is an T0 - tool unload
    # these variables will be visible in the ngc oword sub
    # as #<tool> and #<pocket> as local variables
    self.params["tool"] = self._tool
    self.params["pocket"] = self._pocket
    return INTERP_OK
# The minimal ngc prepare procedure looks like so:
#
# o<r_prepare> sub
# (debug, r_prepare tool=#<tool> pocket=#<pocket>)
#
# returning a negative value fails the prepare command
# returning a positive value sets this value as the new pocket:
# o<r_prepare> endsub [#<pocket>]
# m2
# the prepare epilog looks at the return value from the NGC procedure
# and does the right thing:
def prepare_epilog(self, userdata, **words):
    """Epilog for the remapped Tx: commit or abort the tool prepare.

    A non-negative return value from prepare.ngc commits the tool/pocket
    stashed by prepare_prolog; a negative value fails the prepare command.
    """
    #print "prepare_epilog cl=",self.call_level, self._pocket
    retval = self.return_value
    if retval >= 0:
        self.selected_pocket = self._pocket
        self.selected_tool = self._tool
        emccanon.SELECT_POCKET( self._pocket, self._tool)
        return INTERP_OK
    else:
        self.set_errormsg("T%d: aborted (return code %.4f)" % (self._tool,retval))
        return INTERP_ERROR
#
# M6 remapped to a NGC handler, with Python prolog+epilog
#
# Incantation:
# REMAP=M6 modalgroup=6 argspec=- prolog=change_prolog ngc=change epilog=change_epilog
#
def change_prolog(self, userdata,**words):
    """Prolog for the remapped M6 (tool change).

    Sanity-checks that a tool was prepared and that cutter radius
    compensation is off, then exposes #<tool_in_spindle> and
    #<selected_pocket> to the change.ngc O-word sub.
    """
    if self.selected_pocket < 0:
        self.set_errormsg("Need tool prepared -Txx- for toolchange")
        return INTERP_ERROR
    if self.cutter_comp_side:
        self.set_errormsg("Cannot change tools with cutter radius compensation on")
        return INTERP_ERROR
    # bug in interp_convert.cc: WONT WORK - isnt valid anymore
    ## settings->selected_pocket);
    ## settings->tool_table[0].toolno, <--- BROKEN
    ## block->t_number,
    #self.params["prepared" ] = 2
    self.params["tool_in_spindle"] = self.current_tool
    self.params["selected_pocket"] = self.selected_pocket
    return INTERP_OK
def change_epilog(self, userdata,**words):
retval = self.return_value
print "change_epilog retval=% selected_pocket=%d" %(retval,self.selected_pocket)
if retval > 0:
# commit change
#emccanon.CHANGE_TOOL(self.selected_pocket)
emccanon.CHANGE_TOOL(self.selected_tool)
self.current_pocket = self.selected_pocket
# cause a sync()
self.tool_change_flag = True
self.set_tool_parameters()
return INTERP_OK
else:
self.set_errormsg("M6 aborted (return code %.4f)" % (retval))
return INTERP_ERROR
# M61 remapped to an all-Python handler
# demo - this really does the same thing as the builtin (non-remapped) M61
#
# Incantation:
#
# REMAP=M61 python=set_tool_number
#
# This means:
#
# argspec=Q- :
# a mandatory Q parameter word is required, others are ignored
#
# python=set_tool_number
# the following function is executed on M61:
#
def set_tool_number(self, userdata,**words):
    """All-Python handler for the remapped M61 (set current tool number).

    Looks up the pocket for tool Q and, for a valid (non-negative) Q,
    makes it the current pocket and triggers a sync.
    """
    toolno = int(words['q'])
    (status,pocket) = self.find_tool_pocket(toolno)
    if status != INTERP_OK:
        self.set_errormsg("M61 failed: requested tool %d not in table" % (toolno))
        return status
    if words['q'] > -TOLERANCE_EQUAL:
        self.current_pocket = pocket
        emccanon.CHANGE_TOOL_NUMBER(pocket)
        # test: self.tool_table[0].offset.tran.z = self.tool_table[pocket].offset.tran.z
        # cause a sync()
        self.tool_change_flag = True
        self.set_tool_parameters()
        return INTERP_OK
    else:
        # Bug fix: "Q=%4" was an incomplete format spec and raised
        # ValueError instead of producing the message; use %d.
        self.set_errormsg("M61 failed: Q=%d" % (toolno))
        return INTERP_ERROR
#
# This demonstrates how queuebusters
# (toolchange, wait for input, probe) can be dealt with in Python handlers.
#
# on the initial call, userdata equals zero.
# if a queuebuster is executed, the function is expected to return
# (INTERP_EXECUTE_FINISH,<optional new userdata value>
#
# Post sync, the function is called again with the userdata value
# returned previously for continuation.
#
def test_reschedule(self, userdata,**words):
    """Demo handler showing queuebuster handling (see comment block above).

    First call (userdata == 0): issue a WAIT (a queuebuster) and ask to be
    re-called after sync() by returning (INTERP_EXECUTE_FINISH, userdata+1).
    Second call (userdata > 0): read the pin and finish.
    """
    if userdata > 0:
        # we were called post-sync():
        pin_status = emccanon.GET_EXTERNAL_DIGITAL_INPUT(0,0);
        print "pin status=",pin_status
        return INTERP_OK # done
    else:
        # wait for digital-input 00 to go hi for 5secs
        emccanon.WAIT(0,1,2,5.0)
        # pls call again after sync() with new userdata value
        return (INTERP_EXECUTE_FINISH,userdata + 1)
#------ demonstrate task signal handlers --
def gen_backtrace(self, userdata,**words):
    """Debug helper: signal the embedding task (SIGUSR2) to dump a backtrace.

    Only acts when running embedded in task ('emctask' is built in).
    """
    if 'emctask' in sys.builtin_module_names:
        os.kill(os.getpid(), signal.SIGUSR2)
    return INTERP_OK
def gdb_window(self, userdata,**words):
    """Debug helper: signal the embedding task (SIGUSR1).

    Presumably pops a gdb window in task's SIGUSR1 handler -- confirm
    against the task signal handlers.
    """
    if 'emctask' in sys.builtin_module_names:
        os.kill(os.getpid(), signal.SIGUSR1)
    return INTERP_OK
#---------------- debugging fluff ----------
# named parameters table
def symbols(self, userdata, **words):
    """Debug helper: dump the interpreter's named-parameter table.

    The boolean passed down is True when a P word was given; its effect
    is defined by print_named_params -- TODO confirm.
    """
    self.print_named_params(words.has_key('p'))
    return INTERP_OK
# tool table access
def print_tool(self, userdata, **words):
    """Debug helper: print number and XYZ offsets of tool-table entry P."""
    n = 0
    # NOTE(review): words['p'] raises KeyError when no P word is given,
    # so the n = 0 default above is never actually reachable.
    if words['p']:
        n = int(words['p'])
    print "tool %d:" % (n)
    print "tool number:", self.tool_table[n].toolno
    print "tool offset x:", self.tool_table[n].offset.tran.x
    print "tool offset y:", self.tool_table[n].offset.tran.y
    print "tool offset z:", self.tool_table[n].offset.tran.z
    return INTERP_OK
def set_tool_zoffset(self, userdata, **words):
    """Debug helper: set the Z offset of tool-table entry P to Q."""
    n = int(words['p'])
    self.tool_table[n].offset.tran.z = words['q']
    if n == 0:
        # entry 0 appears to be the live/current tool -- push the change
        # into the interpreter's tool parameters immediately
        self.set_tool_parameters()
    return INTERP_OK
def printobj(b,header=""):
    """Debug helper: print every public (non-underscore) attribute of b."""
    print "object ",header,":"
    for a in dir(b):
        if not a.startswith('_'):
            if hasattr(b,a):
                print a,getattr(b,a)
def introspect(args,**kwargs):
    """Debug helper: dump interpreter state for the current remap level."""
    # NOTE(review): unlike the other handlers this is declared as
    # (args, **kwargs) but the body uses `self`, which is never bound in
    # this scope -- calling it raises NameError unless `self` is injected
    # as a global. The signature probably should be (self, userdata, **words)
    # like the handlers above; left untouched here.
    print "----- introspect:"
    r = self.remap_level
    print "call_level=",self.call_level, "remap_level=",self.remap_level
    print "selected_pocket=",self.selected_pocket
    print "blocks[r].comment=",self.blocks[r].comment
    print "blocks[r].seq=",self.blocks[r].line_number
    print "blocks[r].p_flag=",self.blocks[r].p_flag
    print "blocks[r].p_number=",self.blocks[r].p_number
    print "blocks[r].q_flag=",self.blocks[r].q_flag
    print "blocks[r].q_number=",self.blocks[r].q_number
    #printobj(interp,"interp")
    printobj(self.tool_offset,"tool_offset")
    callstack()
    for i in [5220,"_metric","_absolute","_tool_offset","_feed","_rpm"]:
        print "param",i,"=",self.params[i]
    print "blocks[r].executing_remap:",
    print "name=",self.blocks[r].executing_remap.name
    print "argspec=",self.blocks[r].executing_remap.argspec
    print "prolog=",self.blocks[r].executing_remap.prolog_func
    print "py=",self.blocks[r].executing_remap.remap_py
    print "ngc=",self.blocks[r].executing_remap.remap_ngc
    print "epilog=",self.blocks[r].executing_remap.epilog_func
    return INTERP_OK
def null(args,**kwargs):
    """No-op remap handler: accept anything, do nothing, succeed."""
    return INTERP_OK
def callstack():
    """Debug helper: print the interpreter O-word call stack."""
    # NOTE(review): `self` is not defined in this scope -- this only works
    # if `self` is injected as a module global before the call; as written
    # a plain call raises NameError.
    for i in range(len(self.sub_context)):
        print "-------- call_level: ",i
        print "position=",self.sub_context[i].position
        print "sequence_number=",self.sub_context[i].sequence_number
        print "filenameposition=",self.sub_context[i].filename
        print "subname=",self.sub_context[i].subname
        print "context_status=",self.sub_context[i].context_status
    return INTERP_OK
| lgpl-2.1 |
clairetang6/bokeh | tests/examples/collect_examples.py | 8 | 2668 | import yaml
import os
from os.path import join, dirname, abspath, pardir
base_dir = dirname(__file__)
example_dir = abspath(join(base_dir, pardir, pardir, 'examples'))
class Flags(object):
    """Bit flags describing an example; values are OR-combined per example."""
    file = 1 << 1      # plain .py example run as a script
    server = 1 << 2    # .py whose name contains "server" or "animate" (needs a server)
    notebook = 1 << 3  # .ipynb example
    animated = 1 << 4  # name contains "animate"
    skip = 1 << 5      # listed in the per-directory skip list
def example_type(flags):
    """Return "file", "server" or "notebook" for the type bit set in *flags*.

    Checked in that priority order; returns None when no type bit is set.
    """
    for bit, label in ((Flags.file, "file"),
                       (Flags.server, "server"),
                       (Flags.notebook, "notebook")):
        if flags & bit:
            return label
def add_examples(list_of_examples, path, example_type=None, skip=None):
    """Append (full_path, flags) tuples for every example file under *path*.

    :param example_type: force this Flags type bit instead of guessing from
        the file name ("server"/"animate" in a .py name -> server, any other
        .py -> file; .ipynb is always notebook)
    :param skip: iterable of file names to mark with Flags.skip
    :raises ValueError: if an "animate" example resolves to the file type
    """
    example_path = join(example_dir, path)
    if skip is not None:
        skip = set(skip)
    for f in os.listdir(example_path):
        flags = 0
        if f.startswith(('_', '.')):
            continue  # ignore hidden/private files
        elif f.endswith(".py"):
            if example_type is not None:
                flags |= example_type
            elif "server" in f or "animate" in f:
                flags |= Flags.server
            else:
                flags |= Flags.file
        elif f.endswith(".ipynb"):
            flags |= Flags.notebook
        else:
            continue  # not an example file at all
        if "animate" in f:
            flags |= Flags.animated
            # a forced/guessed "file" example cannot also be animated
            if flags & Flags.file:
                raise ValueError("file examples can't be animated")
        if skip and f in skip:
            flags |= Flags.skip
        list_of_examples.append((join(example_path, f), flags))
    return list_of_examples
def get_all_examples():
    """Collect (path, flags) tuples for every example listed in examples.yaml."""
    # Make a list of all the examples
    list_of_examples = []
    with open(join(dirname(__file__), "examples.yaml"), "r") as f:
        # safe_load: the manifest is plain data, and yaml.load without an
        # explicit Loader can construct arbitrary Python objects.
        examples = yaml.safe_load(f.read())
    for example in examples:
        path = example["path"]
        try:
            # "type" is optional in the manifest; missing -> guess later
            example_type = getattr(Flags, example["type"])
        except KeyError:
            example_type = None
        skip_status = example.get("skip")
        list_of_examples = add_examples(list_of_examples, path, example_type=example_type, skip=skip_status)
    return list_of_examples
def get_file_examples():
    """Return paths of all runnable "file" examples, excluding skipped ones."""
    wanted = []
    for path, flags in get_all_examples():
        if flags & Flags.file and not flags & Flags.skip:
            wanted.append(path)
    return wanted
def get_server_examples():
    """Return paths of all runnable "server" examples, excluding skipped ones."""
    wanted = []
    for path, flags in get_all_examples():
        if flags & Flags.server and not flags & Flags.skip:
            wanted.append(path)
    return wanted
def get_notebook_examples():
    """Return paths of all runnable "notebook" examples, excluding skipped ones."""
    wanted = []
    for path, flags in get_all_examples():
        if flags & Flags.notebook and not flags & Flags.skip:
            wanted.append(path)
    return wanted
| bsd-3-clause |
rdo-management/ironic | ironic/db/sqlalchemy/models.py | 5 | 7091 | # -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for baremetal data.
"""
import json
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import TypeDecorator, TEXT
from ironic.common import paths
sql_opts = [
cfg.StrOpt('mysql_engine',
default='InnoDB',
help='MySQL engine to use.')
]
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ironic.sqlite')
cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')
def table_args():
    """Return engine-specific __table_args__ for model definitions.

    MySQL connections get an explicit storage engine and charset; every
    other backend gets None (no extra table arguments).
    """
    scheme = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if scheme != 'mysql':
        return None
    return {'mysql_engine': cfg.CONF.database.mysql_engine,
            'mysql_charset': "utf8"}
class JsonEncodedType(TypeDecorator):
    """Abstract base type serialized as json-encoded string in db.

    Subclasses set ``type`` to the python type (dict, list, ...) that the
    column accepts and returns.
    """
    type = None
    impl = TEXT

    def process_bind_param(self, value, dialect):
        """Serialize *value* to a JSON string before writing to the DB.

        :raises TypeError: if *value* is neither None nor an instance of
            the declared ``type``.
        """
        if value is None:
            # Save default value according to current type to keep the
            # interface consistent.
            value = self.type()
        elif not isinstance(value, self.type):
            # Bug fix: the original message read "%s supposes to store %s
            # objects, but %s given" -- ungrammatical.
            raise TypeError("%s is supposed to store %s objects, but %s given"
                            % (self.__class__.__name__,
                               self.type.__name__,
                               type(value).__name__))
        serialized_value = json.dumps(value)
        return serialized_value

    def process_result_value(self, value, dialect):
        """Deserialize the JSON string read from the DB (None passes through)."""
        if value is not None:
            value = json.loads(value)
        return value
class JSONEncodedDict(JsonEncodedType):
    """Represents dict serialized as json-encoded string in db."""
    type = dict  # enforced by JsonEncodedType.process_bind_param
class JSONEncodedList(JsonEncodedType):
    """Represents list serialized as json-encoded string in db."""
    type = list  # enforced by JsonEncodedType.process_bind_param
class IronicBase(models.TimestampMixin,
                 models.ModelBase):
    """Common base for all ironic models.

    TimestampMixin presumably supplies created_at/updated_at columns --
    confirm against oslo_db.
    """

    metadata = None

    def as_dict(self):
        """Return a dict mapping each column name to its value."""
        d = {}
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        return d

    def save(self, session=None):
        """Persist this row, creating a new session if none is given."""
        # imported here rather than at module level, presumably to avoid a
        # circular import between models and the DB API -- confirm
        import ironic.db.sqlalchemy.api as db_api
        if session is None:
            session = db_api.get_session()
        super(IronicBase, self).save(session)
Base = declarative_base(cls=IronicBase)
class Chassis(Base):
    """Represents a hardware chassis."""

    __tablename__ = 'chassis'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    extra = Column(JSONEncodedDict)  # free-form metadata, stored as JSON
    description = Column(String(255), nullable=True)
class Conductor(Base):
    """Represents a conductor service entry."""

    __tablename__ = 'conductors'
    __table_args__ = (
        schema.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255), nullable=False)
    # JSON list -- presumably the driver names this conductor can serve
    drivers = Column(JSONEncodedList)
    online = Column(Boolean, default=True)
class Node(Base):
    """Represents a bare metal node."""

    __tablename__ = 'nodes'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
        schema.UniqueConstraint('instance_uuid',
                                name='uniq_nodes0instance_uuid'),
        schema.UniqueConstraint('name', name='uniq_nodes0name'),
        table_args())
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    # NOTE(deva): we store instance_uuid directly on the node so that we can
    #             filter on it more efficiently, even though it is
    #             user-settable, and would otherwise be in node.properties.
    instance_uuid = Column(String(36), nullable=True)
    name = Column(String(255), nullable=True)
    chassis_id = Column(Integer, ForeignKey('chassis.id'), nullable=True)
    # current and target power / provision states
    power_state = Column(String(15), nullable=True)
    target_power_state = Column(String(15), nullable=True)
    provision_state = Column(String(15), nullable=True)
    target_provision_state = Column(String(15), nullable=True)
    provision_updated_at = Column(DateTime, nullable=True)
    last_error = Column(Text, nullable=True)
    # dict columns serialized as JSON text (see JSONEncodedDict)
    instance_info = Column(JSONEncodedDict)
    properties = Column(JSONEncodedDict)
    driver = Column(String(15))
    driver_info = Column(JSONEncodedDict)
    driver_internal_info = Column(JSONEncodedDict)
    clean_step = Column(JSONEncodedDict)
    # NOTE(deva): this is the host name of the conductor which has
    #             acquired a TaskManager lock on the node.
    #             We should use an INT FK (conductors.id) in the future.
    reservation = Column(String(255), nullable=True)
    # NOTE(deva): this is the id of the last conductor which prepared local
    #             state for the node (eg, a PXE config file).
    #             When affinity and the hash ring's mapping do not match,
    #             this indicates that a conductor should rebuild local state.
    conductor_affinity = Column(Integer,
                                ForeignKey('conductors.id',
                                           name='nodes_conductor_affinity_fk'),
                                nullable=True)
    maintenance = Column(Boolean, default=False)
    maintenance_reason = Column(Text, nullable=True)
    console_enabled = Column(Boolean, default=False)
    inspection_finished_at = Column(DateTime, nullable=True)
    inspection_started_at = Column(DateTime, nullable=True)
    extra = Column(JSONEncodedDict)
class Port(Base):
    """Represents a network port of a bare metal node."""

    __tablename__ = 'ports'
    __table_args__ = (
        schema.UniqueConstraint('address', name='uniq_ports0address'),
        schema.UniqueConstraint('uuid', name='uniq_ports0uuid'),
        table_args())
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    address = Column(String(18))  # MAC address of the port
    node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
    extra = Column(JSONEncodedDict)  # free-form metadata, stored as JSON
| apache-2.0 |
chauhanhardik/populo_2 | common/djangoapps/status/tests.py | 115 | 2136 | # -*- coding: utf-8 -*-
""" Tests for setting and displaying the site status message. """
import ddt
import unittest
from django.test import TestCase
from django.core.cache import cache
from django.conf import settings
from opaque_keys.edx.locations import CourseLocator
from .status import get_site_status_msg
from .models import GlobalStatusMessage, CourseMessage
@ddt.ddt
class TestStatus(TestCase):
    """Test that the get_site_status_msg function does the right thing"""

    def setUp(self):
        super(TestStatus, self).setUp()
        # Clear the cache between test runs.
        cache.clear()
        self.course_key = CourseLocator(org='TestOrg', course='TestCourse', run='TestRun')

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @ddt.data(
        # (global message, course message) pairs, including unicode and
        # empty-string variants
        ("Test global message", "Test course message"),
        (u" Ŧɇsŧ sŧȺŧᵾs", u"Ṫëṡẗ ċöüṛṡë ṡẗäẗüṡ "),
        (u"", u"Ṫëṡẗ ċöüṛṡë ṡẗäẗüṡ "),
        (u" Ŧɇsŧ sŧȺŧᵾs", u""),
    )
    @ddt.unpack
    def test_get_site_status_msg(self, test_global_message, test_course_message):
        """Test status messages in a variety of situations."""
        # When we don't have any data set.
        self.assertEqual(get_site_status_msg(None), None)
        self.assertEqual(get_site_status_msg(self.course_key), None)
        msg = GlobalStatusMessage.objects.create(message=test_global_message, enabled=True)
        msg.save()
        self.assertEqual(get_site_status_msg(None), test_global_message)
        course_msg = CourseMessage.objects.create(
            global_message=msg, message=test_course_message, course_key=self.course_key
        )
        course_msg.save()
        # Course-scoped lookup appends the course message after the global one.
        self.assertEqual(
            get_site_status_msg(self.course_key),
            u"{} <br /> {}".format(test_global_message, test_course_message)
        )
        # A newer disabled message must turn everything off again.
        msg = GlobalStatusMessage.objects.create(message="", enabled=False)
        msg.save()
        self.assertEqual(get_site_status_msg(None), None)
        self.assertEqual(get_site_status_msg(self.course_key), None)
| agpl-3.0 |
newyork167/volatility | volatility/plugins/procdump.py | 44 | 9643 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import struct
import volatility.plugins.taskmods as taskmods
import volatility.debug as debug
import volatility.obj as obj
import volatility.exceptions as exceptions
class ProcExeDump(taskmods.DllList):
    """Dump a process to an executable file sample"""

    def __init__(self, config, *args, **kwargs):
        taskmods.DllList.__init__(self, config, *args, **kwargs)
        config.add_option('DUMP-DIR', short_option = 'D', default = None,
                          cache_invalidator = False,
                          help = 'Directory in which to dump executable files')
        config.add_option("UNSAFE", short_option = "u", default = False, action = 'store_true',
                          help = 'Bypasses certain sanity checks when creating image')

    def dump_pe(self, space, base, dump_file):
        """
        Dump a PE from an AS into a file.
        @param space: an AS to use
        @param base: PE base address
        @param dump_file: dumped file name
        @returns a string status message
        """
        of = open(os.path.join(self._config.DUMP_DIR, dump_file), 'wb')
        try:
            # get_image yields (file offset, data) pairs; seek+write
            # rebuilds the on-disk layout from the in-memory image
            for offset, code in self.get_image(space, base):
                of.seek(offset)
                of.write(code)
            result = "OK: {0}".format(dump_file)
        except ValueError, ve:
            result = "Error: {0}".format(ve)
        except exceptions.SanityCheckException, ve:
            result = "Error: {0} Try -u/--unsafe".format(ve)
        finally:
            of.close()
        return result

    def render_text(self, outfd, data):
        """Renders the tasks to disk images, outputting progress as they go"""
        if self._config.DUMP_DIR == None:
            debug.error("Please specify a dump directory (--dump-dir)")
        if not os.path.isdir(self._config.DUMP_DIR):
            debug.error(self._config.DUMP_DIR + " is not a directory")
        self.table_header(outfd,
                          [("Process(V)", "[addrpad]"),
                           ("ImageBase", "[addrpad]"),
                           ("Name", "20"),
                           ("Result", "")])
        for task in data:
            task_space = task.get_process_address_space()
            if task_space == None:
                result = "Error: Cannot acquire process AS"
            elif task.Peb == None:
                # we must use m() here, because any other attempt to
                # reference task.Peb will try to instantiate the _PEB
                result = "Error: PEB at {0:#x} is paged".format(task.m('Peb'))
            elif task_space.vtop(task.Peb.ImageBaseAddress) == None:
                result = "Error: ImageBaseAddress at {0:#x} is paged".format(task.Peb.ImageBaseAddress)
            else:
                dump_file = "executable." + str(task.UniqueProcessId) + ".exe"
                result = self.dump_pe(task_space,
                                      task.Peb.ImageBaseAddress,
                                      dump_file)
            self.table_row(outfd,
                           task.obj_offset,
                           task.Peb.ImageBaseAddress,
                           task.ImageFileName,
                           result)

    def round(self, addr, align, up = False):
        """Rounds down an address based on an alignment"""
        # (rounds *up* to the next multiple when up=True)
        if addr % align == 0:
            return addr
        else:
            if up:
                return (addr + (align - (addr % align)))
            return (addr - (addr % align))

    def get_nt_header(self, addr_space, base_addr):
        """Returns the NT Header object for a task"""
        dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = base_addr,
                                vm = addr_space)
        return dos_header.get_nt_header()

    def get_code(self, addr_space, data_start, data_size, offset):
        """Returns a single section of re-created data from a file image"""
        # Split the virtual range into a partial leading page, a run of
        # full 0x1000 pages, and a partial trailing page; zread() zero-fills
        # unmapped bytes, so paged-out ranges just become zeros (logged
        # below when verbose).
        first_block = 0x1000 - data_start % 0x1000
        full_blocks = ((data_size + (data_start % 0x1000)) / 0x1000) - 1
        left_over = (data_size + data_start) % 0x1000
        paddr = addr_space.vtop(data_start)
        code = ""
        # Deal with reads that are smaller than a block
        if data_size < first_block:
            data_read = addr_space.zread(data_start, data_size)
            if paddr == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, data_size))
            code += data_read
            return (offset, code)
        data_read = addr_space.zread(data_start, first_block)
        if paddr == None:
            if self._config.verbose:
                debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, first_block))
        code += data_read
        # The middle part of the read
        new_vaddr = data_start + first_block
        for _i in range(0, full_blocks):
            data_read = addr_space.zread(new_vaddr, 0x1000)
            if addr_space.vtop(new_vaddr) == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, 0x1000))
            code += data_read
            new_vaddr = new_vaddr + 0x1000
        # The last part of the read
        if left_over > 0:
            data_read = addr_space.zread(new_vaddr, left_over)
            if addr_space.vtop(new_vaddr) == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, left_over))
            code += data_read
        return (offset, code)

    def get_image(self, addr_space, base_addr):
        """Outputs an executable disk image of a process"""
        nt_header = self.get_nt_header(addr_space = addr_space,
                                       base_addr = base_addr)
        soh = nt_header.OptionalHeader.SizeOfHeaders
        header = addr_space.zread(base_addr, soh)
        yield (0, header)
        fa = nt_header.OptionalHeader.FileAlignment
        for sect in nt_header.get_sections(self._config.UNSAFE):
            # sections must start on a FileAlignment boundary on disk
            foa = self.round(sect.PointerToRawData, fa)
            if foa != sect.PointerToRawData:
                debug.warning("Section start on disk not aligned to file alignment.\n")
                debug.warning("Adjusted section start from {0} to {1}.\n".format(sect.PointerToRawData, foa))
            yield self.get_code(addr_space,
                                sect.VirtualAddress + base_addr,
                                sect.SizeOfRawData, foa)
class ProcMemDump(ProcExeDump):
    """Dump a process to an executable memory sample"""

    def replace_header_field(self, sect, header, item, value):
        """Replaces a field in a sector header

        Packs ``value`` using the field's own struct format string and
        splices it into the raw ``header`` bytes at the field's offset
        relative to the start of the section header.  Returns the
        patched header bytes; ``header`` itself is not modified.
        """
        field_size = item.size()
        # Offset of the field within the raw section-header blob.
        start = item.obj_offset - sect.obj_offset
        end = start + field_size
        newval = struct.pack(item.format_string, int(value))
        result = header[:start] + newval + header[end:]
        return result

    def get_image(self, addr_space, base_addr):
        """Outputs an executable memory image of a process

        Unlike the disk-image dump above, sections are emitted at their
        in-memory (virtual) offsets, so each section header is rewritten
        so PointerToRawData / SizeOfRawData / VirtualSize describe the
        memory layout rather than the on-disk layout.
        """
        nt_header = self.get_nt_header(addr_space, base_addr)
        sa = nt_header.OptionalHeader.SectionAlignment
        shs = addr_space.profile.get_obj_size('_IMAGE_SECTION_HEADER')
        # Dump the entire image exactly as mapped in memory.
        yield self.get_code(addr_space, base_addr, nt_header.OptionalHeader.SizeOfImage, 0)
        prevsect = None
        sect_sizes = []
        # In-memory size of each section: the distance to the next
        # section's start; the final section uses its VirtualSize
        # rounded up to the section alignment.
        for sect in nt_header.get_sections(self._config.UNSAFE):
            if prevsect is not None:
                sect_sizes.append(sect.VirtualAddress - prevsect.VirtualAddress)
            prevsect = sect
        if prevsect is not None:
            sect_sizes.append(self.round(prevsect.Misc.VirtualSize, sa, up = True))
        counter = 0
        # File offset of the first section header (just past the optional header).
        start_addr = nt_header.FileHeader.SizeOfOptionalHeader + (nt_header.OptionalHeader.obj_offset - base_addr)
        for sect in nt_header.get_sections(self._config.UNSAFE):
            sectheader = addr_space.read(sect.obj_offset, shs)
            # Change the PointerToRawData
            sectheader = self.replace_header_field(sect, sectheader, sect.PointerToRawData, sect.VirtualAddress)
            sectheader = self.replace_header_field(sect, sectheader, sect.SizeOfRawData, sect_sizes[counter])
            sectheader = self.replace_header_field(sect, sectheader, sect.Misc.VirtualSize, sect_sizes[counter])
            yield (start_addr + (counter * shs), sectheader)
            counter += 1
| gpl-2.0 |
cogmission/nupic | tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py | 35 | 30031 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the clamodel module."""
import sys
import copy
from datetime import datetime
import unittest2 as unittest
import numpy
from mock import Mock, patch, ANY, call
from nupic.support.unittesthelpers.testcasebase import (unittest,
TestOptionParser)
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.regions.KNNAnomalyClassifierRegion import (
KNNAnomalyClassifierRegion,
_CLAClassificationRecord)
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.frameworks.opf.exceptions import (CLAModelInvalidRangeError)
class KNNAnomalyClassifierRegionTest(unittest.TestCase):
"""KNNAnomalyClassifierRegion unit tests."""
  def setUp(self):
    # Baseline region configuration shared by most tests; individual
    # tests override attributes directly on self.helper as needed.
    self.params = dict(
        trainRecords=10,
        anomalyThreshold=1.1,
        cacheSize=10000,
        k=1,
        distanceMethod='rawOverlap',
        distanceNorm=1,
        doBinarization=1,
        replaceDuplicates=0,
        maxStoredPatterns=1000)
    self.helper = KNNAnomalyClassifierRegion(**self.params)
  def testInit(self):
    """The constructor must copy each configuration value onto the region."""
    params = dict(
        trainRecords=100,
        anomalyThreshold=101,
        cacheSize=102,
        classificationVectorType=1,
        k=1,
        distanceMethod='rawOverlap',
        distanceNorm=1,
        doBinarization=1,
        replaceDuplicates=0,
        maxStoredPatterns=1000)
    helper = KNNAnomalyClassifierRegion(**params)
    self.assertEqual(helper.trainRecords, params['trainRecords'])
    self.assertEqual(helper.anomalyThreshold, params['anomalyThreshold'])
    self.assertEqual(helper.cacheSize, params['cacheSize'])
    self.assertEqual(helper.classificationVectorType,
                     params['classificationVectorType'])
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
@patch.object(KNNAnomalyClassifierRegion, 'getParameter')
@patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
def testCompute(self, constructRecord, getParam, classifyState):
params = {
'trainRecords': 0
}
getParam.side_effect = params.get
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
constructRecord.return_value = record
self.helper.compute(dict(), dict())
classifyState.assert_called_once_with(record)
self.assertEqual(self.helper.labelResults, state['anomalyLabel'])
  def testGetLabels(self):
    """getLabels(): empty cache, range validation, and label retrieval."""
    # No _recordsCache
    self.helper._recordsCache = []
    self.assertEqual(self.helper.getLabels(), \
        {'isProcessing': False, 'recordLabels': []})
    # Invalid ranges
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.getLabels, start=100, end=100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.getLabels, start=-100, end=-100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.getLabels, start=100, end=-100)
    # Valid no threshold labels
    values = {
        'categoryRecencyList': [4, 5, 7],
    }
    self.helper.saved_categories = ['TestCategory']
    categoryList = [1, 1, 1]
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn._categoryList = categoryList
    results = self.helper.getLabels()
    self.assertTrue('isProcessing' in results)
    self.assertTrue('recordLabels' in results)
    # One label entry per classified ROWID in the recency list.
    self.assertEqual(len(results['recordLabels']),
                     len(values['categoryRecencyList']))
    for record in results['recordLabels']:
      self.assertTrue(record['ROWID'] in values['categoryRecencyList'])
      self.assertEqual(record['labels'], self.helper.saved_categories)
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  @patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, 'classyState' if False else 'classifyState')
  def testAddLabel(self, classifyState, constructVector, getVector):
    """addLabel(): range validation and user labeling of cached records."""
    # Setup Mocks
    getVector.return_value = numpy.array([0, 0, 0, 1, 0, 0, 1])
    knn = self.helper._knnclassifier._knn
    knn.learn = Mock()
    # Invalid ranges
    self.helper._recordsCache = []
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.addLabel, start=-100, end=-100, labelName="test")
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.addLabel, start=100, end=-100, labelName="test")
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=[], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.addLabel(11, 12, "Added")
    # Verifies records were updated
    self.assertEqual(results, None)
    self.assertTrue('Added' in self.helper._recordsCache[1].anomalyLabel)
    self.assertTrue(self.helper._recordsCache[1].setByUser)
    # Verifies record added to KNN classifier
    knn.learn.assert_called_once_with(ANY, ANY, rowID=11)
    # Verifies records after added label is recomputed
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, 'classifyState')
  def testRemoveLabel(self, classifyState, constructClassificationRecord):
    """removeLabels() with a label filter: validation and removal."""
    knn = self.helper._knnclassifier._knn
    knn._numPatterns = 3
    knn._categoryRecencyList = [10, 11, 12]
    knn.removeIds = Mock(side_effect = self.mockRemoveIds)
    # Empty cache must be rejected even without a range.
    self.helper._recordsCache = []
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.removeLabels,)
    # Invalid ranges
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.removeLabels, start=100, end=100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.removeLabels, start=-100, end=-100)
    self.helper._recordsCache = [Mock(ROWID=10)]
    self.assertRaises(CLAModelInvalidRangeError,
                      self.helper.removeLabels, start=100, end=-100)
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.removeLabels(11, 12, "Test")
    self.assertEqual(results, None)
    self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
    # Verifies records removed from KNN classifier
    self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
    # Verifies records after removed record are updated
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  @patch.object(KNNAnomalyClassifierRegion, 'classifyState')
  def testRemoveLabelNoFilter(self, classifyState,
                              constructClassificationRecord):
    """removeLabels() without a label filter removes by range alone."""
    knn = self.helper._knnclassifier._knn
    knn._numPatterns = 3
    knn._categoryRecencyList = [10, 11, 12]
    knn.removeIds = Mock(side_effect=self.mockRemoveIds)
    # Valid no threshold labels
    self.helper._recordsCache = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    results = self.helper.removeLabels(11, 12)
    self.assertEqual(results, None)
    self.assertTrue('Test' not in self.helper._recordsCache[1].anomalyLabel)
    # Verifies records removed from KNN classifier
    self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])
    # Verifies records after removed record are updated
    classifyState.assert_called_once_with(self.helper._recordsCache[2])
  @patch.object(KNNAnomalyClassifierRegion, 'classifyState')
  def testSetGetThreshold(self, classifyState):
    """Setting anomalyThreshold reclassifies every cached record."""
    self.helper._recordsCache = [Mock(), Mock(), Mock()]
    self.helper.setParameter('anomalyThreshold', None, 1.0)
    self.assertAlmostEqual(self.helper.anomalyThreshold, 1.0)
    # One classifyState call per cached record.
    self.assertEqual(len(classifyState.mock_calls),
                     len(self.helper._recordsCache))
    self.assertAlmostEqual(self.helper.getParameter('anomalyThreshold'), 1.0)
    # Non-numeric values must be rejected.
    self.assertRaises(Exception, self.helper.setParameter,
                      'anomalyThreshold', None, 'invalid')
@patch.object(KNNAnomalyClassifierRegion, 'classifyState')
def testSetGetWaitRecords(self, classifyState):
self.helper._recordsCache = [
Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
self.helper.setParameter('trainRecords', None, 20)
self.assertEqual(self.helper.trainRecords, 20)
self.assertEqual(len(classifyState.mock_calls),
len(self.helper._recordsCache))
self.assertEqual(self.helper.getParameter('trainRecords'), 20)
# Test invalid parameter type
self.assertRaises(Exception, self.helper.setParameter,
'trainRecords', None, 'invalid')
# Test invalid value before first record ROWID in cache
state = {
"ROWID": 1000,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
self.helper._recordsCache = [state]
self.assertRaises(Exception, self.helper.setParameter,
'trainRecords', None, 0)
  @patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  def testSetGetWaitRecordsRecalculate(self, getRecord):
    """
    This test ensures that records in classifier are removed when they are no
    longer being used when the trainRecords is set.
    """
    self.helper.cacheSize = 5
    self.helper.anomalyThreshold = 0.8
    self.helper._anomalyVectorLength = 20
    # Mix of above/below-threshold records; scores >= 0.8 get auto-labeled.
    records = [
        Mock(ROWID=10, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,4])),
        Mock(ROWID=11, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2])),
        Mock(ROWID=12, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
        Mock(ROWID=13, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,2,6,7])),
        Mock(ROWID=14, anomalyLabel=["Test"], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1,10])),
        Mock(ROWID=15, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,3])),
        Mock(ROWID=16, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4])),
        Mock(ROWID=17, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([10])),
        Mock(ROWID=18, anomalyLabel=["Test"], anomalyScore=0, setByUser=False, anomalyVector=numpy.array([1,4]))]
    getRecord.side_effect = records
    for i in records:
      self.helper.compute(dict(), dict())
    self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 6)
    self.assertEqual(
        self.helper._knnclassifier.getParameter('categoryRecencyList'),
        [10, 12, 14, 16, 17, 18],
        "Classifier incorrectly classified test records."
    )
    # Now set trainRecords and should remove the labels outside of cache
    # and relabel points.
    self.helper.setParameter('trainRecords', None, 14)
    self.assertEqual(self.helper._knnclassifier._knn._numPatterns, 2)
    self.assertEqual(
        self.helper._knnclassifier.getParameter('categoryRecencyList'),
        [14, 17],
        "Classifier incorrectly reclassified test records after setting "
        "trainRecords")
  @patch.object(KNNAnomalyClassifierRegion, '_addRecordToKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_deleteRecordsFromKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_recomputeRecordFromKNN')
  @patch.object(KNNAnomalyClassifierRegion, '_categoryToLabelList')
  def testUpdateState(self, toLabelList, recompute, deleteRecord, addRecord):
    """classifyState(): labeling rules across thresholds, wait period,
    user-set labels, and the auto-classified label precedence."""
    record = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": "",
        "anomalyLabel": ["Label"],
        "setByUser": False
    }
    # Test record not labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 0
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    deleteRecord.assert_called_once_with([state])
    # Test record not labeled and above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel, \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
    # Test record not labeled and above threshold during wait period
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 10
    self.helper.anomalyThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    self.assertTrue(not addRecord.called)
    # Test record labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.trainRecords = 0
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = ["Label"]
    state = _CLAClassificationRecord(**record)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel, ["Label"])
    self.assertTrue(not addRecord.called)
    # Test setByUser
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = ["Label 2"]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    state = _CLAClassificationRecord(**recordCopy)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel,
                     [recordCopy["anomalyLabel"][0], toLabelList.return_value[0]])
    addRecord.assert_called_once_with(state)
    # Test removal of above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = []
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
         KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
         KNNAnomalyClassifierRegion.AUTO_TAG]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel, [])
    # Auto classified threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 1.1
    toLabelList.return_value = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel,
                     [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
                      KNNAnomalyClassifierRegion.AUTO_TAG])
    addRecord.assert_called_once_with(state)
    # Test precedence of threshold label above auto threshold label
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper.anomalyThreshold = 0.8
    toLabelList.return_value = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL,
         KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
         KNNAnomalyClassifierRegion.AUTO_TAG]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper.classifyState(state)
    self.assertEqual(state.anomalyLabel,
                     [KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testAddRecordToKNN(self, getAnomalyVector):
    """_addRecordToKNN(): learn new rows, overwrite already-known rows."""
    getAnomalyVector.return_value = numpy.array([0, 1, 0, 0, 1, 0, 1, 1])
    values = {
        'categoryRecencyList': [1, 2, 3]
    }
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn.learn = Mock()
    classifier._knn.prototypeSetCategory = Mock()
    state = {
        "ROWID": 5,
        "anomalyScore": 1.0,
        "anomalyVector": numpy.array([1, 5, 7, 8]),
        "anomalyLabel": ["Label"],
        "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Test with record not already in KNN
    self.helper._addRecordToKNN(record)
    classifier._knn.learn.assert_called_once_with(getAnomalyVector.return_value,
                                                  ANY, rowID=state['ROWID'])
    self.assertTrue(not classifier._knn.prototypeSetCategory.called)
    classifier._knn.learn.reset_mock()
    # Test with record already in KNN
    values = {
        'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    self.helper._addRecordToKNN(record)
    classifier._knn.prototypeSetCategory.assert_called_once_with(
        state['ROWID'], ANY)
    self.assertTrue(not classifier._knn.learn.called)
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testDeleteRangeFromKNN(self, getAnomalyVector):
    """_deleteRangeFromKNN(): half-open [start, end) removal; open end."""
    getAnomalyVector.return_value = "Vector"
    values = {
        'categoryRecencyList': [1, 2, 3]
    }
    classifier = self.helper._knnclassifier
    classifier.getParameter = Mock(side_effect=values.get)
    classifier._knn._numPatterns = len(values['categoryRecencyList'])
    classifier._knn.removeIds = Mock(side_effect=self.mockRemoveIds)
    # Test with record not already in KNN
    self.helper._deleteRangeFromKNN(start=1, end=3)
    classifier._knn.removeIds.assert_called_once_with([1, 2])
    classifier._knn.removeIds.reset_mock()
    # Test with record already in KNN
    values = {
        'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    # No end given: everything from start onward is removed.
    self.helper._deleteRangeFromKNN(start=1)
    classifier._knn.removeIds.assert_called_once_with([1, 2, 3, 5])
  @patch.object(KNNAnomalyClassifierRegion, '_getStateAnomalyVector')
  def testRecomputeRecordFromKNN(self, getAnomalyVector):
    """_recomputeRecordFromKNN(): nearest earlier neighbor within max dist."""
    getAnomalyVector.return_value = "Vector"
    self.helper.trainRecords = 0
    values = {
        'categoryRecencyList': [1, 2, 3, 5, 6, 7, 8, 9],
        'latestDists': numpy.array([0.7, 0.2, 0.5, 1, 0.3, 0.2, 0.1]),
        'categories': ['A','B','C','D','E','F','G']
    }
    classifier = self.helper._knnclassifier
    classifier.getLatestDistances = Mock(return_value=values['latestDists'])
    classifier.getCategoryList = Mock(return_value=values['categories'])
    classifier.getParameter = Mock(side_effect=values.get)
    classifier.setParameter = Mock()
    classifier.compute = Mock()
    state = {
        "ROWID": 5,
        "anomalyScore": 1.0,
        "anomalyVector": "",
        "anomalyLabel": ["Label"],
        "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Test finding best category before record - exists
    self.helper._classificationMaxDist = 0.4
    self.helper._autoDetectWaitRecords = 0
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, 'B')
    # Test finding best category before record - does not exists
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
    # Test finding best category before record - not record before
    record.ROWID = 0
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
  def testConstructClassificationVector(self):
    """constructClassificationRecord(): vector-type 1 (TP cells),
    type 2 (SP/TP column error), and rejection of unknown types."""
    # NOTE(review): modelParams and the 'params' sub-dicts below are never
    # referenced by the assertions -- presumably leftovers from an earlier
    # version of this test; confirm before removing.
    modelParams = {
        '__numRunCalls': 0
    }
    spVals = {
        'params': {
            'activeOutputCount': 5
        },
        'output': {
            'bottomUpOut': numpy.array([1, 1, 0, 0, 1])
        }
    }
    tpVals = {
        'params': {
            'cellsPerColumn': 2,
            'columnCount': 2
        },
        'output': {
            'lrnActive': numpy.array([1, 0, 0, 1]),
            'topDownOut': numpy.array([1, 0, 0, 0, 1])
        }
    }
    inputs = dict(
        spBottomUpOut=spVals['output']['bottomUpOut'],
        tpTopDownOut=tpVals['output']['topDownOut'],
        tpLrnActiveStateT=tpVals['output']['lrnActive']
    )
    self.helper._activeColumnCount = 5
    # Test TP Cell vector
    self.helper.classificationVectorType = 1
    vector = self.helper.constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector,
                     tpVals['output']['lrnActive'].nonzero()[0].tolist())
    # Test SP and TP Column Error vector
    self.helper.classificationVectorType = 2
    self.helper._prevPredictedColumns = numpy.array(
        [1, 0, 0, 0, 1]).nonzero()[0]
    vector = self.helper.constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector, [0, 1, 4])
    self.helper._prevPredictedColumns = numpy.array(
        [1, 0, 1, 0, 0]).nonzero()[0]
    vector = self.helper.constructClassificationRecord(inputs)
    self.assertEqual(vector.anomalyVector, [0, 1, 4, 7])
    # Unknown vector types must raise.
    self.helper.classificationVectorType = 3
    self.assertRaises(TypeError, self.helper.constructClassificationRecord,
                      inputs)
  @patch.object(KNNAnomalyClassifierRegion ,'classifyState')
  @patch.object(KNNAnomalyClassifierRegion, 'constructClassificationRecord')
  def testCompute(self, createRecord, updateState):
    """compute(): record caching, wait-period behavior, and cache eviction."""
    state = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": numpy.array([1, 0, 0, 0, 1]),
        "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    createRecord.return_value = record
    inputs = dict()
    outputs= dict()
    # Test add first record
    self.helper.cacheSize = 10
    self.helper.trainRecords = 0
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    updateState.assert_called_once_with(self.helper._recordsCache[-1])
    # Test add record before wait records
    updateState.reset_mock()
    self.helper.cacheSize = 10
    self.helper.trainRecords = 10
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 2)
    # During the wait period no classification happens.
    self.assertTrue(not updateState.called)
    # Test exceeded cache length
    updateState.reset_mock()
    self.helper.cacheSize = 1
    self.helper._recordsCache = []
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.helper.compute(inputs, outputs)
    self.assertEqual(self.helper._recordsCache[-1], record)
    # Oldest record was evicted to honor cacheSize.
    self.assertEqual(len(self.helper._recordsCache), 1)
    self.assertTrue(not updateState.called)
  def testCategoryToList(self):
    """_categoryToLabelList(): decode a category bitmask into label names."""
    result = self.helper._categoryToLabelList(None)
    self.assertEqual(result, [])
    self.helper.saved_categories = ['A', 'B', 'C']
    # Bit i set selects saved_categories[i].
    result = self.helper._categoryToLabelList(1)
    self.assertEqual(result, ['A'])
    result = self.helper._categoryToLabelList(4)
    self.assertEqual(result, ['C'])
    result = self.helper._categoryToLabelList(5)
    self.assertEqual(result, ['A','C'])
  def testGetAnomalyVector(self):
    """_getStateAnomalyVector(): expand sparse indices to a dense vector."""
    state = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": [1,4,5],
        "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    self.helper._anomalyVectorLength = 10
    vector = self.helper._getStateAnomalyVector(record)
    self.assertEqual(len(vector), self.helper._anomalyVectorLength)
    # The dense vector's nonzero positions round-trip to the sparse form.
    self.assertEqual(vector.nonzero()[0].tolist(), record.anomalyVector)
  # Tests for configuration
  # ===========================================================================
  def testSetState(self):
    """__setstate__(): migrate serialized state from older versions."""
    # No Version set
    state = dict(_classificationDelay=100)
    state['_knnclassifierProps'] = self.params
    self.helper._vectorType = None
    self.helper.__setstate__(state)
    self.assertEqual(self.helper.classificationVectorType, 1)
    self.assertEqual(self.helper._version,
                     KNNAnomalyClassifierRegion.__VERSION__)
    # Version 1
    state = dict(_version=1, _classificationDelay=100)
    state['_knnclassifierProps'] = self.params
    self.helper.__setstate__(state)
    self.assertEqual(self.helper._version,
                     KNNAnomalyClassifierRegion.__VERSION__)
    # Invalid Version
    state = dict(_version="invalid")
    state['_knnclassifierProps'] = self.params
    self.assertRaises(Exception, self.helper.__setstate__, state)
  # Tests for _CLAClassificationRecord class
  # ===========================================================================
  def testCLAClassificationRecord(self):
    """_CLAClassificationRecord: field assignment and setByUser default."""
    record = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": "Vector",
        "anomalyLabel": "Label"
    }
    state = _CLAClassificationRecord(**record)
    self.assertEqual(state.ROWID, record['ROWID'])
    self.assertEqual(state.anomalyScore, record['anomalyScore'])
    self.assertEqual(state.anomalyVector, record['anomalyVector'])
    self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
    # setByUser defaults to False when omitted.
    self.assertEqual(state.setByUser, False)
    record = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": "Vector",
        "anomalyLabel": "Label",
        "setByUser": True
    }
    state = _CLAClassificationRecord(**record)
    self.assertEqual(state.ROWID, record['ROWID'])
    self.assertEqual(state.anomalyScore, record['anomalyScore'])
    self.assertEqual(state.anomalyVector, record['anomalyVector'])
    self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
    self.assertEqual(state.setByUser, record['setByUser'])
  def testCLAClassificationRecordGetState(self):
    """__getstate__() must serialize back to the constructor kwargs."""
    record = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": "Vector",
        "anomalyLabel": "Label",
        "setByUser": False
    }
    state = _CLAClassificationRecord(**record)
    self.assertEqual(state.__getstate__(), record)
  def testCLAClassificationRecordSetState(self):
    """__setstate__() must overwrite every field of an existing record."""
    record = {
        "ROWID": None,
        "anomalyScore": None,
        "anomalyVector": None,
        "anomalyLabel": None,
        "setByUser": None
    }
    state = _CLAClassificationRecord(**record)
    record = {
        "ROWID": 0,
        "anomalyScore": 1.0,
        "anomalyVector": "Vector",
        "anomalyLabel": "Label",
        "setByUser": False
    }
    state.__setstate__(record)
    self.assertEqual(state.ROWID, record['ROWID'])
    self.assertEqual(state.anomalyScore, record['anomalyScore'])
    self.assertEqual(state.anomalyVector, record['anomalyVector'])
    self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
    self.assertEqual(state.setByUser, record['setByUser'])
def mockRemoveIds(self, ids):
self.helper._knnclassifier._knn._numPatterns -= len(ids)
knnClassifier = self.helper._knnclassifier
for idx in ids:
if idx in self.helper._knnclassifier.getParameter('categoryRecencyList'):
knnClassifier.getParameter('categoryRecencyList').remove(idx)
if __name__ == '__main__':
  # Let the nupic option parser consume its own flags first, then hand
  # whatever is left over to the unittest framework.
  parser = TestOptionParser()
  options, args = parser.parse_args()
  # Form the command line for the unit test framework
  args = [sys.argv[0]] + args
  unittest.main(argv=args)
| agpl-3.0 |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/django/contrib/gis/geos/libgeos.py | 115 | 5836 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')

# Custom library path set?
try:
    from django.conf import settings
    lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
        ImportError, ImproperlyConfigured):
    lib_path = None

# Setting the appropriate names for the GEOS-C library.
if lib_path:
    lib_names = None
elif os.name == 'nt':
    # Windows NT libraries
    lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
    # *NIX libraries
    lib_names = ['geos_c', 'GEOS']
else:
    raise ImportError('Unsupported OS "%s"' % os.name)

# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
    for lib_name in lib_names:
        lib_path = find_library(lib_name)
        # PEP 8 (E714): identity comparison with None spelled `is not None`
        # (was `not lib_path is None`).
        if lib_path is not None:
            break

# No GEOS library could be found.
if lib_path is None:
    raise ImportError('Could not find the GEOS library (tried "%s"). '
                      'Try setting GEOS_LIBRARY_PATH in your settings.' %
                      '", "'.join(lib_names))

# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
#  http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
#  typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
    """Callback GEOS invokes for informational notices; logs a warning."""
    fmt, lst = fmt.decode(), lst.decode()
    try:
        warn_msg = fmt % lst
    except Exception:
        # The C-style format string may not match the single argument we
        # receive; fall back to the raw format text.  (Was a bare `except:`,
        # which would also swallow SystemExit/KeyboardInterrupt.)
        warn_msg = fmt
    logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)

ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
    """Callback GEOS invokes on errors; logs them at ERROR level."""
    fmt, lst = fmt.decode(), lst.decode()
    try:
        err_msg = fmt % lst
    except Exception:
        # See notice_h: tolerate mismatched C format strings.
        err_msg = fmt
    logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####

# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
    pass

class GEOSPrepGeom_t(Structure):
    pass

class GEOSCoordSeq_t(Structure):
    pass

class GEOSContextHandle_t(Structure):
    pass

# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)

# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
    "Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
    return (GEOM_PTR * n)()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p

# Regular expression should be able to parse version strings such as
#  '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
    r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
    r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
    """
    Returns a dictionary containing the various version metadata parsed from
    the GEOS version string, including the version number, whether the version
    is a release candidate (and what number release candidate), and the C API
    version.
    """
    version_string = geos_version().decode()
    match = version_regex.match(version_string)
    if match is None:
        raise GEOSException('Could not parse version info string "%s"' % version_string)
    keys = ('version', 'release_candidate', 'capi_version',
            'major', 'minor', 'subminor')
    return {key: match.group(key) for key in keys}
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
# Prepared geometries (and the reentrant *_r API used below) require GEOS >= 3.1.
GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0)

if GEOS_PREPARE:
    # Here we set up the prototypes for the initGEOS_r and finishGEOS_r
    # routines.  These functions aren't actually called until they are
    # attached to a GEOS context handle -- this actually occurs in
    # geos/prototypes/threadsafe.py.
    lgeos.initGEOS_r.restype = CONTEXT_PTR
    lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
else:
    # When thread-safety isn't available, the initGEOS routine must be called
    # first.  This function takes the notice and error functions, defined
    # as Python callbacks above, as parameters. Here is the C code that is
    # wrapped:
    #  extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function);
    lgeos.initGEOS(notice_h, error_h)
    # Calling finishGEOS() upon exit of the interpreter.
    import atexit
    atexit.register(lgeos.finishGEOS)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.