repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_05_01/_configuration.py | 1 | 3238 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class PolicyClientConfiguration(Configuration):
    """Configuration for PolicyClient.

    All parameters passed in are kept as instance attributes so the client
    pipeline can read them back later.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Both identifiers are mandatory; reject missing values up front.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(PolicyClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        # This configuration targets the 2018-05-01 policy API surface only.
        self.api_version = "2018-05-01"
        # Scopes may be overridden by the caller; default to ARM public cloud.
        self.credential_scopes = kwargs.pop(
            'credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy can be supplied explicitly via kwargs;
        # otherwise a default instance is constructed.
        supplied = kwargs.get
        self.user_agent_policy = supplied('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = supplied('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = supplied('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied('redirect_policy') or policies.RedirectPolicy(**kwargs)
        # Build a bearer-token policy only when the caller did not provide one.
        auth_policy = supplied('authentication_policy')
        if not auth_policy and self.credential:
            auth_policy = policies.BearerTokenCredentialPolicy(
                self.credential, *self.credential_scopes, **kwargs)
        self.authentication_policy = auth_policy
| mit |
jnerin/ansible | lib/ansible/modules/cloud/cloudstack/cs_vpc_offering.py | 15 | 8517 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, David Passante (@dpassante)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_vpc_offering
short_description: Manages vpc offerings on Apache CloudStack based clouds.
description:
- Create, update, enable, disable and remove CloudStack VPC offerings.
version_added: '2.5'
author: "David Passante (@dpassante)"
options:
name:
description:
- The name of the vpc offering
required: true
state:
description:
- State of the vpc offering.
choices: [ enabled, present, disabled, absent ]
required: false
default: present
display_text:
description:
- Display text of the vpc offerings
required: false
service_capabilities:
description:
- Desired service capabilities as part of vpc offering.
aliases: [ service_capability ]
service_offering:
description:
- The name or ID of the service offering for the VPC router appliance.
required: false
supported_services:
description:
- Services supported by the vpc offering
aliases: [ supported_service ]
required: false
service_providers:
description:
- provider to service mapping. If not specified, the provider for the service will be mapped to the default provider on the physical network
aliases: [ service_provider ]
required: false
poll_async:
description:
- Poll async jobs until job has finished.
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a vpc offering and enable it
- local_action:
module: cs_vpc_offering
name: "my_vpc_offering"
display_text: "vpc offering description"
state: enabled
supported_services: [ Dns, Dhcp ]
service_providers:
- {service: 'dns', provider: 'virtualrouter'}
- {service: 'dhcp', provider: 'virtualrouter'}
# Remove a vpc offering
- local_action:
module: cs_vpc_offering
name: "my_vpc_offering"
state: absent
'''
RETURN = '''
---
id:
description: UUID of the vpc offering.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: The name of the vpc offering
returned: success
type: string
sample: MyCustomVPCOffering
display_text:
description: The display text of the vpc offering
returned: success
type: string
sample: My vpc offering
state:
description: The state of the vpc offering
returned: success
type: string
sample: Enabled
service_offering_id:
description: The service offering ID.
returned: success
type: string
sample: c5f7a5fc-43f8-11e5-a151-feff819cdc9f
is_default:
description: Whether VPC offering is the default offering or not.
returned: success
type: bool
sample: false
region_level:
description: Indicated if the offering can support region level vpc.
returned: success
type: bool
sample: false
distributed:
description: Indicates if the vpc offering supports distributed router for one-hop forwarding.
returned: success
type: bool
sample: false
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackVPCOffering(AnsibleCloudStack):
    """CloudStack VPC offering management: create, update state, delete."""

    def __init__(self, module):
        super(AnsibleCloudStackVPCOffering, self).__init__(module)
        # Map CloudStack API result keys to the names documented in RETURN.
        self.returns = {
            'serviceofferingid': 'service_offering_id',
            'isdefault': 'is_default',
            'distributedvpcrouter': 'distributed',
            'supportsregionLevelvpc': 'region_level',
        }
        # Cache for the looked-up offering so the API is queried only once.
        self.vpc_offering = None

    def get_vpc_offering(self):
        """Return the VPC offering matching the ``name`` param (cached)."""
        if not self.vpc_offering:
            query_args = {
                'name': self.module.params.get('name'),
            }
            listing = self.query_api('listVPCOfferings', **query_args)
            if listing:
                self.vpc_offering = listing['vpcoffering'][0]
        return self.vpc_offering

    def get_service_offering_id(self):
        """Resolve the system service offering name/UUID, or None if unset."""
        wanted = self.module.params.get('service_offering')
        if not wanted:
            return None

        # VPC router appliances run on *system* service offerings.
        listing = self.query_api('listServiceOfferings', issystem=True)
        if listing:
            for entry in listing['serviceoffering']:
                if wanted in [entry['name'], entry['id']]:
                    return entry['id']
        self.fail_json(msg="Service offering '%s' not found" % wanted)

    def create_or_update(self):
        """Ensure the offering exists, then reconcile its mutable fields."""
        offering = self.get_vpc_offering()
        if not offering:
            offering = self.create_vpc_offering()
        return self.update_vpc_offering(offering)

    def create_vpc_offering(self):
        """Create the offering; returns None in check mode or when not polling."""
        self.result['changed'] = True
        create_args = {
            'name': self.module.params.get('name'),
            'state': self.module.params.get('state'),
            'displaytext': self.module.params.get('display_text'),
            'supportedservices': self.module.params.get('supported_services'),
            'serviceproviderlist': self.module.params.get('service_providers'),
            'serviceofferingid': self.get_service_offering_id(),
        }
        # These params are only mandatory when the offering must be created.
        self.module.fail_on_missing_params(
            required_params=['display_text', 'supported_services'])

        created = None
        if not self.module.check_mode:
            res = self.query_api('createVPCOffering', **create_args)
            if self.module.params.get('poll_async'):
                created = self.poll_job(res, 'vpcoffering')
        return created

    def delete_vpc_offering(self):
        """Remove the offering when present; no-op when it does not exist."""
        offering = self.get_vpc_offering()
        if offering:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('deleteVPCOffering', id=offering['id'])
                if self.module.params.get('poll_async'):
                    offering = self.poll_job(res, 'vpcoffering')
        return offering

    def update_vpc_offering(self, vpc_offering):
        """Apply name/display_text/state changes to an existing offering."""
        if not vpc_offering:
            return vpc_offering

        update_args = {
            'id': vpc_offering['id'],
            'state': self.module.params.get('state'),
            'name': self.module.params.get('name'),
            'displaytext': self.module.params.get('display_text'),
        }
        # The API only understands 'Enabled'/'Disabled'; 'present'/'absent'
        # are module-level states and must not be sent upstream.
        if update_args['state'] in ['enabled', 'disabled']:
            update_args['state'] = update_args['state'].title()
        else:
            del update_args['state']

        if self.has_changed(update_args, vpc_offering):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateVPCOffering', **update_args)
                if self.module.params.get('poll_async'):
                    vpc_offering = self.poll_job(res, 'vpcoffering')
        return vpc_offering
def main():
    """Module entry point: build the arg spec and dispatch on ``state``."""
    argument_spec = cs_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        display_text=dict(),
        state=dict(choices=['enabled', 'present', 'disabled', 'absent'], default='present'),
        service_capabilities=dict(type='list', aliases=['service_capability']),
        service_offering=dict(),
        supported_services=dict(type='list', aliases=['supported_service']),
        service_providers=dict(type='list', aliases=['service_provider']),
        poll_async=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True,
    )

    acs_vpc_offering = AnsibleCloudStackVPCOffering(module)

    # 'absent' removes the offering; every other state creates/updates it.
    if module.params.get('state') == 'absent':
        offering = acs_vpc_offering.delete_vpc_offering()
    else:
        offering = acs_vpc_offering.create_or_update()

    module.exit_json(**acs_vpc_offering.get_result(offering))


if __name__ == '__main__':
    main()
| gpl-3.0 |
kronoscode/Booktype | lib/booktype/apps/core/south_migrations/0001_initial.py | 8 | 11534 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: initial tables for the booktype 'core' app.
    # Migrations are frozen history — do not edit the operations below.

    def forwards(self, orm):
        """Create the Permission and Role tables plus Role's two M2M tables."""
        # Adding model 'Permission'
        db.create_table(u'core_permission', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('app_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'core', ['Permission'])

        # Adding model 'Role'
        db.create_table(u'core_role', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=60)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['editor.Book'], null=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Role'])

        # Adding M2M table for field permissions on 'Role'
        db.create_table(u'core_role_permissions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('role', models.ForeignKey(orm[u'core.role'], null=False)),
            ('permission', models.ForeignKey(orm[u'core.permission'], null=False))
        ))
        db.create_unique(u'core_role_permissions', ['role_id', 'permission_id'])

        # Adding M2M table for field members on 'Role'
        db.create_table(u'core_role_members', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('role', models.ForeignKey(orm[u'core.role'], null=False)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False))
        ))
        db.create_unique(u'core_role_members', ['role_id', 'user_id'])

    def backwards(self, orm):
        """Reverse of forwards(): drop the tables created above."""
        # Deleting model 'Permission'
        db.delete_table(u'core_permission')

        # Deleting model 'Role'
        db.delete_table(u'core_role')

        # Removing M2M table for field permissions on 'Role'
        db.delete_table('core_role_permissions')

        # Removing M2M table for field members on 'Role'
        db.delete_table('core_role_members')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history; generated, not hand-maintained.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.permission': {
            'Meta': {'object_name': 'Permission'},
            'app_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        u'core.role': {
            'Meta': {'object_name': 'Role'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.Book']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Permission']", 'null': 'True', 'blank': 'True'})
        },
        u'editor.book': {
            'Meta': {'object_name': 'Book'},
            'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.BookiGroup']", 'null': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.Language']", 'null': 'True'}),
            'license': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.License']", 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'permission': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'null': 'True', 'to': u"orm['editor.BookStatus']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '2500'}),
            'url_title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2500'}),
            'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'version'", 'null': 'True', 'to': u"orm['editor.BookVersion']"})
        },
        u'editor.bookigroup': {
            'Meta': {'object_name': 'BookiGroup'},
            'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': u"orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'url_name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        },
        u'editor.bookstatus': {
            'Meta': {'object_name': 'BookStatus'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.Book']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {})
        },
        u'editor.bookversion': {
            'Meta': {'object_name': 'BookVersion'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['editor.Book']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'major': ('django.db.models.fields.IntegerField', [], {}),
            'minor': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
        },
        u'editor.language': {
            'Meta': {'object_name': 'Language'},
            'abbrevation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'editor.license': {
            'Meta': {'object_name': 'License'},
            'abbrevation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['core']
nagyistoce/locust | examples/events.py | 41 | 2105 | # encoding: utf-8
"""
This is an example of a locustfile that uses Locust's built in event hooks to
track the sum of the content-length header in all successful HTTP responses
"""
from locust import HttpLocust, TaskSet, task, events, web
class MyTaskSet(TaskSet):
    """Task set hitting the index page twice as often as the stats page."""

    @task(2)
    def index(self):
        self.client.get("/")

    @task(1)
    def stats(self):
        self.client.get("/stats/requests")
class WebsiteUser(HttpLocust):
    # Locust user definition: target host plus wait-time bounds (ms)
    # between tasks, running the MyTaskSet defined above.
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = MyTaskSet
"""
We need somewhere to store the stats.
On the master node stats will contain the aggregated sum of all content-lengths,
while on the slave nodes this will be the sum of the content-lengths since the
last stats report was sent to the master
"""
stats = {"content-length":0}
def on_request_success(request_type, name, response_time, response_length):
    """
    Event handler fired on every successful request; folds the response
    size into the module-level ``stats`` counter.
    """
    stats["content-length"] = stats["content-length"] + response_length
def on_report_to_master(client_id, data):
    """
    Fired on slave instances each time a stats report is about to be sent
    to the locust master: attach the locally accumulated content-length to
    the outgoing dict and reset the slave-local counter.
    """
    data["content-length"], stats["content-length"] = stats["content-length"], 0
def on_slave_report(client_id, data):
    """
    Fired on the master when a slave's stats report arrives: add the
    slave's content-length delta to the master's aggregated total.
    """
    stats["content-length"] = stats["content-length"] + data["content-length"]
# Hook up the event listeners (locust event hooks use += to register).
events.request_success += on_request_success
events.report_to_master += on_report_to_master
events.slave_report += on_slave_report
@web.app.route("/content-length")
def total_content_length():
"""
Add a route to the Locust web app, where we can see the total content-length
"""
return "Total content-length recieved: %i" % stats["content-length"]
| mit |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Paste-2.0.1-py2.7.egg/paste/recursive.py | 50 | 14708 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware to make internal requests and forward requests internally.
When applied, several keys are added to the environment that will allow
you to trigger recursive redirects and forwards.
paste.recursive.include:
When you call
``environ['paste.recursive.include'](new_path_info)`` a response
will be returned. The response has a ``body`` attribute, a
``status`` attribute, and a ``headers`` attribute.
paste.recursive.script_name:
The ``SCRIPT_NAME`` at the point that recursive lives. Only
paths underneath this path can be redirected to.
paste.recursive.old_path_info:
A list of previous ``PATH_INFO`` values from previous redirects.
Raise ``ForwardRequestException(new_path_info)`` to do a forward
(aborting the current request).
"""
import six
import warnings
from six.moves import cStringIO as StringIO
__all__ = ['RecursiveMiddleware']
__pudge_all__ = ['RecursiveMiddleware', 'ForwardRequestException']
class RecursionLoop(AssertionError):
    # Subclasses AssertionError for legacy reasons: older callers catch
    # AssertionError, so changing the base would break them.
    """Raised when a recursion enters into a loop"""
class CheckForRecursionMiddleware(object):
    """Guard that detects internal forwarding loops.

    Wraps ``app`` and records, in the shared ``env`` dictionary captured at
    construction time, every ``PATH_INFO`` visited so far; seeing a path a
    second time raises ``RecursionLoop``.
    """

    def __init__(self, app, env):
        self.app = app
        self.env = env

    def __call__(self, environ, start_response):
        requested = environ.get('PATH_INFO', '')
        if requested in self.env.get('paste.recursive.old_path_info', []):
            raise RecursionLoop(
                "Forwarding loop detected; %r visited twice (internal "
                "redirect path: %s)"
                % (requested, self.env['paste.recursive.old_path_info']))
        # Note: the path recorded comes from the *construction-time* env,
        # not from the per-request environ.
        history = self.env.setdefault('paste.recursive.old_path_info', [])
        history.append(self.env.get('PATH_INFO', ''))
        return self.app(environ, start_response)
class RecursiveMiddleware(object):
    """
    WSGI middleware that enables recursive includes and internal forwards.

    Every request is handed the same wrapped application, which presumably
    responds differently to different URLs; forwarded URLs must be relative
    to this container.

    The interface is entirely through the ``paste.recursive.forward`` and
    ``paste.recursive.include`` environ keys (plus
    ``paste.recursive.include_app_iter`` / ``paste.recursive.script_name``).
    """

    def __init__(self, application, global_conf=None):
        self.application = application

    def __call__(self, environ, start_response):
        app = self.application
        # Expose the recursive helpers to the downstream application.
        environ['paste.recursive.forward'] = Forwarder(app, environ, start_response)
        environ['paste.recursive.include'] = Includer(app, environ, start_response)
        environ['paste.recursive.include_app_iter'] = IncluderAppIter(app, environ, start_response)
        environ['paste.recursive.script_name'] = environ.get('SCRIPT_NAME', '')
        try:
            return app(environ, start_response)
        except ForwardRequestException as exc:
            # Re-dispatch through the exception's factory, guarded against
            # endless forward loops.
            guarded = CheckForRecursionMiddleware(exc.factory(self), environ)
            return guarded(environ, start_response)
class ForwardRequestException(Exception):
    """
    Used to signal that a request should be forwarded to a different location.

    ``url``
        The URL to forward to starting with a ``/`` and relative to
        ``RecursiveMiddleware``. URL fragments can also contain query strings
        so ``/error?code=404`` would be a valid URL fragment.

    ``environ``
        An alternative WSGI environment dictionary to use for the forwarded
        request. If specified it is used *instead* of the ``url_fragment``

    ``factory``
        If specified ``factory`` is used instead of ``url`` or ``environ``.
        ``factory`` is a callable that takes a WSGI application object
        as the first argument and returns an initialised WSGI middleware
        which can alter the forwarded response.

    Basic usage (must have ``RecursiveMiddleware`` present) :

    .. code-block:: python

        from paste.recursive import ForwardRequestException
        def app(environ, start_response):
            if environ['PATH_INFO'] == '/hello':
                start_response("200 OK", [('Content-type', 'text/plain')])
                return [b'Hello World!']
            elif environ['PATH_INFO'] == '/error':
                start_response("404 Not Found", [('Content-type', 'text/plain')])
                return [b'Page not found']
            else:
                raise ForwardRequestException('/error')

        from paste.recursive import RecursiveMiddleware
        app = RecursiveMiddleware(app)

    If you ran this application and visited ``/hello`` you would get a
    ``Hello World!`` message. If you ran the application and visited
    ``/not_found`` a ``ForwardRequestException`` would be raised and caught
    by the ``RecursiveMiddleware``. The ``RecursiveMiddleware`` would then
    return the headers and response from the ``/error`` URL but would display
    a ``404 Not found`` status message.

    You could also specify an ``environ`` dictionary instead of a url. Using
    the same example as before:

    .. code-block:: python

        def app(environ, start_response):
            ... same as previous example ...
            else:
                new_environ = environ.copy()
                new_environ['PATH_INFO'] = '/error'
                raise ForwardRequestException(environ=new_environ)

    Finally, if you want complete control over every aspect of the forward you
    can specify a middleware factory. For example to keep the old status code
    but use the headers and response body from the forwarded response you might
    do this:

    .. code-block:: python

        from paste.recursive import ForwardRequestException
        from paste.recursive import RecursiveMiddleware
        from paste.errordocument import StatusKeeper

        def app(environ, start_response):
            if environ['PATH_INFO'] == '/hello':
                start_response("200 OK", [('Content-type', 'text/plain')])
                return [b'Hello World!']
            elif environ['PATH_INFO'] == '/error':
                start_response("404 Not Found", [('Content-type', 'text/plain')])
                return [b'Page not found']
            else:
                def factory(app):
                    return StatusKeeper(app, status='404 Not Found', url='/error')
                raise ForwardRequestException(factory=factory)

        app = RecursiveMiddleware(app)
    """

    def __init__(
        self,
        url=None,
        # NOTE(review): mutable default is shared across calls; it appears
        # to be only read, never mutated — confirm before changing.
        environ={},
        factory=None,
        path_info=None):
        # Check no incompatible options have been chosen
        if factory and url:
            raise TypeError(
                'You cannot specify factory and a url in '
                'ForwardRequestException')
        elif factory and environ:
            raise TypeError(
                'You cannot specify factory and environ in '
                'ForwardRequestException')
        if url and environ:
            raise TypeError(
                'You cannot specify environ and url in '
                'ForwardRequestException')

        # set the path_info or warn about its use (deprecated spelling of url).
        if path_info:
            if not url:
                warnings.warn(
                    "ForwardRequestException(path_info=...) has been deprecated; please "
                    "use ForwardRequestException(url=...)",
                    DeprecationWarning, 2)
            else:
                raise TypeError('You cannot use url and path_info in ForwardRequestException')
            self.path_info = path_info

        # If the url can be treated as a path_info do that
        # (a url without '?' carries no query string).
        if url and not '?' in str(url):
            self.path_info = url

        # Base middleware: all three factories below derive from this and
        # simply hold on to the wrapped application.
        class ForwardRequestExceptionMiddleware(object):
            def __init__(self, app):
                self.app = app

        # Otherwise construct the appropriate middleware factory.
        # Priority: plain path_info > url-with-query-string > environ > factory.
        if hasattr(self, 'path_info'):
            p = self.path_info
            def factory_(app):
                class PathInfoForward(ForwardRequestExceptionMiddleware):
                    def __call__(self, environ, start_response):
                        environ['PATH_INFO'] = p
                        return self.app(environ, start_response)
                return PathInfoForward(app)
            self.factory = factory_
        elif url:
            def factory_(app):
                class URLForward(ForwardRequestExceptionMiddleware):
                    def __call__(self, environ, start_response):
                        # Split the query string off the forwarded URL.
                        environ['PATH_INFO'] = url.split('?')[0]
                        environ['QUERY_STRING'] = url.split('?')[1]
                        return self.app(environ, start_response)
                return URLForward(app)
            self.factory = factory_
        elif environ:
            def factory_(app):
                class EnvironForward(ForwardRequestExceptionMiddleware):
                    # The per-request environ_ is deliberately ignored;
                    # the captured replacement environ is used instead.
                    def __call__(self, environ_, start_response):
                        return self.app(environ, start_response)
                return EnvironForward(app)
            self.factory = factory_
        else:
            self.factory = factory
class Recursive(object):
    """Base class for the recursive helpers placed into the WSGI environ.

    Captures the application plus the original request's environ and
    start_response; subclasses decide what to do with the rebuilt environ
    by overriding ``activate``.
    """

    def __init__(self, application, environ, start_response):
        self.application = application
        self.original_environ = environ.copy()
        self.previous_environ = environ
        self.start_response = start_response

    def __call__(self, path, extra_environ=None):
        """
        `extra_environ` is an optional dictionary that is also added
        to the forwarded request. E.g., ``{'HTTP_HOST': 'new.host'}``
        could be used to forward to a different virtual host.
        """
        sub_environ = self.original_environ.copy()
        if extra_environ:
            sub_environ.update(extra_environ)
        sub_environ['paste.recursive.previous_environ'] = self.previous_environ
        base_path = self.original_environ.get('SCRIPT_NAME')
        # Absolute paths must live under this container's SCRIPT_NAME;
        # strip that prefix so only the trailing part remains.
        if path.startswith('/'):
            assert path.startswith(base_path), (
                "You can only forward requests to resources under the "
                "path %r (not %r)" % (base_path, path))
            path = path[len(base_path)+1:]
            assert not path.startswith('/')
        # Synthesize a fresh GET request with an empty body.
        sub_environ['PATH_INFO'] = '/' + path
        sub_environ['REQUEST_METHOD'] = 'GET'
        sub_environ['CONTENT_LENGTH'] = '0'
        sub_environ['CONTENT_TYPE'] = ''
        sub_environ['wsgi.input'] = StringIO('')
        return self.activate(sub_environ)

    def activate(self, environ):
        # Subclasses must implement the actual dispatch.
        raise NotImplementedError

    def __repr__(self):
        return '<%s.%s from %s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.original_environ.get('SCRIPT_NAME') or '/')
class Forwarder(Recursive):
    """
    Deprecated: restarts the current request with a new ``PATH_INFO``.

    Must not be called after any headers have been returned; the iterator
    it produces has to be passed back up the call stack:

    .. code-block:: python

        return environ['paste.recursive.forward'](path)

    No meaningful transformation of the response is possible here, since
    headers go straight to the server and cannot be inspected or rewritten.
    """

    def activate(self, environ):
        deprecation_msg = (
            "recursive.Forwarder has been deprecated; please use "
            "ForwardRequestException")
        warnings.warn(deprecation_msg, DeprecationWarning, 2)
        return self.application(environ, self.start_response)
class Includer(Recursive):
    """
    Run a sub-request for the given path (plus any `extra_environ`
    additions or overrides) against the same application, consuming
    its output as it goes.

    Returns an ``IncludedResponse`` object holding the captured
    status, headers and body.
    """
    def activate(self, environ):
        captured = IncludedResponse()
        def capture_start_response(status, headers, exc_info=None):
            if exc_info:
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
            captured.status = status
            captured.headers = headers
            return captured.write
        body_iter = self.application(environ, capture_start_response)
        try:
            # Drain the application's output into the response buffer.
            for chunk in body_iter:
                captured.write(chunk)
        finally:
            # Always release the app iterator and freeze the response,
            # even if iteration raised.
            close = getattr(body_iter, 'close', None)
            if close is not None:
                close()
            captured.close()
        return captured
class IncludedResponse(object):
    """
    Holds the status, headers and body captured from an included
    sub-request.  ``write()`` buffers body chunks until ``close()``
    is called, after which the accumulated text is frozen in
    ``str`` and the buffer is released.
    """

    def __init__(self):
        # Status line and header list, as handed to start_response.
        self.headers = None
        self.status = None
        # Live buffer while the sub-request runs; None once closed.
        self.output = StringIO()
        # Frozen body text, filled in by close().
        self.str = None

    def close(self):
        # Freeze the buffered body, then drop the underlying buffer so
        # further writes are rejected.
        self.str = self.output.getvalue()
        self.output.close()
        self.output = None

    def write(self, s):
        assert self.output is not None, (
            "This response has already been closed and no further data "
            "can be written.")
        self.output.write(s)

    def __str__(self):
        return self.body

    def body__get(self):
        # Read from the live buffer before close(), from the frozen
        # copy afterwards.
        return self.output.getvalue() if self.str is None else self.str

    body = property(body__get)
class IncluderAppIter(Recursive):
    """
    Like Includer, but does not consume the application's output:
    the raw ``app_iter`` is stored on the response object instead.
    (Be sure to call close on the response!)
    """
    def activate(self, environ):
        captured = IncludedAppIterResponse()
        def capture_start_response(status, headers, exc_info=None):
            if exc_info:
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
            captured.status = status
            captured.headers = headers
            return captured.write
        # Hand the un-iterated app_iter straight to the caller.
        captured.app_iter = self.application(environ, capture_start_response)
        return captured
class IncludedAppIterResponse(object):
    """
    Response object produced by ``IncluderAppIter``.

    Stores the status, headers, and raw ``app_iter`` of an included
    sub-request.  Body data pushed through the ``write`` callable is
    collected in ``accumulated``.  ``close()`` must be called exactly
    once when the caller is done with the response.
    """

    def __init__(self):
        self.status = None
        self.headers = None
        # Chunks pushed through the write callable (start_response's
        # returned writer).
        self.accumulated = []
        self.app_iter = None
        self._closed = False

    def close(self):
        assert not self._closed, (
            "Tried to close twice")
        # Mark closed *before* delegating so a second close() is caught
        # even if app_iter.close() raises.
        # (Bug fix: the flag was never set, so the double-close guard
        # above could never trigger.)
        self._closed = True
        if hasattr(self.app_iter, 'close'):
            self.app_iter.close()

    def write(self, s):
        # Bug fix: the chunk was previously dropped --
        # ``self.accumulated.append`` was referenced but never called.
        self.accumulated.append(s)
def make_recursive_middleware(app, global_conf):
    # Paste deploy entry point: wrap *app* in RecursiveMiddleware.
    # global_conf is required by the paste factory signature but unused.
    return RecursiveMiddleware(app)
make_recursive_middleware.__doc__ = __doc__
| apache-2.0 |
town-hall-pinball/project-omega | pin/service/matrix.py | 1 | 3862 | # Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from ..lib import dmd
from ..lib.devices import devices
from ..lib.ui import Canvas
class Matrix(Canvas):
    # Canvas that renders the machine's switch or lamp matrix on the
    # display for the service menu: an 8x8 grid of dots per column bank,
    # with extra SD/SF columns for switches, a box for "on" devices and
    # a pulsing cross for the selected device.
    #
    # When to render a switch cell as a box: "closed" uses
    # device.is_closed(), "active" uses device.active.
    box_when = "closed"
    # Device family to render: "switches" draws dedicated/flipper
    # columns and separators; anything else is treated as lamps.
    devices = None
    # Currently highlighted device, or None.
    selected = None
    # Brightness of the selection marker; cycles 0x8..0xf.
    pulse_color = 0x8
    # Timer handle for the pulse animation (from self.handler.wait).
    pulse_timer = None
    # Scheduler providing wait()/cancel() for the pulse animation.
    handler = None
    def __init__(self, handler=None, box_when=None, devices="switches"):
        super(Matrix, self).__init__(left=0, top=0, width=40)
        if box_when:
            self.box_when = box_when
        self.devices = devices
        self.handler = handler
        self.layout()
    def redraw(self):
        # Repaint the whole grid from current device state.
        self.clear()
        if self.devices == "switches":
            # Dedicated switches column plus separator line.
            self.dot_column(2, "SD")
            self.vline(5, 2, dmd.height - 4, color=0x8)
        col = 1
        # Eight numbered columns (S1..S8 or L1..L8), 3 pixels apart.
        for x in xrange(8, 8 + (8 * 3), 3):
            prefix = "S" if self.devices == "switches" else "L"
            self.dot_column(x, prefix + str(col))
            col += 1
        x += 3
        if self.devices == "switches":
            # Separator and flipper switches column on the right.
            self.vline(x, 2, dmd.height - 4, color=0x8)
            x += 3
            self.dot_column(x, "SF")
        self.invalidate()
    def select(self, switch):
        # Change (or clear, with switch=None) the highlighted device.
        self.handler.cancel(self.pulse_timer)
        self.selected = switch
        if self.handler and self.selected:
            self.pulse_selection()
        elif self.handler and not self.selected:
            self.redraw()
    def pulse_selection(self):
        # Step the marker brightness and reschedule itself every 100ms.
        self.pulse_color += 0x2
        if self.pulse_color > 0xf:
            self.pulse_color = 0x8
        self.redraw()
        self.pulse_timer = self.handler.wait(0.1, self.pulse_selection)
    def cell_rendering(self, device):
        # Decide how a single grid cell is drawn:
        # "empty" (no device), "selected", "box" (device on), or "dot".
        if not device:
            return "empty"
        if device == self.selected:
            return "selected"
        if self.devices == "switches":
            if self.box_when == "closed" and device.is_closed():
                return "box"
            if self.box_when == "active" and device.active:
                return "box"
        else:
            if device.is_active():
                return "box"
        return "dot"
    def dot_column(self, x, prefix):
        # Draw one vertical column of 8 cells at pixel column x; cells
        # map to device idents prefix+"1" .. prefix+"8".
        y = 5
        row = 1
        for y in xrange(5, 5 + (8 * 3), 3):
            ident = prefix + str(row)
            device = devices.get(ident)
            rendering = self.cell_rendering(device)
            if rendering == "box":
                self.box(x - 1, y - 1, 3, 3)
            elif rendering == "dot":
                self.dot(x, y)
            elif rendering == "selected":
                # 5-dot cross at the pulsing brightness.
                self.dot(x, y, self.pulse_color)
                self.dot(x-1, y-1, self.pulse_color)
                self.dot(x-1, y+1, self.pulse_color)
                self.dot(x+1, y-1, self.pulse_color)
                self.dot(x+1, y+1, self.pulse_color)
            row += 1
| mit |
fuku-ys/earthquake | pyearthquake/OLD.orchestrator/explorer.py | 1 | 10760 | from abc import ABCMeta, abstractmethod
import colorama
import random
import json
from eventlet.greenthread import sleep
from eventlet.timeout import Timeout
from eventlet.queue import *
import six
import time
from .. import LOG as _LOG
from ..signal.signal import EventBase, ActionBase
from .digestible import DigestibleBase
LOG = _LOG.getChild('orchestrator.explorer')
@six.add_metaclass(ABCMeta)
class ExplorerBase(object):
    # Abstract base for state-space exploration policies.  The worker()
    # loop collects events from the orchestrator, lets watchers turn
    # them into "digestibles" (event/action pairs), picks one via the
    # subclass's choose_digestible(), invokes its action, and advances
    # the abstract execution state.
    def __init__(self):
        # self.graph = None
        # Queue of EventBase instances pushed by send_event().
        self._event_q = Queue()
        # Orchestrator and state fields are bound later by
        # init_with_orchestrator().
        self.oc = None
        self.state = None
        self.initial_state = None
        self.visited_terminal_states = {} # key: state, value: count (TODO: MOVE TO LIBEARTHQUAKE.SO)
        # Event-collection window in milliseconds; subclasses may override.
        self.time_slice = 0
    def init_with_orchestrator(self, oc, initial_state):
        """
        :param oc: OrchestratorBase
        :param initial_state: StateBase
        :return: None
        """
        self.oc = oc
        self.initial_state = initial_state
        # Work on a copy so the pristine initial state can be restored
        # on reset (see on_terminal_state()).
        self.state = self.initial_state.make_copy()
        LOG.debug(colorama.Back.BLUE +
                  'set initial state=%s' +
                  colorama.Style.RESET_ALL, self.state.to_short_str())
        # self.graph = Graph(self.state)
    def send_event(self, event):
        """
        Send event *to* explorer
        :param event: EventBase
        :return: None
        """
        assert isinstance(event, EventBase)
        self._event_q.put(event)
    def recv_events(self, timeout_msecs):
        """
        Let explorer receive events
        :param timeout_msecs: int
        :return: list of EventBase drained within the timeout window
        """
        events = []
        # Drain the queue until the eventlet Timeout fires; the Timeout
        # exception is the normal exit path of this loop.
        timeout = Timeout(timeout_msecs / 1000.0)
        try:
            while True:
                event = self._event_q.get()
                events.append(event)
        except Timeout:
            pass
        except Exception as e:
            raise e
        finally:
            timeout.cancel()
        return events
    def _worker__print_events_and_digestibles(self, digestibles, new_events, new_digestibles):
        # Debug-log the carried-over digestibles, the fresh events, and
        # the digestibles the watchers produced for them.
        if digestibles:
            LOG.debug('Before state %s, the following OLD %d digestibles had been yielded', self.state.to_short_str(),
                      len(digestibles))
            for digestible in digestibles: LOG.debug('* %s', digestible)
        LOG.debug('In state %s, the following %d events happend', self.state.to_short_str(), len(new_events))
        for e in new_events:
            try:
                LOG.debug('* %f: %s', e.recv_timestamp, e.abstract_msg)
            except Exception:
                LOG.debug('* %s', e)
        LOG.debug('In state %s, the following NEW %d digestibles were yielded for the above %d events',
                  self.state.to_short_str(), len(new_digestibles), len(new_events))
        for new_digestible in new_digestibles: LOG.debug('* %s', new_digestible)
    def worker(self):
        # Main exploration loop (runs forever): collect events, map them
        # to digestibles via watchers, pick and execute one, transition.
        digestibles = []
        while True:
            if self.oc.termination_detector.is_terminal_state(self.state): self.state = self.on_terminal_state()
            new_events = self.recv_events(timeout_msecs=self.time_slice)
            if not new_events and not digestibles: continue
            new_digestibles = []
            for e in new_events:
                e_handled = False
                # First matching watchers get the event; otherwise it
                # falls through to the default watcher.
                for w in self.oc.watchers:
                    if w.handles(e): new_digestibles.extend(w.on_event(self.state, e)); e_handled = True
                if not e_handled: new_digestibles.extend(self.oc.default_watcher.on_event(self.state, e))
            self._worker__print_events_and_digestibles(digestibles, new_events, new_digestibles)
            digestibles.extend(new_digestibles)
            if not digestibles: LOG.warn('No DIGESTIBLE, THIS MIGHT CAUSE FALSE DEADLOCK, state=%s',
                                         self.state.to_short_str())
            next_state, digestibles = self.do_it(digestibles)
            if not digestibles: LOG.warn('No DIGESTIBLE, THIS MIGHT CAUSE FALSE DEADLOCK, next_state=%s',
                                         next_state.to_short_str())
            LOG.debug('transit from %s to %s', self.state.to_short_str(), next_state.to_short_str())
            self.state = next_state
    def do_it(self, digestibles):
        """
        select a digestible from digestibles and do it in the state.
        returns: (next_state, other_digestibles)
        FIXME: rename me!
        """
        if not digestibles: return self.state, []
        chosen_digestible = self.choose_digestible(digestibles)
        LOG.debug('Chosen digestible: %s', chosen_digestible)
        # The chosen digestible must be one of the candidates (compared
        # by event uuid, as hashes may differ).
        assert (any(digestible.event.uuid == chosen_digestible.event.uuid for digestible in digestibles))
        digestibles_len_before_remove = len(digestibles)
        digestibles.remove(chosen_digestible)
        assert len(digestibles) == digestibles_len_before_remove - 1, 'hash race?'
        other_digestibles = digestibles
        if chosen_digestible:
            next_state = self.do_transition(chosen_digestible)
        else:
            LOG.warn('No DIGESTIBLE chosen, THIS MIGHT CAUSE FALSE DEADLOCK, state=%s', self.state.to_short_str())
            next_state = self.state
        ## NOTE: as other digestibles are also enabled in the NEXT state, we return other digestibles here.
        ## the worker will handle other digestibles in the next round.
        return next_state, other_digestibles
    @abstractmethod
    def choose_digestible(self, digestibles):
        # Policy hook: pick one digestible from a non-empty list.
        pass
    def call_action(self, action):
        # Delegate action execution to the orchestrator; subclasses may
        # wrap this (e.g. to add delays).
        self.oc.call_action(action)
    def do_transition(self, digestible):
        # Execute the digestible's action and derive the successor state.
        assert isinstance(digestible, DigestibleBase)
        LOG.debug(colorama.Back.BLUE +
                  "Invoking the action:\n" +
                  "  action=%s\n" +
                  "  event=%s\n" +
                  "  state=%s\n" +
                  "  digestible=%s\n" +
                  colorama.Style.RESET_ALL,
                  digestible.action, digestible.event,
                  self.state.to_short_str(),
                  digestible)
        self.call_action(digestible.action)
        next_state = self.state.make_copy()
        next_state.append_digestible(digestible)
        LOG.debug(colorama.Back.BLUE +
                  'State Transition: %s->%s' +
                  colorama.Style.RESET_ALL, self.state.to_short_str(), next_state.to_short_str())
        # self.graph.visit_edge(self.state, next_state, digestible)
        ## NOTE: worker sets self.state to next_state
        return next_state
    def stat_on_terminal_state(self, past_all_states, past_visit_count, past_visit_count_sum):
        """
        Log visit statistics for the just-reached terminal state.
        TODO: move to LIBEARTHQUAKE.SO
        """
        if past_visit_count == 0:
            banner = 'TERMINAL STATE(FRONTIER)'
            new_all_states = past_all_states + 1
        else:
            banner = 'TERMINAL STATE(REVISITED)'
            new_all_states = past_all_states
        LOG.info(
            colorama.Back.RED + '%s state %s, count=%d->%d, count_sum=%d->%d, all_states=%d->%d' + colorama.Style.RESET_ALL,
            banner,
            self.state.to_short_str(),
            past_visit_count, past_visit_count + 1,
            past_visit_count_sum, past_visit_count_sum + 1,
            past_all_states, new_all_states)
    def regist_state_to_libeq(self):
        # Record the terminal state's JSON form in the native
        # libearthquake execution-history store.
        json_dict = self.state.to_jsondict()
        json_str = json.dumps(json_dict)
        short_str = self.state.to_short_str()
        rc = self.oc.libearthquake.EQRegistExecutionHistory_UnstableAPI(short_str, json_str)
        assert rc == 0
    def on_terminal_state(self):
        # Called by worker() when the termination detector fires:
        # persist the state, update stats, notify watchers, and return a
        # fresh copy of the initial state to restart exploration.
        LOG.debug(colorama.Back.RED +
                  '*** REACH TERMINAL STATE (%s) ***' +
                  colorama.Style.RESET_ALL, self.state.to_short_str())
        self.regist_state_to_libeq()
        ## make stat (TODO: move to LIBEARTHQUAKE.SO)
        all_states = len(self.visited_terminal_states)
        visit_count_sum = sum(self.visited_terminal_states.values())
        if self.state in self.visited_terminal_states:
            visit_count = self.visited_terminal_states[self.state]
        else:
            visit_count = 0
            self.visited_terminal_states[self.state] = 0
        self.stat_on_terminal_state(all_states, visit_count, visit_count_sum)
        self.visited_terminal_states[self.state] += 1
        ## notify termination to watchers
        for w in self.oc.watchers: w.on_terminal_state(self.state)
        ## Reset
        next_state = self.initial_state.make_copy()
        LOG.debug('Reset to %s', next_state.to_short_str())
        ## notify reset to watchers
        for w in self.oc.watchers: w.on_reset()
        return next_state
class DumbExplorer(ExplorerBase):
    """
    Trivial exploration policy: always take the first digestible that
    was yielded, optionally sleeping before each action is invoked.
    """
    def __init__(self, sleep_msecs=0):
        super(DumbExplorer, self).__init__()
        # Delay in milliseconds inserted before every action; 0 disables it.
        self.sleep_msecs = sleep_msecs
    def choose_digestible(self, digestibles):
        assert digestibles
        first = digestibles[0]
        return first
    def call_action(self, action):
        if self.sleep_msecs:
            delay_secs = self.sleep_msecs / 1000.0
            sleep(delay_secs)
        super(DumbExplorer, self).call_action(action)
class RandomExplorer(ExplorerBase):
    """
    Exploration policy that picks one of the pending digestibles
    uniformly at random every `time_slice` milliseconds.
    """
    def __init__(self, time_slice):
        super(RandomExplorer, self).__init__()
        # Event-collection window in milliseconds.
        self.time_slice = time_slice
    def choose_digestible(self, digestibles):
        assert digestibles
        index = random.randint(0, len(digestibles) - 1)
        return digestibles[index]
class TimeBoundedRandomExplorer(RandomExplorer):
    """
    RandomExplorer variant with an upper bound on how long any single
    digestible may wait.  If a digestible has been pending for more
    than `time_bound` milliseconds, it is dispatched immediately (and
    the time slice is shrunk to 0 to flush it); otherwise a random
    digestible is chosen as usual.
    """
    def __init__(self, time_slice, time_bound):
        super(TimeBoundedRandomExplorer, self).__init__(time_slice)
        # Remember the normal window so it can be restored after a
        # hurried round.
        self.saved_time_slice = time_slice
        self.time_bound = time_bound  # msecs
    def choose_digestible(self, digestibles):
        assert digestibles
        now = time.time()
        # Digestibles that have already waited longer than time_bound.
        # Bug fix: this used filter(), whose result has no len() and is
        # not indexable on Python 3; a list comprehension is equivalent
        # on Python 2 and correct on both.
        hurried = [d for d in digestibles
                   if (now - d.event.recv_timestamp) * 1000.0 > self.time_bound]
        if hurried:
            LOG.debug('Hurried to send the following %d digestibles, now=%s', len(hurried), now)
            LOG.debug(hurried)
            # Shrink the window to zero so the overdue digestible is
            # flushed immediately.
            self.time_slice = 0
            chosen_digestible = hurried[0]
        else:
            self.time_slice = self.saved_time_slice
            r = random.randint(0, len(digestibles) - 1)
            chosen_digestible = digestibles[r]
        return chosen_digestible
class GreedyExplorer(ExplorerBase):
    """
    Placeholder for the graph-guided greedy exploration policy.

    Instantiating this class always raises NotImplementedError until
    the new graph storage lands (Issue #23).
    """
    def __init__(self, time_slice):
        # Raise immediately instead of first calling the base
        # constructor: the previous code invoked
        # super().__init__(time_slice), but ExplorerBase.__init__ takes
        # no arguments, so construction died with a TypeError before
        # the intended NotImplementedError could be raised.
        raise NotImplementedError(
            "GreedyExplorer is under refactoring since July 8, 2015. This will revive when new graph storage is implemented (Issue #23)")
    def choose_digestible(self, digestibles):
        # Unreachable: the constructor always raises.
        pass
| apache-2.0 |
kmatzen/ansible | lib/ansible/plugins/action/fail.py | 227 | 1391 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Deliberately fail the task, with an optional custom message '''
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        task_vars = task_vars if task_vars is not None else dict()
        result = super(ActionModule, self).run(tmp, task_vars)
        # Use the user-supplied 'msg' argument when present, otherwise
        # fall back to a generic failure message.
        args = self._task.args
        if args and 'msg' in args:
            msg = args.get('msg')
        else:
            msg = 'Failed as requested from task'
        result['failed'] = True
        result['msg'] = msg
        return result
| gpl-3.0 |
mozillazg/firefly | firefly/views/api/comment.py | 9 | 1060 | # coding=utf-8
from __future__ import absolute_import
from flask_restful import Resource
from firefly.models.topic import Comment
from firefly.views.utils import short_timesince
from firefly.views.api.consts import OK
from firefly.views.api.utils import generate_status_fields
class ReplyApi(Resource):
    """REST resource returning the replies of a single comment."""

    def get(self, id):
        comment = Comment.objects.get_or_404(id=id)
        replies = comment.get_replies()
        payload = generate_status_fields(OK)
        payload.update({'result': [self._serialize(reply) for reply in replies]})
        return payload

    @staticmethod
    def _serialize(reply):
        # Flatten one reply and its author into the JSON shape the
        # frontend expects.
        author = reply.author
        return {
            'id': reply.id,
            'author_name': author.name,
            'author_avatar': author.avatar(),
            'author_url': author.url(),
            'author_title': '',
            'content': reply.content,
            'short_create_at': short_timesince(reply.created_at),
            'create_at': reply.created_at.strftime('%H:%M %Y-%m-%d')
        }
| mit |
caiocsalvador/whats_the_craic | lib/python3.4/site-packages/pip/_vendor/requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
try:
    # The ABC aliases in the collections top level were removed in
    # Python 3.10; the canonical home since 3.3 is collections.abc.
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping


class CaseInsensitiveDict(MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the case-preserved keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """Dictionary subclass whose item lookups read instance attributes.

    Item access never raises: keys that were never set fall through to
    ``None`` (or the supplied default for ``get``).
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return "<lookup '%s'>" % (self.name)

    def __getitem__(self, key):
        # Lookups are served from __dict__ rather than the dict
        # storage, with missing keys defaulting to None.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| mit |
tarzan0820/addons-yelizariev | base_replace_ref/models.py | 16 | 3169 | from openerp import api, models, fields, SUPERUSER_ID, exceptions
class replace_rule(models.Model):
    # Rule that rewrites references across records: for each (src, dst)
    # value pair, every configured relational field pointing at src is
    # repointed at dst.
    _name = 'base_replace_ref.rule'
    name = fields.Char('Name', required=True)
    # While draft is True the rule cannot be applied (see apply()).
    draft = fields.Boolean('Draft', default=True)
    # Target model whose records are being replaced.
    model_id = fields.Many2one('ir.model', 'Model', required=True)
    value_line_ids = fields.One2many('base_replace_ref.rule.value_line', 'rule_id', string='Value lines')
    field_line_ids = fields.One2many('base_replace_ref.rule.field_line', 'rule_id', string='Field lines')
    @api.one
    def find_fields(self):
        # Populate field_line_ids with every relational field (in any
        # model) whose relation is the configured target model,
        # skipping fields already listed.
        if not self.model_id:
            raise exceptions.Warning('Define Model first')
        self.draft = True
        res = []
        cur_fields = [line.field_id.id for line in self.field_line_ids]
        for field in self.env['ir.model.fields'].search([('relation', '=', self.model_id.model)]):
            if field.id in cur_fields:
                continue
            self.env['base_replace_ref.rule.field_line'].create({'rule_id': self.id, 'model_id': field.model_id.id, 'field_id': field.id})
    @api.one
    def clear_fields(self):
        # Remove all detected field lines.
        self.field_line_ids.unlink()
    @api.model
    def parse_value(self, model, value):
        # Resolve a value-line entry to a database id: either a literal
        # integer id or an XML-id reference resolvable via env.ref().
        # Empty values resolve to None.
        if not value:
            return None
        try:
            return int(value)
        except ValueError:
            pass
        res = self.env.ref(value)
        assert res, 'Value not found for ref %s' % value
        return res.id
    @api.one
    def apply(self):
        # Execute the rule: for every (src, dst) value pair, rewrite
        # each configured field.  Refuses to run while still a draft.
        if self.draft:
            raise exceptions.Warning('You cannot apply draft rule')
        for vline in self.value_line_ids:
            src = self.parse_value(self.model_id.model, vline.src)
            dst = self.parse_value(self.model_id.model, vline.dst)
            for fline in self.field_line_ids:
                self.replace(fline.field_id, src, dst)
    @api.model
    def replace(self, field_id, src, dst):
        # Repoint one relational field from src to dst.
        # - one2many: move src's parent link to dst (detach src, attach dst).
        # - many2one: rewrite every record referencing src to dst.
        # - many2many: unlink src (command 3) and link dst (command 4).
        model = self.env[field_id.model_id.model]
        if field_id.ttype == 'one2many':
            r = self.env[field_id.relation].browse(src)
            parent_id = getattr(r, field_id.relation_field).id
            r.write({field_id.relation_field: None})
            self.env[field_id.relation].browse(dst).write({field_id.relation_field: parent_id})
            return True
        res = model.search([ (field_id.name, '=', src)])
        if field_id.ttype == 'many2one':
            res.write({field_id.name: dst})
        if field_id.ttype == 'many2many':
            res.write({field_id.name: [(3, src, False)]})
            res.write({field_id.name: [(4, dst, False)]})
class value_line(models.Model):
    # One (source, destination) pair for a replace rule; each side is a
    # record id or an XML-id reference (resolved by replace_rule.parse_value).
    _name = 'base_replace_ref.rule.value_line'
    _src_dst_help = 'ID or Reference'
    rule_id = fields.Many2one('base_replace_ref.rule')
    src = fields.Char('Source', help=_src_dst_help, required=True)
    dst = fields.Char('Destination', help=_src_dst_help)
class field_line(models.Model):
    # One relational field a replace rule will rewrite; rows are
    # auto-populated by replace_rule.find_fields().
    _name = 'base_replace_ref.rule.field_line'
    rule_id = fields.Many2one('base_replace_ref.rule')
    model_id = fields.Many2one('ir.model', string='Model')
    field_id = fields.Many2one('ir.model.fields', string='Field', required=True)
| lgpl-3.0 |
gui2dev/android_kernel_motorola_tinboost | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Index of the current config value; doubles as the sysfs key.
i = 0
while True:
    # Each record in cxacru-cf.bin is one packed little-endian 32-bit value.
    # NOTE(review): assumes Python 2 semantics, where sys.stdin.read()
    # yields byte strings acceptable to struct.unpack -- TODO confirm
    # before running under Python 3.
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a record boundary: done.
        break
    elif len(buf) != 4:
        # Truncated record: terminate the output line, report, and bail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        # Space-separate the key=value pairs on one line.
        sys.stdout.write(" ")
    # Emit "<index in hex>=<decimal value>" as expected by the
    # adsl_config sysfs attribute.
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
mancoast/CPythonPyc_test | cpython/272_test_normalization.py | 76 | 3130 | from test.test_support import run_unittest, open_urlresource
import unittest
from httplib import HTTPException
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
def check_version(testfile):
    """Return True if the file's first line mentions the Unicode
    database version of the running interpreter."""
    header_line = testfile.readline()
    return header_line.find(unidata_version) >= 0
class RangeError(Exception):
    """Raised when a test line contains a code point above sys.maxunicode."""
# Thin wrappers over unicodedata.normalize for the four normalization
# forms.  The parameter was renamed from ``str`` (which shadowed the
# builtin) to ``s``; all call sites in this file call positionally.
def NFC(s):
    """Return *s* in Normalization Form C (canonical composition)."""
    return normalize("NFC", s)

def NFKC(s):
    """Return *s* in Normalization Form KC (compatibility composition)."""
    return normalize("NFKC", s)

def NFD(s):
    """Return *s* in Normalization Form D (canonical decomposition)."""
    return normalize("NFD", s)

def NFKD(s):
    """Return *s* in Normalization Form KD (compatibility decomposition)."""
    return normalize("NFKD", s)
def unistr(data):
    """Convert a space-separated string of hex code points (as used in
    NormalizationTest.txt) to a unicode string.

    Raises RangeError if any code point exceeds sys.maxunicode.
    """
    codepoints = [int(token, 16) for token in data.split(" ")]
    if any(cp > sys.maxunicode for cp in codepoints):
        raise RangeError
    return u"".join(unichr(cp) for cp in codepoints)
class NormalizationTest(unittest.TestCase):
    # Conformance test driven by the Unicode consortium's
    # NormalizationTest.txt data file (fetched over HTTP).
    def test_main(self):
        part = None
        # Characters appearing in column c1 of @Part1; every other code
        # point must be invariant under all four normalization forms.
        part1_data = {}
        # Hit the exception early
        try:
            testdata = open_urlresource(TESTDATAURL, check_version)
        except (IOError, HTTPException):
            self.skipTest("Could not retrieve " + TESTDATAURL)
        for line in testdata:
            # Strip comments and blank lines.
            if '#' in line:
                line = line.split('#')[0]
            line = line.strip()
            if not line:
                continue
            if line.startswith("@Part"):
                part = line.split()[0]
                continue
            # Each data line holds five semicolon-separated columns of
            # hex code points: source and its four normalizations.
            try:
                c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
            except RangeError:
                # Skip unsupported characters;
                # try atleast adding c1 if we are in part1
                if part == "@Part1":
                    try:
                        c1 = unistr(line.split(';')[0])
                    except RangeError:
                        pass
                    else:
                        part1_data[c1] = 1
                continue
            # Perform tests (the invariants prescribed by UAX #15).
            self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
            self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
            self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
            self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
            self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
                            NFKC(c3) == NFKC(c4) == NFKC(c5),
                            line)
            self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
                            NFKD(c3) == NFKD(c4) == NFKD(c5),
                            line)
            # Record part 1 data
            if part == "@Part1":
                part1_data[c1] = 1
        # Perform tests for all other data: any code point not listed
        # in Part1 must be unchanged by every normalization form.
        for c in range(sys.maxunicode+1):
            X = unichr(c)
            if X in part1_data:
                continue
            self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
    def test_bug_834676(self):
        # Check for bug 834676: normalizing a lone surrogate pair used
        # to crash; it must merely not raise.
        normalize('NFC', u'\ud55c\uae00')
def test_main():
    # Entry point used by regrtest to run the normalization test suite.
    run_unittest(NormalizationTest)
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
drawks/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vip.py | 24 | 47319 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
# Standard Ansible module metadata: community-supported module still in
# preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip
short_description: Configure virtual IP for IPv4 in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and vip category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vip:
description:
- Configure virtual IP for IPv4.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
arp-reply:
description:
- Enable to respond to ARP requests for this virtual IP address. Enabled by default.
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
dns-mapping-ttl:
description:
- DNS mapping TTL (Set to zero to use TTL in DNS response, default = 0).
extaddr:
description:
- External FQDN address name.
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
extintf:
description:
- Interface connected to the source network that receives the packets that will be forwarded to the destination network. Source system
.interface.name.
extip:
description:
- IP address or address range on the external interface that you want to map to an address or address range on the destination network.
extport:
description:
- Incoming port number range that you want to map to a port number range on the destination network.
gratuitous-arp-interval:
description:
- Enable to have the VIP send gratuitous ARPs. 0=disabled. Set from 5 up to 8640000 seconds to enable.
http-cookie-age:
description:
- Time in minutes that client web browsers should keep a cookie. Default is 60 seconds. 0 = no time limit.
http-cookie-domain:
description:
- Domain that HTTP cookie persistence should apply to.
http-cookie-domain-from-host:
description:
- Enable/disable use of HTTP cookie domain from host field in HTTP.
choices:
- disable
- enable
http-cookie-generation:
description:
- Generation of HTTP cookie to be accepted. Changing invalidates all existing cookies.
http-cookie-path:
description:
- Limit HTTP cookie persistence to the specified path.
http-cookie-share:
description:
- Control sharing of cookies across virtual servers. same-ip means a cookie from one virtual server can be used by another. Disable stops
cookie sharing.
choices:
- disable
- same-ip
http-ip-header:
description:
- For HTTP multiplexing, enable to add the original client IP address in the XForwarded-For HTTP header.
choices:
- enable
- disable
http-ip-header-name:
description:
- For HTTP multiplexing, enter a custom HTTPS header name. The original client IP address is added to this header. If empty,
X-Forwarded-For is used.
http-multiplex:
description:
- Enable/disable HTTP multiplexing.
choices:
- enable
- disable
https-cookie-secure:
description:
- Enable/disable verification that inserted HTTPS cookies are secure.
choices:
- disable
- enable
id:
description:
- Custom defined ID.
ldb-method:
description:
- Method used to distribute sessions to real servers.
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
- http-host
mapped-addr:
description:
- Mapped FQDN address name. Source firewall.address.name.
mappedip:
description:
- IP address or address range on the destination network to which the external IP address is mapped.
suboptions:
range:
description:
- Mapped IP range.
required: true
mappedport:
description:
- Port number range on the destination network to which the external port number range is mapped.
max-embryonic-connections:
description:
- Maximum number of incomplete connections.
monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
name:
description:
- Virtual IP name.
required: true
nat-source-vip:
description:
- Enable to prevent unintended servers from using a virtual IP. Disable to use the actual IP address of the server as the source address.
choices:
- disable
- enable
outlook-web-access:
description:
- Enable to add the Front-End-Https header for Microsoft Outlook Web Access.
choices:
- disable
- enable
persistence:
description:
- Configure how to make sure that clients connect to the same server every time they make a request that is part of the same session.
choices:
- none
- http-cookie
- ssl-session-id
portforward:
description:
- Enable/disable port forwarding.
choices:
- disable
- enable
portmapping-type:
description:
- Port mapping type.
choices:
- 1-to-1
- m-to-n
protocol:
description:
- Protocol to use when forwarding packets.
choices:
- tcp
- udp
- sctp
- icmp
realservers:
description:
- Select the real servers that this server load balancing VIP will distribute traffic to.
suboptions:
client-ip:
description:
- Only clients in this IP range can connect to this real server.
healthcheck:
description:
- Enable to check the responsiveness of the real server before forwarding traffic.
choices:
- disable
- enable
- vip
holddown-interval:
description:
- Time in seconds that the health check monitor continues to monitor an unresponsive server that should be active.
http-host:
description:
- HTTP server domain name in HTTP header.
id:
description:
- Real server ID.
required: true
ip:
description:
- IP address of the real server.
max-connections:
description:
- Max number of active connections that can be directed to the real server. When reached, sessions are sent to other real servers.
monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status. Source firewall
.ldb-monitor.name.
port:
description:
- Port for communicating with the real server. Required if port forwarding is enabled.
status:
description:
- Set the status of the real server to active so that it can accept traffic, or on standby or disabled so no traffic is sent.
choices:
- active
- standby
- disable
weight:
description:
- Weight of the real server. If weighted load balancing is enabled, the server with the highest weight gets more connections.
server-type:
description:
- Protocol to be load balanced by the virtual server (also called the server load balance virtual IP).
choices:
- http
- https
- imaps
- pop3s
- smtps
- ssl
- tcp
- udp
- ip
service:
description:
- Service name.
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
src-filter:
description:
- Source address filter. Each address must be either an IP/subnet (x.x.x.x/n) or a range (x.x.x.x-y.y.y.y). Separate addresses with spaces.
suboptions:
range:
description:
- Source-filter range.
required: true
srcintf-filter:
description:
- Interfaces to which the VIP applies. Separate the names with spaces.
suboptions:
interface-name:
description:
- Interface name. Source system.interface.name.
required: true
ssl-algorithm:
description:
- Permitted encryption algorithms for SSL sessions according to encryption strength.
choices:
- high
- medium
- low
- custom
ssl-certificate:
description:
- The name of the SSL certificate to use for SSL acceleration. Source vpn.certificate.local.name.
ssl-cipher-suites:
description:
- SSL/TLS cipher suites acceptable from a client, ordered by priority.
suboptions:
cipher:
description:
- Cipher suite name.
choices:
- TLS-RSA-WITH-3DES-EDE-CBC-SHA
- TLS-DHE-RSA-WITH-DES-CBC-SHA
- TLS-DHE-DSS-WITH-DES-CBC-SHA
priority:
description:
- SSL/TLS cipher suites priority.
required: true
versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-client-fallback:
description:
- Enable/disable support for preventing Downgrade Attacks on client connections (RFC 7507).
choices:
- disable
- enable
ssl-client-renegotiation:
description:
- Allow, deny, or require secure renegotiation of client sessions to comply with RFC 5746.
choices:
- allow
- deny
- secure
ssl-client-session-state-max:
description:
- Maximum number of client to FortiGate SSL session states to keep.
ssl-client-session-state-timeout:
description:
- Number of minutes to keep client to FortiGate SSL session state.
ssl-client-session-state-type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the client and the FortiGate.
choices:
- disable
- time
- count
- both
ssl-dh-bits:
description:
- Number of bits to use in the Diffie-Hellman exchange for RSA encryption of SSL sessions.
choices:
- 768
- 1024
- 1536
- 2048
- 3072
- 4096
ssl-hpkp:
description:
- Enable/disable including HPKP header in response.
choices:
- disable
- enable
- report-only
ssl-hpkp-age:
description:
- Number of seconds the client should honour the HPKP setting.
ssl-hpkp-backup:
description:
- Certificate to generate backup HPKP pin from. Source vpn.certificate.local.name vpn.certificate.ca.name.
ssl-hpkp-include-subdomains:
description:
- Indicate that HPKP header applies to all subdomains.
choices:
- disable
- enable
ssl-hpkp-primary:
description:
- Certificate to generate primary HPKP pin from. Source vpn.certificate.local.name vpn.certificate.ca.name.
ssl-hpkp-report-uri:
description:
- URL to report HPKP violations to.
ssl-hsts:
description:
- Enable/disable including HSTS header in response.
choices:
- disable
- enable
ssl-hsts-age:
description:
- Number of seconds the client should honour the HSTS setting.
ssl-hsts-include-subdomains:
description:
- Indicate that HSTS header applies to all subdomains.
choices:
- disable
- enable
ssl-http-location-conversion:
description:
- Enable to replace HTTP with HTTPS in the reply's Location HTTP header field.
choices:
- enable
- disable
ssl-http-match-host:
description:
- Enable/disable HTTP host matching for location conversion.
choices:
- enable
- disable
ssl-max-version:
description:
- Highest SSL/TLS version acceptable from a client.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-min-version:
description:
- Lowest SSL/TLS version acceptable from a client.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-mode:
description:
- Apply SSL offloading between the client and the FortiGate (half) or from the client to the FortiGate and from the FortiGate to the
server (full).
choices:
- half
- full
ssl-pfs:
description:
- Select the cipher suites that can be used for SSL perfect forward secrecy (PFS). Applies to both client and server sessions.
choices:
- require
- deny
- allow
ssl-send-empty-frags:
description:
- Enable/disable sending empty fragments to avoid CBC IV attacks (SSL 3.0 & TLS 1.0 only). May need to be disabled for compatibility with
older systems.
choices:
- enable
- disable
ssl-server-algorithm:
description:
- Permitted encryption algorithms for the server side of SSL full mode sessions according to encryption strength.
choices:
- high
- medium
- low
- custom
- client
ssl-server-cipher-suites:
description:
- SSL/TLS cipher suites to offer to a server, ordered by priority.
suboptions:
cipher:
description:
- Cipher suite name.
choices:
- TLS-RSA-WITH-3DES-EDE-CBC-SHA
- TLS-DHE-RSA-WITH-DES-CBC-SHA
- TLS-DHE-DSS-WITH-DES-CBC-SHA
priority:
description:
- SSL/TLS cipher suites priority.
required: true
versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl-server-max-version:
description:
- Highest SSL/TLS version acceptable from a server. Use the client setting by default.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
- client
ssl-server-min-version:
description:
- Lowest SSL/TLS version acceptable from a server. Use the client setting by default.
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
- client
ssl-server-session-state-max:
description:
- Maximum number of FortiGate to Server SSL session states to keep.
ssl-server-session-state-timeout:
description:
- Number of minutes to keep FortiGate to Server SSL session state.
ssl-server-session-state-type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the server and the FortiGate.
choices:
- disable
- time
- count
- both
type:
description:
- Configure a static NAT, load balance, server load balance, DNS translation, or FQDN VIP.
choices:
- static-nat
- load-balance
- server-load-balance
- dns-translation
- fqdn
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
weblogic-server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebLogic server.
choices:
- disable
- enable
websphere-server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebSphere server.
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure virtual IP for IPv4.
fortios_firewall_vip:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vip:
state: "present"
arp-reply: "disable"
color: "4"
comment: "Comment."
dns-mapping-ttl: "6"
extaddr:
-
name: "default_name_8 (source firewall.address.name firewall.addrgrp.name)"
extintf: "<your_own_value> (source system.interface.name)"
extip: "<your_own_value>"
extport: "<your_own_value>"
gratuitous-arp-interval: "12"
http-cookie-age: "13"
http-cookie-domain: "<your_own_value>"
http-cookie-domain-from-host: "disable"
http-cookie-generation: "16"
http-cookie-path: "<your_own_value>"
http-cookie-share: "disable"
http-ip-header: "enable"
http-ip-header-name: "<your_own_value>"
http-multiplex: "enable"
https-cookie-secure: "disable"
id: "23"
ldb-method: "static"
mapped-addr: "<your_own_value> (source firewall.address.name)"
mappedip:
-
range: "<your_own_value>"
mappedport: "<your_own_value>"
max-embryonic-connections: "29"
monitor:
-
name: "default_name_31 (source firewall.ldb-monitor.name)"
name: "default_name_32"
nat-source-vip: "disable"
outlook-web-access: "disable"
persistence: "none"
portforward: "disable"
portmapping-type: "1-to-1"
protocol: "tcp"
realservers:
-
client-ip: "<your_own_value>"
healthcheck: "disable"
holddown-interval: "42"
http-host: "myhostname"
id: "44"
ip: "<your_own_value>"
max-connections: "46"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "48"
status: "active"
weight: "50"
server-type: "http"
service:
-
name: "default_name_53 (source firewall.service.custom.name firewall.service.group.name)"
src-filter:
-
range: "<your_own_value>"
srcintf-filter:
-
interface-name: "<your_own_value> (source system.interface.name)"
ssl-algorithm: "high"
ssl-certificate: "<your_own_value> (source vpn.certificate.local.name)"
ssl-cipher-suites:
-
cipher: "TLS-RSA-WITH-3DES-EDE-CBC-SHA"
priority: "62"
versions: "ssl-3.0"
ssl-client-fallback: "disable"
ssl-client-renegotiation: "allow"
ssl-client-session-state-max: "66"
ssl-client-session-state-timeout: "67"
ssl-client-session-state-type: "disable"
ssl-dh-bits: "768"
ssl-hpkp: "disable"
ssl-hpkp-age: "71"
ssl-hpkp-backup: "<your_own_value> (source vpn.certificate.local.name vpn.certificate.ca.name)"
ssl-hpkp-include-subdomains: "disable"
ssl-hpkp-primary: "<your_own_value> (source vpn.certificate.local.name vpn.certificate.ca.name)"
ssl-hpkp-report-uri: "<your_own_value>"
ssl-hsts: "disable"
ssl-hsts-age: "77"
ssl-hsts-include-subdomains: "disable"
ssl-http-location-conversion: "enable"
ssl-http-match-host: "enable"
ssl-max-version: "ssl-3.0"
ssl-min-version: "ssl-3.0"
ssl-mode: "half"
ssl-pfs: "require"
ssl-send-empty-frags: "enable"
ssl-server-algorithm: "high"
ssl-server-cipher-suites:
-
cipher: "TLS-RSA-WITH-3DES-EDE-CBC-SHA"
priority: "89"
versions: "ssl-3.0"
ssl-server-max-version: "ssl-3.0"
ssl-server-min-version: "ssl-3.0"
ssl-server-session-state-max: "93"
ssl-server-session-state-timeout: "94"
ssl-server-session-state-type: "disable"
type: "static-nat"
uuid: "<your_own_value>"
weblogic-server: "disable"
websphere-server: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Authenticate the module-level FortiOS handle.

    Reads host/username/password from *data*, enables debug output,
    and toggles HTTPS according to the optional ``https`` flag (HTTPS
    stays on unless the flag is present and falsy).
    """
    host = data['host']
    username = data['username']
    password = data['password']
    fos.debug('on')
    fos.https('off' if not data.get('https', True) else 'on')
    fos.login(host, username, password)
def filter_firewall_vip_data(json):
    """Return a copy of *json* restricted to the known firewall VIP options.

    Keys that are absent or whose value is ``None`` are dropped so only
    explicitly-supplied settings are sent to the device.
    """
    option_list = ['arp-reply', 'color', 'comment',
                   'dns-mapping-ttl', 'extaddr', 'extintf',
                   'extip', 'extport', 'gratuitous-arp-interval',
                   'http-cookie-age', 'http-cookie-domain', 'http-cookie-domain-from-host',
                   'http-cookie-generation', 'http-cookie-path', 'http-cookie-share',
                   'http-ip-header', 'http-ip-header-name', 'http-multiplex',
                   'https-cookie-secure', 'id', 'ldb-method',
                   'mapped-addr', 'mappedip', 'mappedport',
                   'max-embryonic-connections', 'monitor', 'name',
                   'nat-source-vip', 'outlook-web-access', 'persistence',
                   'portforward', 'portmapping-type', 'protocol',
                   'realservers', 'server-type', 'service',
                   'src-filter', 'srcintf-filter', 'ssl-algorithm',
                   'ssl-certificate', 'ssl-cipher-suites', 'ssl-client-fallback',
                   'ssl-client-renegotiation', 'ssl-client-session-state-max', 'ssl-client-session-state-timeout',
                   'ssl-client-session-state-type', 'ssl-dh-bits', 'ssl-hpkp',
                   'ssl-hpkp-age', 'ssl-hpkp-backup', 'ssl-hpkp-include-subdomains',
                   'ssl-hpkp-primary', 'ssl-hpkp-report-uri', 'ssl-hsts',
                   'ssl-hsts-age', 'ssl-hsts-include-subdomains', 'ssl-http-location-conversion',
                   'ssl-http-match-host', 'ssl-max-version', 'ssl-min-version',
                   'ssl-mode', 'ssl-pfs', 'ssl-send-empty-frags',
                   'ssl-server-algorithm', 'ssl-server-cipher-suites', 'ssl-server-max-version',
                   'ssl-server-min-version', 'ssl-server-session-state-max', 'ssl-server-session-state-timeout',
                   'ssl-server-session-state-type', 'type', 'uuid',
                   'weblogic-server', 'websphere-server']

    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def firewall_vip(data, fos):
    """Create/update or delete the firewall VIP object on the device.

    Dispatches on ``firewall_vip.state``: "present" pushes the filtered
    configuration, "absent" deletes the object keyed by its name.
    Returns the raw API response from *fos*.
    """
    vdom = data['vdom']
    vip_config = data['firewall_vip']
    payload = filter_firewall_vip_data(vip_config)
    state = vip_config['state']
    if state == "present":
        return fos.set('firewall', 'vip', data=payload, vdom=vdom)
    elif state == "absent":
        return fos.delete('firewall', 'vip', mkey=payload['name'], vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, run the first requested firewall method, and log out.

    Returns the Ansible-style triple ``(is_error, has_changed, result)``.

    The original implementation resolved method names with ``eval`` and
    left ``resp`` unbound (NameError) when no method matched; this uses an
    explicit dispatch table and reports a clean error instead.
    """
    login(data)

    # Explicit dispatch table instead of eval() on a method name.
    methods = {
        'firewall_vip': firewall_vip,
    }
    resp = None
    for method_name, method_func in methods.items():
        if data.get(method_name):
            resp = method_func(data, fos)
            break

    fos.logout()

    if resp is None:
        # No configuration section was supplied.
        return True, False, {'status': 'error',
                             'message': 'no matching method found'}
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible entry point for the FortiOS firewall VIP module.

    Builds the argument spec mirroring the FortiOS firewall VIP schema,
    instantiates the fortiosapi client, and applies the requested
    configuration, reporting success/failure back to Ansible.
    """
    # Argument spec: top-level connection options plus the nested
    # "firewall_vip" dict describing the VIP object itself.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        # NOTE(review): default given as the string "False"; Ansible's bool
        # coercion turns it into False — confirm this is intentional.
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_vip": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "arp-reply": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
                "color": {"required": False, "type": "int"},
                "comment": {"required": False, "type": "str"},
                "dns-mapping-ttl": {"required": False, "type": "int"},
                "extaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "extintf": {"required": False, "type": "str"},
                "extip": {"required": False, "type": "str"},
                "extport": {"required": False, "type": "str"},
                "gratuitous-arp-interval": {"required": False, "type": "int"},
                "http-cookie-age": {"required": False, "type": "int"},
                "http-cookie-domain": {"required": False, "type": "str"},
                "http-cookie-domain-from-host": {"required": False, "type": "str",
                                                 "choices": ["disable", "enable"]},
                "http-cookie-generation": {"required": False, "type": "int"},
                "http-cookie-path": {"required": False, "type": "str"},
                "http-cookie-share": {"required": False, "type": "str",
                                      "choices": ["disable", "same-ip"]},
                "http-ip-header": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "http-ip-header-name": {"required": False, "type": "str"},
                "http-multiplex": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "https-cookie-secure": {"required": False, "type": "str",
                                        "choices": ["disable", "enable"]},
                "id": {"required": False, "type": "int"},
                "ldb-method": {"required": False, "type": "str",
                               "choices": ["static", "round-robin", "weighted",
                                           "least-session", "least-rtt", "first-alive",
                                           "http-host"]},
                "mapped-addr": {"required": False, "type": "str"},
                "mappedip": {"required": False, "type": "list",
                             "options": {
                                 "range": {"required": True, "type": "str"}
                             }},
                "mappedport": {"required": False, "type": "str"},
                "max-embryonic-connections": {"required": False, "type": "int"},
                "monitor": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                # "name" is the mkey used for deletion in firewall_vip().
                "name": {"required": True, "type": "str"},
                "nat-source-vip": {"required": False, "type": "str",
                                   "choices": ["disable", "enable"]},
                "outlook-web-access": {"required": False, "type": "str",
                                       "choices": ["disable", "enable"]},
                "persistence": {"required": False, "type": "str",
                                "choices": ["none", "http-cookie", "ssl-session-id"]},
                "portforward": {"required": False, "type": "str",
                                "choices": ["disable", "enable"]},
                "portmapping-type": {"required": False, "type": "str",
                                     "choices": ["1-to-1", "m-to-n"]},
                "protocol": {"required": False, "type": "str",
                             "choices": ["tcp", "udp", "sctp",
                                         "icmp"]},
                "realservers": {"required": False, "type": "list",
                                "options": {
                                    "client-ip": {"required": False, "type": "str"},
                                    "healthcheck": {"required": False, "type": "str",
                                                    "choices": ["disable", "enable", "vip"]},
                                    "holddown-interval": {"required": False, "type": "int"},
                                    "http-host": {"required": False, "type": "str"},
                                    "id": {"required": True, "type": "int"},
                                    "ip": {"required": False, "type": "str"},
                                    "max-connections": {"required": False, "type": "int"},
                                    "monitor": {"required": False, "type": "str"},
                                    "port": {"required": False, "type": "int"},
                                    "status": {"required": False, "type": "str",
                                               "choices": ["active", "standby", "disable"]},
                                    "weight": {"required": False, "type": "int"}
                                }},
                "server-type": {"required": False, "type": "str",
                                "choices": ["http", "https", "imaps",
                                            "pop3s", "smtps", "ssl",
                                            "tcp", "udp", "ip"]},
                "service": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "src-filter": {"required": False, "type": "list",
                               "options": {
                                   "range": {"required": True, "type": "str"}
                               }},
                "srcintf-filter": {"required": False, "type": "list",
                                   "options": {
                                       "interface-name": {"required": True, "type": "str"}
                                   }},
                "ssl-algorithm": {"required": False, "type": "str",
                                  "choices": ["high", "medium", "low",
                                              "custom"]},
                "ssl-certificate": {"required": False, "type": "str"},
                "ssl-cipher-suites": {"required": False, "type": "list",
                                      "options": {
                                          "cipher": {"required": False, "type": "str",
                                                     "choices": ["TLS-RSA-WITH-3DES-EDE-CBC-SHA",
                                                                 "TLS-DHE-RSA-WITH-DES-CBC-SHA",
                                                                 "TLS-DHE-DSS-WITH-DES-CBC-SHA"]},
                                          "priority": {"required": True, "type": "int"},
                                          "versions": {"required": False, "type": "str",
                                                       "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                                   "tls-1.2"]}
                                      }},
                "ssl-client-fallback": {"required": False, "type": "str",
                                        "choices": ["disable", "enable"]},
                "ssl-client-renegotiation": {"required": False, "type": "str",
                                             "choices": ["allow", "deny", "secure"]},
                "ssl-client-session-state-max": {"required": False, "type": "int"},
                "ssl-client-session-state-timeout": {"required": False, "type": "int"},
                "ssl-client-session-state-type": {"required": False, "type": "str",
                                                  "choices": ["disable", "time", "count",
                                                              "both"]},
                "ssl-dh-bits": {"required": False, "type": "str",
                                "choices": ["768", "1024", "1536",
                                            "2048", "3072", "4096"]},
                "ssl-hpkp": {"required": False, "type": "str",
                             "choices": ["disable", "enable", "report-only"]},
                "ssl-hpkp-age": {"required": False, "type": "int"},
                "ssl-hpkp-backup": {"required": False, "type": "str"},
                "ssl-hpkp-include-subdomains": {"required": False, "type": "str",
                                                "choices": ["disable", "enable"]},
                "ssl-hpkp-primary": {"required": False, "type": "str"},
                "ssl-hpkp-report-uri": {"required": False, "type": "str"},
                "ssl-hsts": {"required": False, "type": "str",
                             "choices": ["disable", "enable"]},
                "ssl-hsts-age": {"required": False, "type": "int"},
                "ssl-hsts-include-subdomains": {"required": False, "type": "str",
                                                "choices": ["disable", "enable"]},
                "ssl-http-location-conversion": {"required": False, "type": "str",
                                                 "choices": ["enable", "disable"]},
                "ssl-http-match-host": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]},
                "ssl-max-version": {"required": False, "type": "str",
                                    "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                "tls-1.2"]},
                "ssl-min-version": {"required": False, "type": "str",
                                    "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                "tls-1.2"]},
                "ssl-mode": {"required": False, "type": "str",
                             "choices": ["half", "full"]},
                "ssl-pfs": {"required": False, "type": "str",
                            "choices": ["require", "deny", "allow"]},
                "ssl-send-empty-frags": {"required": False, "type": "str",
                                         "choices": ["enable", "disable"]},
                "ssl-server-algorithm": {"required": False, "type": "str",
                                         "choices": ["high", "medium", "low",
                                                     "custom", "client"]},
                "ssl-server-cipher-suites": {"required": False, "type": "list",
                                             "options": {
                                                 "cipher": {"required": False, "type": "str",
                                                            "choices": ["TLS-RSA-WITH-3DES-EDE-CBC-SHA",
                                                                        "TLS-DHE-RSA-WITH-DES-CBC-SHA",
                                                                        "TLS-DHE-DSS-WITH-DES-CBC-SHA"]},
                                                 "priority": {"required": True, "type": "int"},
                                                 "versions": {"required": False, "type": "str",
                                                              "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                                          "tls-1.2"]}
                                             }},
                "ssl-server-max-version": {"required": False, "type": "str",
                                           "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                       "tls-1.2", "client"]},
                "ssl-server-min-version": {"required": False, "type": "str",
                                           "choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
                                                       "tls-1.2", "client"]},
                "ssl-server-session-state-max": {"required": False, "type": "int"},
                "ssl-server-session-state-timeout": {"required": False, "type": "int"},
                "ssl-server-session-state-type": {"required": False, "type": "str",
                                                  "choices": ["disable", "time", "count",
                                                              "both"]},
                "type": {"required": False, "type": "str",
                         "choices": ["static-nat", "load-balance", "server-load-balance",
                                     "dns-translation", "fqdn"]},
                "uuid": {"required": False, "type": "str"},
                "weblogic-server": {"required": False, "type": "str",
                                    "choices": ["disable", "enable"]},
                "websphere-server": {"required": False, "type": "str",
                                     "choices": ["disable", "enable"]}
            }
        }
    }

    # Check mode is not supported: every run talks to the live device.
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is an optional third-party dependency; fail cleanly
    # (instead of a traceback) when it is missing.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # The connection handle is kept in the module-level "fos" global so
    # login()/firewall_vip() can reach it.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_firewall(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
izzyalonso/tndata_backend | tndata_backend/utils/mixins.py | 2 | 3723 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from redis_metrics import metric
from rest_framework.exceptions import APIException
class LoginRequiredMixin(object):
    """Class-based-view mixin that rejects anonymous users.

    The resolved view callable is wrapped with Django's ``login_required``
    decorator, so unauthenticated requests are redirected to the login page.
    Borrowed from: https://goo.gl/CtYx3s
    """
    @classmethod
    def as_view(cls, **initkwargs):
        return login_required(
            super(LoginRequiredMixin, cls).as_view(**initkwargs)
        )
# API ViewSet mixins
# ------------------
class VersionedViewSetMixin:
    """This mixin adds an api version lookup to a viewset. For this to work,
    include this mixin on your ViewSet class, and set a `serializer_class_vX`
    attribute, e.g.:

        serializer_class_v1 = serializers.v1.FooSerializer
        serializer_class_v2 = serializers.v2.FooSerializer

    If for some reason the class does not include the appropriate serializer
    class attribute, the default version will be returned.

    ----
    Versioned Docs (via docstrings)

    This mixin also makes use of DRF's self-describing API feature, but provides
    a method to include different docs for different versions of the api.

    To enable this, a `get_docstring` method will look for a `docstring_prefix`
    attribute which should be a directory containing your documentation. The
    convention is that your doc is the viewset's name, lowercased, with the
    version number tacked on the end; in a markdown file with a .md extension.

    For example: `UserViewSet`'s documentation for version 2 should be in a
    file named `userviewset_v2.md`. And if `docstring_prefix = "api_docs"`,
    this mixin will load the file (from your project root):

        api_docs/userviewset_v2.md
    """

    # Mapping of request version number to the expected serializer attribute.
    _versions = {
        '1': 'serializer_class_v1',
        '2': 'serializer_class_v2',
    }

    def get_serializer_class(self):
        """Return the serializer class for the requested API version,
        falling back to the project's DEFAULT_VERSION, and raising an
        APIException when no serializer is defined at all."""
        try:
            attr_name = self._versions[self.request.version]
            serializer_class = getattr(self, attr_name)
        except (KeyError, AttributeError):
            # Unknown version or missing attribute: fall back to the default.
            default = settings.REST_FRAMEWORK['DEFAULT_VERSION']
            attr_name = "serializer_class_v{}".format(default)
            serializer_class = getattr(self, attr_name)

        if serializer_class is None:
            raise APIException("This version of the api has no serializer.")

        # HACK: Dynamically rewrite the class docstring based on the api
        # version so DRF's self-describing API shows version-specific docs.
        # It really makes little sense to do this work here, but this is
        # the appropriate part of this object's lifecycle to hook into.
        docstring = self.get_docstring()
        if docstring:
            self.__class__.__doc__ = docstring
        return serializer_class

    def get_docstring(self):
        """Load a markdown doc file based on the api version.

        Returns the file's contents, or None when no ``docstring_prefix``
        is set or the file does not exist. The file is re-read on every
        call (no caching is performed).
        """
        docstring = None
        docstring_prefix = getattr(self, 'docstring_prefix', None)
        if docstring_prefix:
            doc_file = "{}/{}_v{}.md".format(
                docstring_prefix,
                self.__class__.__name__.lower(),
                self.request.version
            )
            try:
                # Use a context manager so the file handle is always closed
                # (the previous implementation leaked it).
                with open(doc_file) as handle:
                    docstring = handle.read()
            except FileNotFoundError:  # noqa
                pass
        return docstring
class TombstoneMixin:
    """Mixin that records a "<ClassName> created" metric (in the
    "Tombstones" category) every time an instance is constructed."""

    def __init__(self, *args, **kwargs):
        metric("{} created".format(self.__class__.__name__),
               category="Tombstones")
        return super().__init__(*args, **kwargs)
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/IPython/utils/decorators.py | 36 | 2071 | # encoding: utf-8
"""Decorators that don't go anywhere else.
This module contains misc. decorators that don't really go with another module
in :mod:`IPython.utils`. Before putting something here please see if it should
go into another topical module in :mod:`IPython.utils`.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def flag_calls(func):
    """Wrap a function to detect and flag when it gets called.

    This is a decorator which takes a function and wraps it in a function with
    a 'called' attribute. wrapper.called is initialized to False.

    The wrapper.called attribute is set to False right before each call to the
    wrapped function, so if the call fails it remains False. After the call
    completes, wrapper.called is set to True and the output is returned.

    Testing for truth in wrapper.called allows you to determine if a call to
    func() was attempted and succeeded.

    The wrapper preserves the wrapped function's metadata (``__name__``,
    ``__doc__``, ``__module__``, ...) via ``functools.wraps`` — the original
    implementation copied only ``__doc__``.
    """
    # Local import: this module has no import section of its own.
    import functools

    # Don't wrap twice.
    if hasattr(func, 'called'):
        return func

    @functools.wraps(func)
    def wrapper(*args, **kw):
        # Reset before the call so a raised exception leaves it False.
        wrapper.called = False
        out = func(*args, **kw)
        wrapper.called = True
        return out

    wrapper.called = False
    return wrapper
def undoc(func):
    """Mark a function or class as undocumented.

    This is found by inspecting the AST, so for now it must be used directly
    as @undoc, not as e.g. @decorators.undoc

    The decorated object is returned unchanged; the decorator is purely a
    marker for the documentation tooling.
    """
    return func
| gpl-3.0 |
synergeticsedx/deployment-wipro | pavelib/paver_tests/test_servers.py | 15 | 11645 | """Unit tests for the Paver server tasks."""
import ddt
from paver.easy import call_task
from .utils import PaverTestCase
# Templates of the shell commands / sass directories that the paver server
# tasks are expected to emit; the tests below format these with concrete
# settings/system values and compare against the captured task messages.
EXPECTED_COFFEE_COMMAND = (
    u"node_modules/.bin/coffee --compile `find {platform_root}/lms "
    u"{platform_root}/cms {platform_root}/common -type f -name \"*.coffee\"`"
)
EXPECTED_SASS_COMMAND = (
    u"libsass {sass_directory}"
)
# Sass source directories, per system.
EXPECTED_COMMON_SASS_DIRECTORIES = [
    u"common/static/sass",
]
EXPECTED_LMS_SASS_DIRECTORIES = [
    u"lms/static/sass",
    u"lms/static/certificates/sass",
]
EXPECTED_CMS_SASS_DIRECTORIES = [
    u"cms/static/sass",
]
EXPECTED_LMS_SASS_COMMAND = [
    u"python manage.py lms --settings={asset_settings} compile_sass lms ",
]
EXPECTED_CMS_SASS_COMMAND = [
    u"python manage.py cms --settings={asset_settings} compile_sass cms ",
]
EXPECTED_COLLECT_STATIC_COMMAND = (
    u"python manage.py {system} --settings={asset_settings} collectstatic --noinput {log_string}"
)
EXPECTED_CELERY_COMMAND = (
    u"python manage.py lms --settings={settings} celery worker --beat --loglevel=INFO --pythonpath=."
)
EXPECTED_RUN_SERVER_COMMAND = (
    u"python manage.py {system} --settings={settings} runserver --traceback --pythonpath=. 0.0.0.0:{port}"
)
EXPECTED_INDEX_COURSE_COMMAND = (
    u"python manage.py {system} --settings={settings} reindex_course --setup"
)
@ddt.ddt
class TestPaverServerTasks(PaverTestCase):
"""
Test the Paver server tasks.
"""
    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset-settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"port": 8030}],
    )
    @ddt.unpack
    def test_lms(self, options):
        """
        Test the "lms" server task with various option combinations.
        (The original docstring said "devstack" — a copy-paste error.)
        """
        self.verify_server_task("lms", options)
    @ddt.data(
        [{}],
        [{"settings": "aws"}],
        [{"asset-settings": "test_static_optimized"}],
        [{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
        [{"fast": True}],
        [{"port": 8031}],
    )
    @ddt.unpack
    def test_studio(self, options):
        """
        Test the "studio" server task with various option combinations.
        (The original docstring said "devstack" — a copy-paste error.)
        """
        self.verify_server_task("studio", options)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset-settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
[{"no-contracts": True}],
)
@ddt.unpack
def test_devstack(self, server_options):
"""
Test the "devstack" task.
"""
options = server_options.copy()
is_optimized = options.get("optimized", False)
expected_settings = "devstack_optimized" if is_optimized else options.get("settings", "devstack")
# First test with LMS
options["system"] = "lms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
# Then test with Studio
options["system"] = "cms"
options["expected_messages"] = [
EXPECTED_INDEX_COURSE_COMMAND.format(
system="cms",
settings=expected_settings,
)
]
self.verify_server_task("devstack", options, contracts_default=True)
@ddt.data(
[{}],
[{"settings": "aws"}],
[{"asset_settings": "test_static_optimized"}],
[{"settings": "devstack_optimized", "asset-settings": "test_static_optimized"}],
[{"fast": True}],
[{"optimized": True}],
[{"optimized": True, "fast": True}],
)
@ddt.unpack
def test_run_all_servers(self, options):
"""
Test the "run_all_servers" task.
"""
self.verify_run_all_servers_task(options)
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_celery(self, options):
"""
Test the "celery" task.
"""
settings = options.get("settings", "dev_with_worker")
call_task("pavelib.servers.celery", options=options)
self.assertEquals(self.task_messages, [EXPECTED_CELERY_COMMAND.format(settings=settings)])
@ddt.data(
[{}],
[{"settings": "aws"}],
)
@ddt.unpack
def test_update_db(self, options):
"""
Test the "update_db" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.update_db", options=options)
# pylint: disable=line-too-long
db_command = "NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{server} --traceback --pythonpath=. "
self.assertEquals(
self.task_messages,
[
db_command.format(server="lms", settings=settings),
db_command.format(server="cms", settings=settings),
]
)
@ddt.data(
["lms", {}],
["lms", {"settings": "aws"}],
["cms", {}],
["cms", {"settings": "aws"}],
)
@ddt.unpack
def test_check_settings(self, system, options):
"""
Test the "check_settings" task.
"""
settings = options.get("settings", "devstack")
call_task("pavelib.servers.check_settings", args=[system, settings])
self.assertEquals(
self.task_messages,
[
"echo 'import {system}.envs.{settings}' "
"| python manage.py {system} --settings={settings} shell --plain --pythonpath=.".format(
system=system, settings=settings
),
]
)
def verify_server_task(self, task_name, options, contracts_default=False):
"""
Verify the output of a server task.
"""
log_string = options.get("log_string", "> /dev/null")
settings = options.get("settings", None)
asset_settings = options.get("asset-settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
no_contracts = options.get("no-contracts", not contracts_default)
if task_name == "devstack":
system = options.get("system")
elif task_name == "studio":
system = "cms"
else:
system = "lms"
port = options.get("port", "8000" if system == "lms" else "8001")
self.reset_task_messages()
if task_name == "devstack":
args = ["studio" if system == "cms" else system]
if settings:
args.append("--settings={settings}".format(settings=settings))
if asset_settings:
args.append("--asset-settings={asset_settings}".format(asset_settings=asset_settings))
if is_optimized:
args.append("--optimized")
if is_fast:
args.append("--fast")
if no_contracts:
args.append("--no-contracts")
call_task("pavelib.servers.devstack", args=args)
else:
call_task("pavelib.servers.{task_name}".format(task_name=task_name), options=options)
expected_messages = options.get("expected_messages", [])
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
if not is_fast:
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=self.platform_root))
expected_messages.extend(self.expected_sass_commands(system=system, asset_settings=expected_asset_settings))
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system=system, asset_settings=expected_asset_settings, log_string=log_string
))
expected_run_server_command = EXPECTED_RUN_SERVER_COMMAND.format(
system=system,
settings=expected_settings,
port=port,
)
if not no_contracts:
expected_run_server_command += " --contracts"
expected_messages.append(expected_run_server_command)
self.assertEquals(self.task_messages, expected_messages)
def verify_run_all_servers_task(self, options):
"""
Verify the output of a server task.
"""
log_string = options.get("log_string", "> /dev/null")
settings = options.get("settings", None)
asset_settings = options.get("asset_settings", None)
is_optimized = options.get("optimized", False)
is_fast = options.get("fast", False)
self.reset_task_messages()
call_task("pavelib.servers.run_all_servers", options=options)
expected_settings = settings if settings else "devstack"
expected_asset_settings = asset_settings if asset_settings else expected_settings
if is_optimized:
expected_settings = "devstack_optimized"
expected_asset_settings = "test_static_optimized"
expected_collect_static = not is_fast and expected_settings != "devstack"
expected_messages = []
if not is_fast:
expected_messages.append(u"xmodule_assets common/static/xmodule")
expected_messages.append(u"install npm_assets")
expected_messages.append(EXPECTED_COFFEE_COMMAND.format(platform_root=self.platform_root))
expected_messages.extend(self.expected_sass_commands(asset_settings=expected_asset_settings))
if expected_collect_static:
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="lms", asset_settings=expected_asset_settings, log_string=log_string
))
expected_messages.append(EXPECTED_COLLECT_STATIC_COMMAND.format(
system="cms", asset_settings=expected_asset_settings, log_string=log_string
))
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="lms",
settings=expected_settings,
port=8000,
)
)
expected_messages.append(
EXPECTED_RUN_SERVER_COMMAND.format(
system="cms",
settings=expected_settings,
port=8001,
)
)
expected_messages.append(EXPECTED_CELERY_COMMAND.format(settings="dev_with_worker"))
self.assertEquals(self.task_messages, expected_messages)
def expected_sass_commands(self, system=None, asset_settings=u"test_static_optimized"):
"""
Returns the expected SASS commands for the specified system.
"""
expected_sass_commands = []
if system != 'cms':
expected_sass_commands.extend(EXPECTED_LMS_SASS_COMMAND)
if system != 'lms':
expected_sass_commands.extend(EXPECTED_CMS_SASS_COMMAND)
return [command.format(asset_settings=asset_settings) for command in expected_sass_commands]
| agpl-3.0 |
glls/Cinnamon | files/usr/share/cinnamon/cinnamon-settings-users/cinnamon-settings-users.py | 2 | 40589 | #!/usr/bin/python3
import os
import pwd
import grp
import gettext
import shutil
import re
import subprocess
from random import randint
from setproctitle import setproctitle
import PIL
from PIL import Image
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("AccountsService", "1.0")
from gi.repository import Gtk, GObject, Gio, GdkPixbuf, AccountsService, GLib
gettext.install("cinnamon", "/usr/share/locale")
class PrivHelper(object):
    """Temporarily drop root privileges to those of a target user.

    Touching user-controlled files (such as ~/.face) while running as root
    is unsafe, so such file operations are bracketed between drop_privs()
    and restore_privs().
    """

    def __init__(self):
        # Remember the identity the process started with so that it can
        # be restored later.
        self.orig_uid = os.getuid()
        self.orig_gid = os.getgid()
        self.orig_groups = os.getgroups()

    def drop_privs(self, user):
        """Switch the effective uid/gid and supplementary groups to *user*."""
        uid = user.get_uid()
        gid = pwd.getpwuid(uid).pw_gid  # the user's primary group id
        # Set up the user's supplementary groups plus the primary group,
        # then switch effective gid before effective uid.
        os.initgroups(user.get_user_name(), gid)
        os.setegid(gid)
        os.seteuid(uid)

    def restore_privs(self):
        """Return to the identity recorded at construction time."""
        os.seteuid(self.orig_uid)
        os.setegid(self.orig_gid)
        os.setgroups(self.orig_groups)
# Shared helper used whenever user-owned files must be manipulated by this
# (root-run) tool.
priv_helper = PrivHelper()

# Column indices of the users tree model.
(INDEX_USER_OBJECT, INDEX_USER_PICTURE, INDEX_USER_DESCRIPTION) = range(3)
# Column indices of the groups tree model.
(INDEX_GID, INDEX_GROUPNAME) = range(2)
class GroupDialog (Gtk.Dialog):
    """Modal dialog prompting for a group name (lower-case, no spaces)."""

    def __init__ (self, label, value, parent = None):
        super(GroupDialog, self).__init__(None, parent)
        try:
            self.set_modal(True)
            self.set_skip_taskbar_hint(True)
            self.set_skip_pager_hint(True)
            self.set_title("")

            layout = DimmedTable()
            layout.add_labels([label])

            self.entry = Gtk.Entry()
            self.entry.set_text(value)
            self.entry.connect("changed", self._on_entry_changed)
            layout.add_controls([self.entry])

            self.set_border_width(6)
            self.get_content_area().add(layout)
            self.show_all()

            self.add_buttons(_("Cancel"), Gtk.ResponseType.CANCEL, _("OK"), Gtk.ResponseType.OK, )
            # Nothing validated yet, so OK starts disabled.
            self.set_response_sensitive(Gtk.ResponseType.OK, False)
        except Exception as detail:
            print(detail)

    def _on_entry_changed(self, entry):
        """Validate the typed group name and toggle the OK button."""
        name = entry.get_text()
        invalid = (" " in name) or (name.lower() != name)
        if invalid:
            entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "dialog-warning-symbolic")
            entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("The group name cannot contain upper-case or space characters"))
        else:
            entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
        # OK is only allowed for a non-empty, valid name.
        self.set_response_sensitive(Gtk.ResponseType.OK, (not invalid) and name != "")
class DimmedTable (Gtk.Table):
    """Two-column Gtk.Table: dimmed labels on the left, widgets on the right."""

    def __init__ (self):
        super(DimmedTable, self).__init__()
        self.set_border_width(6)
        self.set_row_spacings(8)
        self.set_col_spacings(15)

    def add_labels(self, texts):
        """Add one right-aligned dimmed label per entry; a None entry
        leaves that row's label cell empty (the row is still consumed)."""
        for row, text in enumerate(texts):
            if text is not None:
                label = Gtk.Label(text)
                label.set_alignment(1, 0.5)
                label.get_style_context().add_class("dim-label")
                self.attach(label, 0, 1, row, row + 1, xoptions=Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.FILL)

    def add_controls(self, controls):
        """Attach each control in the right-hand column, one per row."""
        for row, control in enumerate(controls):
            self.attach(control, 1, 2, row, row + 1)
class EditableEntry (Gtk.Notebook):
    """A label that turns into a text entry when clicked.

    Implemented as a two-page notebook: page 0 shows a flat button carrying
    the current text, page 1 shows a Gtk.Entry for editing.  Emits the
    "changed" signal with the new text when editing is confirmed.
    """

    __gsignals__ = {
        'changed': (GObject.SIGNAL_RUN_FIRST, None,
                    (str,))
    }

    PAGE_BUTTON = 0
    PAGE_ENTRY = 1

    def __init__ (self):
        super(EditableEntry, self).__init__()

        self.label = Gtk.Label()
        self.entry = Gtk.Entry()
        self.button = Gtk.Button()
        self.button.set_alignment(0.0, 0.5)
        self.button.set_relief(Gtk.ReliefStyle.NONE)

        self.append_page(self.button, None)
        self.append_page(self.entry, None)
        self.set_current_page(0)
        self.set_show_tabs(False)
        self.set_show_border(False)
        self.editable = False
        self.show_all()

        # Clicking or keyboard-activating the button switches to edit mode;
        # pressing Enter in the entry confirms and switches back.
        self.button.connect("released", self._on_button_clicked)
        self.button.connect("activate", self._on_button_clicked)
        self.entry.connect("activate", self._on_entry_validated)
        self.entry.connect("changed", self._on_entry_changed)

    def set_text(self, text):
        """Set both the display label and the editable text."""
        self.button.set_label(text)
        self.entry.set_text(text)

    def _on_button_clicked(self, button):
        self.set_editable(True)

    def _on_entry_validated(self, entry):
        self.set_editable(False)
        self.emit("changed", entry.get_text())

    def _on_entry_changed(self, entry):
        # Keep the button label in sync while the user types.
        self.button.set_label(entry.get_text())

    def set_editable(self, editable):
        page = EditableEntry.PAGE_ENTRY if editable else EditableEntry.PAGE_BUTTON
        self.set_current_page(page)
        self.editable = editable

    def set_tooltip_text(self, tooltip):
        self.button.set_tooltip_text(tooltip)

    def get_editable(self):
        return self.editable

    def get_text(self):
        return self.entry.get_text()
class PasswordDialog(Gtk.Dialog):
    """Modal dialog for setting a new password on a user account.

    On success it updates two labels owned by the parent window: the
    password mask (shown as dots) and the group list (from which
    "nopasswdlogin" is removed once a password exists).
    """
    def __init__ (self, user, password_mask, group_mask, parent = None):
        """
        :param user: AccountsService user whose password will be changed
        :param password_mask: Gtk.Label in the parent window showing the password dots
        :param group_mask: Gtk.Label in the parent window listing the user's groups
        """
        super(PasswordDialog, self).__init__(None, parent)
        self.user = user
        self.password_mask = password_mask
        self.group_mask = group_mask
        self.set_modal(True)
        self.set_skip_taskbar_hint(True)
        self.set_skip_pager_hint(True)
        self.set_title(_("Change Password"))

        table = DimmedTable()
        # Row 1 (None) is used for the strength meter attached manually below.
        table.add_labels([_("New password"), None, _("Confirm password")])

        self.new_password = Gtk.Entry()
        # Clicking the refresh icon generates a random password.
        self.new_password.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "view-refresh-symbolic")
        self.new_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Generate a password"))
        self.new_password.connect("icon-release", self._on_new_password_icon_released)
        self.new_password.connect("changed", self._on_passwords_changed)
        table.attach(self.new_password, 1, 3, 0, 1)

        # Strength meter: progress bar plus a textual rating.
        self.strengh_indicator = Gtk.ProgressBar()
        self.strengh_indicator.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
        self.strengh_indicator.set_fraction(0.0)
        table.attach(self.strengh_indicator, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
        self.strengh_indicator.set_size_request(-1, 1)
        self.strengh_label = Gtk.Label()
        self.strengh_label.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
        self.strengh_label.set_alignment(1, 0.5)
        table.attach(self.strengh_label, 2, 3, 1, 2)

        self.confirm_password = Gtk.Entry()
        self.confirm_password.connect("changed", self._on_passwords_changed)
        table.attach(self.confirm_password, 1, 3, 2, 3)

        self.show_password = Gtk.CheckButton(_("Show password"))
        self.show_password.connect('toggled', self._on_show_password_toggled)
        table.attach(self.show_password, 1, 3, 3, 4)

        self.set_border_width(6)
        box = self.get_content_area()
        box.add(table)
        self.show_all()

        # Error bar, hidden until a password change fails.
        self.infobar = Gtk.InfoBar()
        self.infobar.set_message_type(Gtk.MessageType.ERROR)
        label = Gtk.Label(_("An error occurred. Your password was not changed."))
        content = self.infobar.get_content_area()
        content.add(label)
        table.attach(self.infobar, 0, 3, 4, 5)

        self.add_buttons(_("Cancel"), Gtk.ResponseType.CANCEL, _("Change"), Gtk.ResponseType.OK, )

        self.set_passwords_visibility()
        self.set_response_sensitive(Gtk.ResponseType.OK, False)
        self.infobar.hide()

        self.connect("response", self._on_response)

    def _on_response(self, dialog, response_id):
        """Apply the change on OK; any other response just closes the dialog."""
        if response_id == Gtk.ResponseType.OK:
            self.change_password()
        else:
            self.destroy()

    def change_password(self):
        """Set the new password on the account and update the parent labels."""
        newpass = self.new_password.get_text()
        # Second argument is the password hint (left empty) — TODO confirm
        # against the AccountsService API.
        self.user.set_password(newpass, "")
        mask = self.group_mask.get_text()
        if "nopasswdlogin" in mask:
            # The account now has a password, so drop password-less login.
            subprocess.call(["gpasswd", "-d", self.user.get_user_name(), "nopasswdlogin"])
            mask = mask.split(", ")
            mask.remove("nopasswdlogin")
            mask = ", ".join(mask)
            self.group_mask.set_text(mask)
        self.password_mask.set_text('\u2022\u2022\u2022\u2022\u2022\u2022')
        self.destroy()

    def set_passwords_visibility(self):
        """Show or hide the typed characters based on the checkbox state."""
        visible = self.show_password.get_active()
        self.new_password.set_visibility(visible)
        self.confirm_password.set_visibility(visible)

    def _on_new_password_icon_released(self, widget, icon_pos, event):
        """Generate a random 8-character password and fill both entries."""
        self.infobar.hide()
        self.show_password.set_active(True)
        characters = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
        newpass = ""
        for i in range (8):
            index = randint(0, len(characters) -1)
            newpass = newpass + characters[index]
        self.new_password.set_text(newpass)
        self.confirm_password.set_text(newpass)
        self.check_passwords()

    def _on_show_password_toggled(self, widget):
        self.set_passwords_visibility()

    # Based on setPasswordStrength() in Mozilla Seamonkey, which is tri-licensed under MPL 1.1, GPL 2.0, and LGPL 2.1.
    # Forked from Ubiquity validation.py
    def password_strength(self, password):
        """Return a heuristic strength score in [0, 1] for *password*."""
        upper = lower = digit = symbol = 0
        for char in password:
            if char.isdigit():
                digit += 1
            elif char.islower():
                lower += 1
            elif char.isupper():
                upper += 1
            else:
                symbol += 1
        # Cap each component's contribution before weighting.
        length = len(password)
        length = min(length,4)
        digit = min(digit,3)
        upper = min(upper,3)
        symbol = min(symbol,3)
        strength = (
            ((length * 0.1) - 0.2) +
            (digit * 0.1) +
            (symbol * 0.15) +
            (upper * 0.1))
        if strength > 1:
            strength = 1
        if strength < 0:
            strength = 0
        return strength

    def _on_passwords_changed(self, widget):
        """Update the match warning, strength meter and OK sensitivity."""
        self.infobar.hide()
        new_password = self.new_password.get_text()
        confirm_password = self.confirm_password.get_text()
        strength = self.password_strength(new_password)
        if new_password != confirm_password:
            self.confirm_password.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "dialog-warning-symbolic")
            self.confirm_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Passwords do not match"))
        else:
            self.confirm_password.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
        if len(new_password) < 8:
            self.strengh_label.set_text(_("Too short"))
            self.strengh_indicator.set_fraction(0.0)
        elif strength < 0.5:
            self.strengh_label.set_text(_("Weak"))
            self.strengh_indicator.set_fraction(0.2)
        elif strength < 0.75:
            self.strengh_label.set_text(_("Fair"))
            self.strengh_indicator.set_fraction(0.4)
        elif strength < 0.9:
            self.strengh_label.set_text(_("Good"))
            self.strengh_indicator.set_fraction(0.6)
        else:
            self.strengh_label.set_text(_("Strong"))
            self.strengh_indicator.set_fraction(1.0)
        self.check_passwords()

    def check_passwords(self):
        """Enable OK only when both entries match and are at least 8 chars."""
        new_password = self.new_password.get_text()
        confirm_password = self.confirm_password.get_text()
        if len(new_password) >= 8 and new_password == confirm_password:
            self.set_response_sensitive(Gtk.ResponseType.OK, True)
        else:
            self.set_response_sensitive(Gtk.ResponseType.OK, False)
class NewUserDialog(Gtk.Dialog):
    """Dialog collecting account type, full name and username for a new user."""

    def __init__ (self, parent = None):
        super(NewUserDialog, self).__init__(None, parent)
        try:
            self.set_modal(True)
            self.set_skip_taskbar_hint(True)
            self.set_skip_pager_hint(True)
            self.set_title("")

            self.account_type_combo = Gtk.ComboBoxText()
            self.account_type_combo.append_text(_("Standard"))
            self.account_type_combo.append_text(_("Administrator"))
            self.account_type_combo.set_active(0)

            self.realname_entry = Gtk.Entry()
            self.realname_entry.connect("changed", self._on_info_changed)
            self.username_entry = Gtk.Entry()
            self.username_entry.connect("changed", self._on_info_changed)

            label = Gtk.Label()
            label.set_markup(_("The username must consist of only:\n - lower case letters (a-z)\n - numerals (0-9)\n - '.', '-', and '_' characters"))

            table = DimmedTable()
            table.add_labels([_("Account Type"), _("Full Name"), _("Username")])
            table.add_controls([self.account_type_combo, self.realname_entry, self.username_entry])

            self.set_border_width(6)
            content = self.get_content_area()
            content.add(table)
            content.add(label)
            self.show_all()

            self.add_buttons(_("Cancel"), Gtk.ResponseType.CANCEL, _("Add"), Gtk.ResponseType.OK, )
            # No valid input yet, so Add starts disabled.
            self.set_response_sensitive(Gtk.ResponseType.OK, False)
        except Exception as detail:
            print(detail)

    def user_exists(self, user_name):
        """Return True if an account named *user_name* already exists."""
        for user in AccountsService.UserManager.get_default().list_users():
            if user.get_user_name() == user_name:
                return True
        return False

    def _on_info_changed(self, widget):
        """Re-validate the name fields and toggle the Add button."""
        fullname = self.realname_entry.get_text()
        username = self.username_entry.get_text()
        valid = True
        if re.search('[^a-z0-9_.-]', username):
            # Username contains characters outside the allowed set.
            self.username_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "dialog-warning-symbolic")
            self.username_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Invalid username"))
            valid = False
        elif self.user_exists(username):
            self.username_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "dialog-warning-symbolic")
            self.username_entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("A user with the name '%s' already exists.") % username)
            valid = False
        else:
            self.username_entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
        # Both fields must be non-empty as well.
        if username == "" or fullname == "":
            valid = False
        self.set_response_sensitive(Gtk.ResponseType.OK, valid)
class GroupsDialog(Gtk.Dialog):
    """Dialog with one checkbox per system group, pre-checked for *username*."""

    def __init__ (self, username, parent = None):
        super(GroupsDialog, self).__init__(None, parent)
        try:
            self.set_modal(True)
            self.set_skip_taskbar_hint(True)
            self.set_skip_pager_hint(True)
            self.set_title("")
            self.set_default_size(200, 480)

            scrolled = Gtk.ScrolledWindow()
            viewport = Gtk.Viewport()
            vbox = Gtk.VBox()
            self.checkboxes = []
            for group in sorted(grp.getgrall(), key=lambda x: x[0], reverse=False):
                checkbox = Gtk.CheckButton(group[0])
                self.checkboxes.append(checkbox)
                vbox.add(checkbox)
                # Pre-select groups the user is already a member of.
                if username in group[3]:
                    checkbox.set_active(True)
            viewport.add(vbox)
            scrolled.add(viewport)

            self.set_border_width(6)
            self.get_content_area().pack_start(scrolled, True, True, 0)
            self.show_all()

            self.add_buttons(_("Cancel"), Gtk.ResponseType.CANCEL, _("OK"), Gtk.ResponseType.OK, )
        except Exception as detail:
            print(detail)

    def get_selected_groups(self):
        """Return the labels of all checked group checkboxes."""
        return [checkbox.get_label() for checkbox in self.checkboxes if checkbox.get_active()]
class Module:
    def __init__(self):
        """Build the Users and Groups window from its Glade UI definition
        and wire up all widgets, models and callbacks."""
        try:
            self.builder = Gtk.Builder()
            self.builder.set_translation_domain('cinnamon') # let it translate!
            self.builder.add_from_file("/usr/share/cinnamon/cinnamon-settings-users/cinnamon-settings-users.ui")
            self.window = self.builder.get_object("main_window")
            self.window.connect("destroy", Gtk.main_quit)
            self.window.set_title(_("Users and Groups"))
            self.builder.get_object("label_users").set_label(_("Users"))
            self.builder.get_object("label_groups").set_label(_("Groups"))
            # Toolbar buttons (handlers are defined elsewhere in this class).
            self.builder.get_object("button_add_user").connect("clicked", self.on_user_addition)
            self.builder.get_object("button_delete_user").connect("clicked", self.on_user_deletion)
            self.builder.get_object("button_add_group").connect("clicked", self.on_group_addition)
            self.builder.get_object("button_edit_group").connect("clicked", self.on_group_edition)
            self.builder.get_object("button_delete_group").connect("clicked", self.on_group_deletion)
            # Tree models: users = (user object, 48x48 face pixbuf, markup
            # description); groups = (gid, name); both sorted by text column.
            self.users = Gtk.TreeStore(object, GdkPixbuf.Pixbuf, str)
            self.users.set_sort_column_id(2, Gtk.SortType.ASCENDING)
            self.groups = Gtk.TreeStore(int, str)
            self.groups.set_sort_column_id(1, Gtk.SortType.ASCENDING)
            self.users_treeview = self.builder.get_object("treeview_users")
            self.users_treeview.set_rules_hint(True)
            self.groups_treeview = self.builder.get_object("treeview_groups")
            self.users_treeview.get_selection().connect("changed", self.on_user_selection)
            self.groups_treeview.get_selection().connect("changed", self.on_group_selection)
            # Users view: face picture column followed by description column.
            column = Gtk.TreeViewColumn()
            cell = Gtk.CellRendererPixbuf()
            column.pack_start(cell, True)
            column.add_attribute(cell, 'pixbuf', INDEX_USER_PICTURE)
            cell.set_property('ypad', 1)
            self.users_treeview.append_column(column)
            column = Gtk.TreeViewColumn()
            cell = Gtk.CellRendererText()
            column.pack_start(cell, True)
            column.add_attribute(cell, 'markup', INDEX_USER_DESCRIPTION)
            self.users_treeview.append_column(column)
            # Groups view: a single group-name column.
            column = Gtk.TreeViewColumn()
            cell = Gtk.CellRendererText()
            column.pack_start(cell, True)
            column.add_attribute(cell, 'text', INDEX_GROUPNAME)
            column.set_sort_column_id(1)
            self.groups_treeview.append_column(column)
            # Selection-dependent buttons start disabled.
            self.builder.get_object("button_delete_user").set_sensitive(False)
            self.builder.get_object("button_edit_group").set_sensitive(False)
            self.builder.get_object("button_delete_group").set_sensitive(False)
            # Face picker: a button showing the current picture which pops
            # up a menu of stock faces plus a "browse" entry.
            self.face_button = Gtk.Button()
            self.face_image = Gtk.Image()
            self.face_image.set_size_request(96, 96)
            self.face_button.set_image(self.face_image)
            self.face_image.set_from_file("/usr/share/cinnamon/faces/user-generic.png")
            self.face_button.set_alignment(0.0, 0.5)
            self.face_button.set_tooltip_text(_("Click to change the picture"))
            self.menu = Gtk.Menu()
            separator = Gtk.SeparatorMenuItem()
            face_browse_menuitem = Gtk.MenuItem(_("Browse for more pictures..."))
            face_browse_menuitem.connect('activate', self._on_face_browse_menuitem_activated)
            self.face_button.connect("button-release-event", self.menu_display)
            # Lay out the stock faces in a 4-column grid of menu items.
            row = 0
            col = 0
            num_cols = 4
            face_dirs = ["/usr/share/cinnamon/faces"]
            for face_dir in face_dirs:
                if os.path.exists(face_dir):
                    pictures = sorted(os.listdir(face_dir))
                    for picture in pictures:
                        path = os.path.join(face_dir, picture)
                        file = Gio.File.new_for_path(path)
                        file_icon = Gio.FileIcon.new(file)
                        image = Gtk.Image.new_from_gicon (file_icon, Gtk.IconSize.DIALOG)
                        menuitem = Gtk.MenuItem()
                        menuitem.add(image)
                        menuitem.connect('activate', self._on_face_menuitem_activated, path)
                        self.menu.attach(menuitem, col, col+1, row, row+1)
                        col = (col+1) % num_cols
                        if (col == 0):
                            row = row + 1
                    row = row + 1
                    # Separator and "browse" entry below the face grid.
                    self.menu.attach(separator, 0, 4, row, row+1)
                    self.menu.attach(face_browse_menuitem, 0, 4, row+2, row+3)
            self.account_type_combo = Gtk.ComboBoxText()
            self.account_type_combo.append_text(_("Standard"))
            self.account_type_combo.append_text(_("Administrator"))
            self.account_type_combo.connect("changed", self._on_accounttype_changed)
            self.realname_entry = EditableEntry()
            self.realname_entry.connect("changed", self._on_realname_changed)
            self.realname_entry.set_tooltip_text(_("Click to change the name"))
            # Password shown as a masked label on a flat button.
            self.password_mask = Gtk.Label()
            self.password_mask.set_alignment(0.0, 0.5)
            self.password_button = Gtk.Button()
            self.password_button.add(self.password_mask)
            self.password_button.set_relief(Gtk.ReliefStyle.NONE)
            self.password_button.set_tooltip_text(_("Click to change the password"))
            self.password_button.connect('activate', self._on_password_button_clicked)
            self.password_button.connect('released', self._on_password_button_clicked)
            # Group membership shown as a wrapped label on a flat button.
            self.groups_label = Gtk.Label()
            self.groups_label.set_line_wrap(True)
            self.groups_label.set_alignment(0, 0.5)
            self.groups_button = Gtk.Button()
            self.groups_button.add(self.groups_label)
            self.groups_button.set_relief(Gtk.ReliefStyle.NONE)
            self.groups_button.set_tooltip_text(_("Click to change the groups"))
            self.groups_button.connect("clicked", self._on_groups_button_clicked)
            box = Gtk.Box()
            box.pack_start(self.face_button, False, False, 0)
            table = DimmedTable()
            table.add_labels([_("Picture"), _("Account Type"), _("Name"), _("Password"), _("Groups")])
            table.add_controls([box, self.account_type_combo, self.realname_entry, self.password_button, self.groups_button])
            self.builder.get_object("box_users").add(table)
            # Users are loaded once AccountsService reports it is ready.
            self.accountService = AccountsService.UserManager.get_default()
            self.accountService.connect('notify::is-loaded', self.on_accounts_service_loaded)
            self.load_groups()
            self.window.show_all()
            # The detail pane stays hidden until a user is selected.
            self.builder.get_object("box_users").hide()
        except Exception as detail:
            print(detail)
def _on_password_button_clicked(self, widget):
model, treeiter = self.users_treeview.get_selection().get_selected()
if treeiter != None:
user = model[treeiter][INDEX_USER_OBJECT]
dialog = PasswordDialog(user, self.password_mask, self.groups_label, self.window)
response = dialog.run()
def _on_groups_button_clicked(self, widget):
model, treeiter = self.users_treeview.get_selection().get_selected()
if treeiter != None:
user = model[treeiter][INDEX_USER_OBJECT]
dialog = GroupsDialog(user.get_user_name(), self.window)
response = dialog.run()
if response == Gtk.ResponseType.OK:
groups = dialog.get_selected_groups()
subprocess.call(["usermod", user.get_user_name(), "-G", ",".join(groups)])
groups.sort()
self.groups_label.set_text(", ".join(groups))
dialog.destroy()
def _on_accounttype_changed(self, combobox):
model, treeiter = self.users_treeview.get_selection().get_selected()
if treeiter != None:
user = model[treeiter][INDEX_USER_OBJECT]
if self.account_type_combo.get_active() == 1:
user.set_account_type(AccountsService.UserAccountType.ADMINISTRATOR)
else:
user.set_account_type(AccountsService.UserAccountType.STANDARD)
groups = []
for group in grp.getgrall():
if user.get_user_name() in group[3]:
groups.append(group[0])
groups.sort()
self.groups_label.set_text(", ".join(groups))
def _on_realname_changed(self, widget, text):
model, treeiter = self.users_treeview.get_selection().get_selected()
if treeiter != None:
user = model[treeiter][INDEX_USER_OBJECT]
user.set_real_name(text)
description = "<b>%s</b>\n%s" % (text, user.get_user_name())
model.set_value(treeiter, INDEX_USER_DESCRIPTION, description)
def _on_face_browse_menuitem_activated(self, menuitem):
model, treeiter = self.users_treeview.get_selection().get_selected()
if treeiter != None:
user = model[treeiter][INDEX_USER_OBJECT]
dialog = Gtk.FileChooserDialog(None, None, Gtk.FileChooserAction.OPEN, (_("Cancel"), Gtk.ResponseType.CANCEL, _("Open"), Gtk.ResponseType.OK))
filter = Gtk.FileFilter()
filter.set_name(_("Images"))
filter.add_mime_type("image/*")
dialog.add_filter(filter)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.frame = Gtk.Frame(visible=False, no_show_all=True)
preview = Gtk.Image(visible=True)
box.pack_start(self.frame, False, False, 0)
self.frame.add(preview)
dialog.set_preview_widget(box)
dialog.set_preview_widget_active(True)
dialog.set_use_preview_label(False)
box.set_margin_end(12)
box.set_margin_top(12)
box.set_size_request(128, -1)
dialog.connect("update-preview", self.update_preview_cb, preview)
response = dialog.run()
if response == Gtk.ResponseType.OK:
path = dialog.get_filename()
image = PIL.Image.open(path)
image.thumbnail((96, 96), Image.ANTIALIAS)
face_path = os.path.join(user.get_home_dir(), ".face")
try:
try:
os.remove(face_path)
except OSError:
pass
priv_helper.drop_privs(user)
image.save(face_path, "png")
finally:
priv_helper.restore_privs()
user.set_icon_file(face_path)
self.face_image.set_from_file(face_path)
model.set_value(treeiter, INDEX_USER_PICTURE, GdkPixbuf.Pixbuf.new_from_file_at_size(face_path, 48, 48))
model.row_changed(model.get_path(treeiter), treeiter)
dialog.destroy()
def update_preview_cb (self, dialog, preview):
# Different widths make the dialog look really crappy as it resizes -
# constrain the width and adjust the height to keep perspective.
filename = dialog.get_preview_filename()
if filename is not None:
if os.path.isfile(filename):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(filename, 128, 128)
if pixbuf is not None:
preview.set_from_pixbuf(pixbuf)
self.frame.show()
return
except GLib.Error as e:
print("Unable to generate preview for file '%s' - %s\n" % (filename, e.message))
preview.clear()
self.frame.hide()
    def _on_face_menuitem_activated(self, menuitem, path):
        """Apply a stock face image at *path* to the selected user."""
        if os.path.exists(path):
            model, treeiter = self.users_treeview.get_selection().get_selected()
            if treeiter != None:
                user = model[treeiter][INDEX_USER_OBJECT]
                user.set_icon_file(path)
                self.face_image.set_from_file(path)
                face_path = os.path.join(user.get_home_dir(), ".face")
                try:
                    try:
                        # Remove any stale ~/.face first (still as root).
                        os.remove(face_path)
                    except OSError:
                        pass
                    # Copy with the user's own privileges so ~/.face ends up
                    # owned by the user rather than root.
                    priv_helper.drop_privs(user)
                    shutil.copy(path, face_path)
                finally:
                    priv_helper.restore_privs()
                model.set_value(treeiter, INDEX_USER_PICTURE, GdkPixbuf.Pixbuf.new_from_file_at_size(path, 48, 48))
                model.row_changed(model.get_path(treeiter), treeiter)
def menu_display(self, widget, event):
if event.button == 1:
self.menu.popup(None, None, self.popup_menu_below_button, self.face_button, event.button, event.time)
self.menu.show_all()
def popup_menu_below_button (self, *args):
# the introspection for GtkMenuPositionFunc seems to change with each Gtk version,
# this is a workaround to make sure we get the menu and the widget
menu = args[0]
widget = args[-1]
# here I get the coordinates of the button relative to
# window (self.window)
button_x, button_y = widget.get_allocation().x, widget.get_allocation().y
# now convert them to X11-relative
unused_var, window_x, window_y = widget.get_window().get_origin()
x = window_x + button_x
y = window_y + button_y
# now move the menu below the button
y += widget.get_allocation().height
push_in = True # push_in is True so all menu is always inside screen
return (x, y, push_in)
    def on_accounts_service_loaded(self, user, param):
        """AccountsService finished loading: populate the users list."""
        self.load_users()
def load_users(self):
self.users.clear()
users = self.accountService.list_users()
for user in users:
if os.path.exists(user.get_icon_file()):
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(user.get_icon_file(), 48, 48)
else:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size("/usr/share/cinnamon/faces/user-generic.png", 48, 48)
description = "<b>%s</b>\n%s" % (user.get_real_name(), user.get_user_name())
piter = self.users.append(None, [user, pixbuf, description])
self.users_treeview.set_model(self.users)
def load_groups(self):
self.groups.clear()
groups = sorted(grp.getgrall(), key=lambda x: x[0], reverse=False)
for group in groups:
(gr_name, gr_passwd, gr_gid, gr_mem) = group
piter = self.groups.append(None, [gr_gid, gr_name])
self.groups_treeview.set_model(self.groups)
    # --- User callbacks ---
def on_user_selection(self, selection):
    """Populate the detail pane for the newly selected user and update
    the sensitivity of the delete/password buttons."""
    self.password_button.set_sensitive(True)
    self.password_button.set_tooltip_text("")
    model, treeiter = selection.get_selected()
    if treeiter != None:
        user = model[treeiter][INDEX_USER_OBJECT]
        self.builder.get_object("button_delete_user").set_sensitive(True)
        self.realname_entry.set_text(user.get_real_name())
        # Show how the password is configured for this account.
        if user.get_password_mode() == AccountsService.UserPasswordMode.REGULAR:
            self.password_mask.set_text('\u2022\u2022\u2022\u2022\u2022\u2022')
        elif user.get_password_mode() == AccountsService.UserPasswordMode.NONE:
            self.password_mask.set_markup("<b>%s</b>" % _("No password set"))
        else:
            self.password_mask.set_text(_("Set at login"))
        # Combo index 1 = Administrator, 0 = Standard.
        if user.get_account_type() == AccountsService.UserAccountType.ADMINISTRATOR:
            self.account_type_combo.set_active(1)
        else:
            self.account_type_combo.set_active(0)
        # Load the account picture; oversized images are rescaled to 96x96
        # and any failure falls back to the generic face.
        pixbuf = None
        path = user.get_icon_file()
        message = ""
        if os.path.exists(path):
            try:
                pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
            except GLib.Error as e:
                message = "Could not load pixbuf from '%s': %s" % (path, e.message)
                error = True  # NOTE(review): assigned but never read
        if pixbuf != None:
            if pixbuf.get_height() > 96 or pixbuf.get_width() > 96:
                try:
                    pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, 96, 96)
                except GLib.Error as e:
                    message = "Could not scale pixbuf from '%s': %s" % (path, e.message)
                    error = True  # NOTE(review): assigned but never read
        if pixbuf:
            self.face_image.set_from_pixbuf(pixbuf)
        else:
            if message != "":
                print(message)
            self.face_image.set_from_file("/usr/share/cinnamon/faces/user-generic.png")
        # Collect every group that lists this user as a member.
        groups = []
        for group in grp.getgrall():
            if user.get_user_name() in group[3]:
                groups.append(group[0])
        groups.sort()
        self.groups_label.set_text(", ".join(groups))
        self.builder.get_object("box_users").show()
        # Count the number of connections for the currently logged-in user
        # ("w -hs" prints one line per session).
        connections = int(subprocess.check_output(["w", "-hs", user.get_user_name()]).decode("utf-8").count("\n"))
        if connections > 0:
            self.builder.get_object("button_delete_user").set_sensitive(False)
            self.builder.get_object("button_delete_user").set_tooltip_text(_("This user is currently logged in"))
        else:
            self.builder.get_object("button_delete_user").set_sensitive(True)
            self.builder.get_object("button_delete_user").set_tooltip_text("")
        # Changing the password of an ecryptfs home from here would break
        # access to the encrypted directory.
        if os.path.exists("/home/.ecryptfs/%s" % user.get_user_name()):
            self.password_button.set_sensitive(False)
            self.password_button.set_tooltip_text(_("The user's home directory is encrypted. To preserve access to the encrypted directory, only the user should change this password."))
    else:
        # No selection: nothing to delete, hide the detail pane.
        self.builder.get_object("button_delete_user").set_sensitive(False)
        self.builder.get_object("box_users").hide()
def on_user_deletion(self, event):
    """Ask for confirmation, then delete the selected user account (and
    its files) via AccountsService."""
    model, treeiter = self.users_treeview.get_selection().get_selected()
    if treeiter != None:
        user = model[treeiter][INDEX_USER_OBJECT]
        message = _("Are you sure you want to permanently delete %s and all the files associated with this user?") % user.get_user_name()
        d = Gtk.MessageDialog(self.window,
                              Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                              Gtk.MessageType.QUESTION,
                              Gtk.ButtonsType.YES_NO,
                              message)
        d.set_markup(message)
        # Default to "No" so a stray Enter does not delete the account.
        d.set_default_response(Gtk.ResponseType.NO)
        r = d.run()
        d.destroy()
        if r == Gtk.ResponseType.YES:
            # Second argument True -> also remove the user's files.
            result = self.accountService.delete_user(user, True)
            if result:
                model.remove(treeiter)
                self.load_groups()
def on_user_addition(self, event):
    """Show the new-user dialog and create the account it describes."""
    dialog = NewUserDialog(self.window)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        # Combo index 1 = Administrator, anything else = Standard.
        if dialog.account_type_combo.get_active() == 1:
            account_type = AccountsService.UserAccountType.ADMINISTRATOR
        else:
            account_type = AccountsService.UserAccountType.STANDARD
        fullname = dialog.realname_entry.get_text()
        username = dialog.username_entry.get_text()
        new_user = self.accountService.create_user(username, fullname, account_type)
        # New accounts start without a password.
        new_user.set_password_mode(AccountsService.UserPasswordMode.NONE)
        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size("/usr/share/cinnamon/faces/user-generic.png", 48, 48)
        description = "<b>%s</b>\n%s" % (fullname, username)
        piter = self.users.append(None, [new_user, pixbuf, description])
        # Add the user to his/her own group and sudo if Administrator was selected
        if dialog.account_type_combo.get_active() == 1:
            subprocess.call(["usermod", username, "-G", "%s,sudo,nopasswdlogin" % username])
        else:
            subprocess.call(["usermod", username, "-G", "%s,nopasswdlogin" % username])
        self.load_groups()
    dialog.destroy()
def on_user_edition(self, event):
    """Placeholder handler for editing the selected user (logs only)."""
    model, treeiter = self.users_treeview.get_selection().get_selected()
    if treeiter is None:
        return
    selected = model[treeiter][INDEX_USER_OBJECT]
    print("Editing user %s" % selected.get_user_name())
# GROUPS CALLBACKS
def on_group_selection(self, selection):
    """Update group-button sensitivity; a group that is some user's
    primary group must not be deletable."""
    model, treeiter = selection.get_selected()
    if treeiter != None:
        self.builder.get_object("button_edit_group").set_sensitive(True)
        self.builder.get_object("button_delete_group").set_sensitive(True)
        self.builder.get_object("button_delete_group").set_tooltip_text("")
        group = model[treeiter][INDEX_GROUPNAME]
        # Scan the password database: p[3] is the user's primary GID.
        for p in pwd.getpwall():
            username = p[0]
            primary_group = grp.getgrgid(p[3])[0]
            if primary_group == group:
                # Deleting a primary group would orphan this user.
                self.builder.get_object("button_delete_group").set_sensitive(False)
                self.builder.get_object("button_delete_group").set_tooltip_text(_("This group is set as %s's primary group") % username)
                break
    else:
        # No selection: disable editing and deletion.
        self.builder.get_object("button_edit_group").set_sensitive(False)
        self.builder.get_object("button_delete_group").set_sensitive(False)
        self.builder.get_object("button_delete_group").set_tooltip_text("")
def on_group_deletion(self, event):
    """Ask for confirmation, then remove the selected group via groupdel."""
    model, treeiter = self.groups_treeview.get_selection().get_selected()
    if treeiter != None:
        group = model[treeiter][INDEX_GROUPNAME]
        message = _("Are you sure you want to permanently delete %s?") % group
        d = Gtk.MessageDialog(self.window,
                              Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                              Gtk.MessageType.QUESTION,
                              Gtk.ButtonsType.YES_NO,
                              message)
        d.set_markup(message)
        # Default to "No" so a stray Enter does not delete the group.
        d.set_default_response(Gtk.ResponseType.NO)
        r = d.run()
        if r == Gtk.ResponseType.YES:
            subprocess.call(["groupdel", group])
            self.load_groups()
        d.destroy()
def on_group_addition(self, event):
    """Prompt for a new group name and create it with groupadd.

    The entered name is trimmed and normalized to lower case; an empty
    name is ignored instead of being passed to groupadd (which would
    fail with an "invalid group name" error).
    """
    dialog = GroupDialog(_("Group Name"), "", self.window)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        name = dialog.entry.get_text().strip().lower()
        if name:
            subprocess.call(["groupadd", name])
            self.load_groups()
    dialog.destroy()
def on_group_edition(self, event):
    """Rename the selected group via groupmod after prompting for the
    new name."""
    model, treeiter = self.groups_treeview.get_selection().get_selected()
    if treeiter is None:
        return
    current_name = model[treeiter][INDEX_GROUPNAME]
    dialog = GroupDialog(_("Group Name"), current_name, self.window)
    if dialog.run() == Gtk.ResponseType.OK:
        subprocess.call(["groupmod", current_name, "-n",
                         dialog.entry.get_text().lower()])
        self.load_groups()
    dialog.destroy()
if __name__ == "__main__":
    # Standalone invocation: give the process a readable name, build the
    # settings module UI, and hand control to the Gtk main loop.
    setproctitle("cinnamon-settings-users")
    module = Module()
    Gtk.main()
| gpl-2.0 |
erkanay/django | django/contrib/gis/geos/__init__.py | 151 | 1154 | """
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details:
http://geodjango.org/docs/geos.html
"""
# The public API grows incrementally below depending on whether the GEOS
# C library could be loaded; HAS_GEOS is always exported.
__all__ = ['HAS_GEOS']

try:
    from .libgeos import geos_version, geos_version_info  # NOQA: flake8 detects only the last __all__
    HAS_GEOS = True
    __all__ += ['geos_version', 'geos_version_info']
except ImportError:
    # libgeos could not be loaded; expose only the flag so callers can
    # degrade gracefully.
    HAS_GEOS = False

if HAS_GEOS:
    # GEOS is available: export the full geometry API.
    from .geometry import GEOSGeometry, wkt_regex, hex_regex
    from .point import Point
    from .linestring import LineString, LinearRing
    from .polygon import Polygon
    from .collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
    from .error import GEOSException, GEOSIndexError
    from .io import WKTReader, WKTWriter, WKBReader, WKBWriter
    from .factory import fromfile, fromstr
    __all__ += [
        'GEOSGeometry', 'wkt_regex', 'hex_regex', 'Point', 'LineString',
        'LinearRing', 'Polygon', 'GeometryCollection', 'MultiPoint',
        'MultiLineString', 'MultiPolygon', 'GEOSException', 'GEOSIndexError',
        'WKTReader', 'WKTWriter', 'WKBReader', 'WKBWriter', 'fromfile',
        'fromstr',
    ]
| bsd-3-clause |
mcking49/apache-flask | Python/Lib/site-packages/flask_socketio/__init__.py | 2 | 10875 | import os
import sys
from socketio import socketio_manage
from socketio.server import SocketIOServer
from socketio.namespace import BaseNamespace
from flask import request, session, json
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from werkzeug._internal import _log
from test_client import SocketIOTestClient
class SocketIOMiddleware(object):
    """WSGI middleware that hands socket.io requests to gevent-socketio
    and passes everything else through to the wrapped Flask app."""

    def __init__(self, app, socket):
        self.app = app
        if app.debug:
            # Keep Werkzeug's interactive debugger working under socketio.
            app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)
        self.wsgi_app = app.wsgi_app
        self.socket = socket

    def __call__(self, environ, start_response):
        # NOTE(review): str.strip() never returns None, so the first half
        # of the test below is redundant.
        path = environ['PATH_INFO'].strip('/')
        if path is not None and path.startswith('socket.io'):
            if 'socketio' not in environ:
                raise RuntimeError('You need to use a gevent-socketio server.')
            # gevent-socketio takes over the connection; the response is
            # produced by the socket transport, not returned from here.
            socketio_manage(environ, self.socket.get_namespaces(), self.app,
                            json_loads=json.loads, json_dumps=json.dumps)
        else:
            return self.wsgi_app(environ, start_response)
class SocketIO(object):
    """Flask extension that bridges a Flask application to a
    gevent-socketio server.

    Handlers are registered per socket.io namespace via :meth:`on` /
    :meth:`on_message` and invoked inside a Flask request context so
    ``request`` and ``session`` behave as usual.
    """

    def __init__(self, app=None):
        if app:
            self.init_app(app)
        self.messages = {}              # namespace -> {event name: handler}
        self.rooms = {}                 # namespace -> {room: set(Namespace)}
        self.server = None              # SocketIOServer, captured lazily
        self.exception_handlers = {}    # namespace -> error handler
        self.default_exception_handler = None

    def init_app(self, app):
        """Install the socket.io WSGI middleware on *app*."""
        app.wsgi_app = SocketIOMiddleware(app, self)

    def get_namespaces(self, base_namespace=BaseNamespace):
        """Return the {namespace name: Namespace class} mapping expected
        by gevent-socketio's ``socketio_manage``."""
        class GenericNamespace(base_namespace):
            socketio = self
            # Keep the unwrapped emit/send so the overrides below can
            # still reach the plain gevent-socketio behavior.
            base_emit = base_namespace.emit
            base_send = base_namespace.send

            def initialize(self):
                self.rooms = set()

            def process_event(self, packet):
                if self.socketio.server is None:
                    self.socketio.server = self.environ['socketio'].server
                message = packet['name']
                args = packet['args']
                app = self.request
                return self.socketio._dispatch_message(app, self, message,
                                                       args)

            def join_room(self, room):
                if self.socketio._join_room(self, room):
                    self.rooms.add(room)

            def leave_room(self, room):
                if self.socketio._leave_room(self, room):
                    self.rooms.remove(room)

            def recv_connect(self):
                if self.socketio.server is None:
                    self.socketio.server = self.environ['socketio'].server
                ret = super(GenericNamespace, self).recv_connect()
                app = self.request
                self.socketio._dispatch_message(app, self, 'connect')
                return ret

            def recv_disconnect(self):
                if self.socketio.server is None:
                    self.socketio.server = self.environ['socketio'].server
                app = self.request
                self.socketio._dispatch_message(app, self, 'disconnect')
                # Iterate over a copy: leave_room mutates self.rooms.
                for room in self.rooms.copy():
                    self.leave_room(room)
                return super(GenericNamespace, self).recv_disconnect()

            def recv_message(self, data):
                if self.socketio.server is None:
                    self.socketio.server = self.environ['socketio'].server
                app = self.request
                return self.socketio._dispatch_message(app, self, 'message',
                                                       [data])

            def recv_json(self, data):
                if self.socketio.server is None:
                    self.socketio.server = self.environ['socketio'].server
                app = self.request
                return self.socketio._dispatch_message(app, self, 'json',
                                                       [data])

            def emit(self, event, *args, **kwargs):
                ns_name = kwargs.pop('namespace', None)
                broadcast = kwargs.pop('broadcast', False)
                room = kwargs.pop('room', None)
                if broadcast or room:
                    # Fan-out goes through the extension, which tracks all
                    # connected sockets and rooms.
                    if ns_name is None:
                        ns_name = self.ns_name
                    return self.socketio.emit(event, *args,
                                              namespace=ns_name, room=room)
                if ns_name is None:
                    return self.base_emit(event, *args, **kwargs)
                return request.namespace.socket[ns_name].base_emit(
                    event, *args, **kwargs)

            def send(self, message, json=False, ns_name=None, callback=None,
                     broadcast=False, room=None):
                if broadcast or room:
                    if ns_name is None:
                        ns_name = self.ns_name
                    return self.socketio.send(message, json, ns_name, room)
                if ns_name is None:
                    return request.namespace.base_send(message, json, callback)
                return request.namespace.socket[ns_name].base_send(
                    message, json, callback)

        return dict((ns_name, GenericNamespace) for ns_name in self.messages)

    def _dispatch_message(self, app, namespace, message, args=()):
        """Invoke the handler registered for *message* inside a Flask
        request context, mirroring the socket session into the Flask
        session around the call."""
        if namespace.ns_name not in self.messages:
            return
        if message not in self.messages[namespace.ns_name]:
            return
        with app.request_context(namespace.environ):
            request.namespace = namespace
            # Copy the socket session into the Flask session...
            for k, v in namespace.session.items():
                session[k] = v
            ret = self.messages[namespace.ns_name][message](*args)
            # ...and write any changes back after the handler ran.
            for k, v in session.items():
                namespace.session[k] = v
            return ret

    def _join_room(self, namespace, room):
        """Register *namespace* in *room*; return True if it was added."""
        if namespace.ns_name not in self.rooms:
            self.rooms[namespace.ns_name] = {}
        if room not in self.rooms[namespace.ns_name]:
            self.rooms[namespace.ns_name][room] = set()
        if namespace not in self.rooms[namespace.ns_name][room]:
            self.rooms[namespace.ns_name][room].add(namespace)
            return True
        return False

    def _leave_room(self, namespace, room):
        """Remove *namespace* from *room*, pruning empty containers;
        return True if it was removed."""
        if namespace.ns_name in self.rooms:
            if room in self.rooms[namespace.ns_name]:
                if namespace in self.rooms[namespace.ns_name][room]:
                    self.rooms[namespace.ns_name][room].remove(namespace)
                    if len(self.rooms[namespace.ns_name][room]) == 0:
                        del self.rooms[namespace.ns_name][room]
                        if len(self.rooms[namespace.ns_name]) == 0:
                            del self.rooms[namespace.ns_name]
                    return True
        return False

    def on_message(self, message, handler, namespace=''):
        """Register *handler* for *message* in *namespace*."""
        if namespace not in self.messages:
            self.messages[namespace] = {}
        self.messages[namespace][message] = handler

    def on(self, message, namespace=''):
        """Decorator form of :meth:`on_message`.

        If an error handler is already registered for the namespace (or a
        default one exists) *at decoration time*, the handler is wrapped
        so exceptions are routed to it.
        """
        if namespace in self.exception_handlers or self.default_exception_handler is not None:
            def decorator(f):
                def func(*args, **kwargs):
                    try:
                        f(*args, **kwargs)
                    except:  # noqa: route any failure to the error handler
                        handler = self.exception_handlers.get(
                            namespace, self.default_exception_handler)
                        type, value, traceback = sys.exc_info()
                        handler(value)
                self.on_message(message, func, namespace)
                return func
        else:
            def decorator(f):
                self.on_message(message, f, namespace)
                return f
        return decorator

    def on_error(self, namespace=''):
        """Decorator that registers an exception handler for *namespace*."""
        def decorator(exception_handler):
            if not callable(exception_handler):
                raise ValueError('exception_handler must be callable')
            self.exception_handlers[namespace] = exception_handler
            # Return the handler so the decorated name keeps pointing at
            # the function instead of being rebound to None.
            return exception_handler
        return decorator

    def on_error_default(self, exception_handler):
        """Decorator that registers the fallback exception handler."""
        if not callable(exception_handler):
            raise ValueError('exception_handler must be callable')
        self.default_exception_handler = exception_handler
        # Return the handler so the decorated name keeps pointing at the
        # function instead of being rebound to None.
        return exception_handler

    def emit(self, event, *args, **kwargs):
        """Server-initiated emit: send *event* to one room or to every
        connected client of the namespace."""
        ns_name = kwargs.pop('namespace', '')
        room = kwargs.pop('room', None)
        if room is not None:
            for client in self.rooms.get(ns_name, {}).get(room, set()):
                client.base_emit(event, *args, **kwargs)
        elif self.server:
            for sessid, socket in self.server.sockets.items():
                if socket.active_ns.get(ns_name):
                    socket[ns_name].base_emit(event, *args, **kwargs)

    def send(self, message, json=False, namespace=None, room=None):
        """Server-initiated send: deliver *message* to one room or to
        every connected client of the namespace."""
        ns_name = namespace
        if ns_name is None:
            ns_name = ''
        if room:
            for client in self.rooms.get(ns_name, {}).get(room, set()):
                client.base_send(message, json)
        else:
            if self.server:
                for sessid, socket in self.server.sockets.items():
                    if socket.active_ns.get(ns_name):
                        socket[ns_name].base_send(message, json)

    def run(self, app, host=None, port=None, **kwargs):
        """Start a blocking gevent SocketIOServer for *app*.

        Defaults to 127.0.0.1 and the port embedded in SERVER_NAME
        (falling back to 5000). In debug mode the Werkzeug reloader is
        used around the server loop.
        """
        if host is None:
            host = '127.0.0.1'
        if port is None:
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        # don't allow override of resource, otherwise allow SocketIOServer
        # kwargs to be passed through
        kwargs.pop('resource', None)
        self.server = SocketIOServer((host, port), app.wsgi_app,
                                     resource='socket.io', **kwargs)
        if app.debug:
            def run_server():
                self.server.serve_forever()
            if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
                _log('info', ' * Running on http://%s:%d/' % (host, port))
            run_with_reloader(run_server)
        else:
            self.server.serve_forever()

    def test_client(self, app, namespace=None):
        """Return a SocketIOTestClient bound to *app* for unit tests."""
        return SocketIOTestClient(app, self, namespace)
def emit(event, *args, **kwargs):
    """Module-level convenience: emit an event through the namespace that
    is handling the current request (only valid inside a handler)."""
    return request.namespace.emit(event, *args, **kwargs)
def send(message, json=False, namespace=None, callback=None, broadcast=False, room=None):
    """Module-level convenience: send a message through the namespace that
    is handling the current request (only valid inside a handler)."""
    return request.namespace.send(message, json, namespace, callback, broadcast, room)
def join_room(room):
    """Add the client of the current request to *room*."""
    return request.namespace.join_room(room)
def leave_room(room):
    """Remove the client of the current request from *room*."""
    return request.namespace.leave_room(room)
def error(error_name, error_message, msg_id=None, quiet=False):
    """Send a socket.io error packet to the client of the current request."""
    return request.namespace.error(error_name, error_message, msg_id, quiet)
def disconnect(silent=False):
    """Disconnect the client that is handling the current request."""
    return request.namespace.disconnect(silent)
| mit |
jlnaudin/x-drone | MissionPlanner-master/packages/IronPython.StdLib.2.7.4/content/Lib/ast.py | 255 | 11805 | # -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    """
    # PyCF_ONLY_AST makes compile() stop after parsing and return the tree
    # instead of a code object.
    return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression. The string or node provided may only consist of the following
    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
    and None.
    """
    # NOTE: this is the Python 2 implementation -- 'basestring' and 'long'
    # below do not exist on Python 3.
    _safe_names = {'None': None, 'True': True, 'False': False}
    if isinstance(node_or_string, basestring):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        # Unwrap the Expression container to get at the actual literal node.
        node_or_string = node_or_string.body
    def _convert(node):
        # Recursively turn a literal AST node into the value it denotes.
        if isinstance(node, Str):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, Name):
            if node.id in _safe_names:
                return _safe_names[node.id]
        elif isinstance(node, BinOp) and \
             isinstance(node.op, (Add, Sub)) and \
             isinstance(node.right, Num) and \
             isinstance(node.right.n, complex) and \
             isinstance(node.left, Num) and \
             isinstance(node.left.n, (int, long, float)):
            # Allow complex literals written as 'real +/- imagj'.
            left = node.left.n
            right = node.right.n
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed string')
    return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*. This is mainly useful for
    debugging purposes. The returned string will show the names and the values
    for fields. This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False. Attributes such as line
    numbers and column offsets are not dumped by default. If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        # Render an AST node as 'ClassName(field=value, ...)', a list as
        # '[...]' and anything else via repr().
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                ('%s=%s' % field for field in fields)
                if annotate_fields else
                (b for a, b in fields)
            ))
            if include_attributes and node._attributes:
                # Separate attributes from fields with ', ' only when
                # there actually were fields.
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset` attributes) from
    *old_node* to *new_node* if possible, and return *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        supported = (attr in old_node._attributes
                     and attr in new_node._attributes)
        if supported and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno and
    col_offset attributes for every node that supports them. This is rather
    tedious to fill in for generated nodes, so this helper adds these attributes
    recursively where not already set, by setting them to the values of the
    parent node. It works recursively starting at *node*.
    """
    def _fix(node, lineno, col_offset):
        # Inherit the parent's location when the node lacks its own;
        # otherwise propagate the node's own location to its children.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # The tree root defaults to line 1, column 0.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line number of each node in the tree starting at *node* by *n*.
    This is useful to "move code" to a different location in a file.
    """
    for descendant in walk(node):
        if 'lineno' not in descendant._attributes:
            continue
        descendant.lineno = getattr(descendant, 'lineno', 0) + n
    return node
def iter_fields(node):
    """
    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
    that is present on *node*.
    """
    for name in node._fields:
        # Fields that were never assigned on this instance are skipped.
        if hasattr(node, name):
            yield name, getattr(node, name)
def iter_child_nodes(node):
    """
    Yield all direct child nodes of *node*, that is, all fields that are nodes
    and all items of fields that are lists of nodes.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            # Lists of nodes are flattened one level; non-node entries
            # (e.g. plain strings) are ignored.
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found. If the node provided does not have docstrings a TypeError
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # A docstring is a bare string expression as the first body statement.
    if node.body and isinstance(node.body[0], Expr) and \
       isinstance(node.body[0].value, Str):
        if clean:
            import inspect
            # cleandoc() strips uniform leading indentation.
            return inspect.cleandoc(node.body[0].value.s)
        return node.body[0].value.s
def walk(node):
    """
    Recursively yield all descendant nodes in the tree starting at *node*
    (including *node* itself), in no specified order. This is useful if you
    only want to modify nodes in place and don't care about the context.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        # Queue the children before yielding the current node
        # (breadth-first traversal).
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    A node visitor base class that walks the abstract syntax tree and calls a
    visitor function for every node found. This function may return a value
    which is forwarded by the `visit` method.

    This class is meant to be subclassed, with the subclass adding visitor
    methods.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node. So a `TryFinally` node visit function would
    be `visit_TryFinally`. This behavior can be changed by overriding
    the `visit` method. If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.

    Don't use the `NodeVisitor` if you want to apply changes to nodes during
    traversing. For this a special visitor exists (`NodeTransformer`) that
    allows modifications.
    """
    def visit(self, node):
        """Visit a node."""
        # Dispatch on the node's class name, e.g. visit_BinOp, falling
        # back to generic_visit when no such method exists.
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        # Visit every child; lists of nodes are flattened one level.
        for field, value in iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
            elif isinstance(value, AST):
                self.visit(value)
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node. If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value. The return value may be the
    original node in which case no replacement takes place.

    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::

       class RewriteName(NodeTransformer):

           def visit_Name(self, node):
               return copy_location(Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Index(value=Str(s=node.id)),
                   ctx=node.ctx
               ), node)

    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.

    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.

    Usually you use the transformer like this::

       node = YourTransformer().visit(node)
    """
    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            # NOTE(review): old_value is immediately re-fetched here,
            # shadowing the value yielded by iter_fields above.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # Visitor removed the node: drop it.
                            continue
                        elif not isinstance(value, AST):
                            # Visitor returned several nodes: splice them in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the list in place so the parent keeps its object.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
| gpl-3.0 |
nickmarton/Vivid | vivid/classes/parsers/point_parser.py | 1 | 2439 | """This section introduces the PointParser class."""
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from point import Point
class PointParser(object):
    """
    PointParser class. The PointParser class is used for parsing Point object
    related expressions.

    :ivar _is_Parser: An identifier to use in place of ``type`` or \
    ``isinstance``.
    """

    def __init__(self):
        """
        Construct a PointParser object.
        """
        self._is_Parser = True

    def __call__(self, *args):
        """
        Call PointParser object (e.g., ``PointParser(expression)``).
        """
        return self._eval(*args)

    def _eval(self, string):
        """
        Try to evaluate given string
        (e.g., "``is_on(P(2.0,2.0),P(1.0,1.0),P(3.0,3.0))``").

        :param string: The expression to evaluate; the PointParser object \
        unstringifies Point objects in ``string`` parameter and tries to call \
        a function of the Point object (also given by ``string`` parameter) \
        with unstringified Points as arguments.
        :type string: ``str``

        :raises ValueError: Function provided in ``string`` parameter is not \
        a function in the Point class, some argument is not a Point after \
        trying to unstringify or the ``string`` parameter is improperly \
        formatted.
        """
        # Split "fn(arg,arg,...)" into the function name and the raw
        # argument text between the outermost parentheses.
        fn_start, fn_end = string.find("("), string.rfind(")")
        fn_name, fn_args = string[:fn_start], string[fn_start + 1: fn_end]
        for fn in dir(Point):
            if fn_name == fn:
                point_function = getattr(Point, fn)
                break
        else:
            raise ValueError("Function not contained in dir of Point")
        import re
        parsed_args = []
        # NOTE(review): the pattern only matches a single digit before the
        # decimal point (e.g. P(12.5,...) would not match) -- presumably
        # coordinates are normalized; confirm against callers.
        point_pattern = r'P\(-?\d\.\d+(,-?\d\.\d+)*\)|P\(x(,x)*\)'
        match_obj_iter = re.finditer(point_pattern, fn_args)
        for match in match_obj_iter:
            parsed_args.append(Point.unstringify(match.group()))
            # Remove the consumed text so only separators should remain.
            fn_args = fn_args.replace(match.group(), '', 1)
        # After removing all Points, nothing but commas may be left over.
        if not all([char == "," for char in fn_args]):
            raise ValueError("Only Point arguments acceptable")
        try:
            return point_function(*parsed_args)
        except Exception, e:
            # Python 2 'except' syntax; the original exception is replaced.
            raise ValueError("Bad args provided")
def main():
    """Placeholder entry point; intentionally does nothing."""
    pass
if __name__ == "__main__":
    # Script entry point (currently a no-op).
    main()
| mit |
ryankanno/froide | froide/foirequest/south_migrations/0013_auto__add_field_foimessage_status.py | 6 | 16723 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the nullable 'status' CharField
    (max_length=50) to the foirequest_foimessage table."""
    status_field = self.gf('django.db.models.fields.CharField')(
        default=None, max_length=50, null=True, blank=True)
    db.add_column('foirequest_foimessage', 'status', status_field,
                  keep_default=False)
def backwards(self, orm):
# Deleting field 'FoiMessage.status'
db.delete_column('foirequest_foimessage', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
APP_MODEL_NAME: {
'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foirequest.foiattachment': {
'Meta': {'ordering': "('name',)", 'object_name': 'FoiAttachment'},
'belongs_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiMessage']", 'null': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'foirequest.foievent': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'FoiEvent'},
'context_json': ('django.db.models.fields.TextField', [], {}),
'event_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']", 'null': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True', 'blank': 'True'})
},
'foirequest.foimessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'FoiMessage'},
'html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_postal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_response': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'original': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plaintext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'received_messages'", 'null': 'True', 'to': "orm['publicbody.PublicBody']"}),
'redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_public_body': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'send_messages'", 'null': 'True', 'to': "orm['publicbody.PublicBody']"}),
'sender_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
'foirequest.foirequest': {
'Meta': {'ordering': "('last_message',)", 'object_name': 'FoiRequest'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'costs': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_foi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.FoiLaw']", 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']", 'null': 'True', 'blank': 'True'}),
'refusal_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resolved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'secret_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True'}),
'visibility': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'foirequest.publicbodysuggestion': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'PublicBodySuggestion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True'})
},
'publicbody.foilaw': {
'Meta': {'object_name': 'FoiLaw'},
'combined': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_response_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_response_time_unit': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'meta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
'refusal_reasons': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'publicbody.publicbody': {
'Meta': {'object_name': 'PublicBody'},
'_created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_creators'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_updaters'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'descendants'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBodyTopic']", 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'publicbody.publicbodytopic': {
'Meta': {'object_name': 'PublicBodyTopic'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foirequest']
| mit |
github-account-because-they-want-it/django-extensions | tests/test_runscript.py | 23 | 1184 | from django.core.management import call_command
from django.test import TestCase
import django
import pytest
import six
import sys
class RunScriptTests(TestCase):
    """Tests for the ``runscript`` management command."""

    def setUp(self):
        # Capture stdout/stderr so the assertions can inspect command output.
        # Keep references to the real streams so tearDown can restore them --
        # the original code leaked the redirection into subsequent tests.
        self._stdout, self._stderr = sys.stdout, sys.stderr
        sys.stdout = six.StringIO()
        sys.stderr = six.StringIO()

    def tearDown(self):
        # Undo the stream redirection from setUp.
        sys.stdout, sys.stderr = self._stdout, self._stderr

    def test_runs(self):
        # lame test...does it run?
        call_command('runscript', 'sample_script', verbosity=2)
        self.assertIn("Found script 'tests.testapp.scripts.sample_script'", sys.stdout.getvalue())
        self.assertIn("Running script 'tests.testapp.scripts.sample_script'", sys.stdout.getvalue())

    @pytest.mark.skipif(
        django.VERSION < (1, 7),
        reason="AppConfig and modify_settings appeared in 1.7"
    )
    def test_runs_appconfig(self):
        # Same script, but discovered through an AppConfig entry instead of
        # the plain app module.
        with self.modify_settings(INSTALLED_APPS={
            'append': 'tests.testapp.apps.TestAppConfig',
            'remove': 'tests.testapp',
        }):
            call_command('runscript', 'sample_script', verbosity=2)
            self.assertIn("Found script 'tests.testapp.scripts.sample_script'", sys.stdout.getvalue())
            self.assertIn("Running script 'tests.testapp.scripts.sample_script'", sys.stdout.getvalue())
| mit |
cpennington/XBlock | xblock/slider.py | 13 | 1976 | """Simple XBlock with a slider interface.
WARNING: This is an experimental module, subject to future change or removal.
"""
import json
from webob import Response
from xblock.core import XBlock
from xblock.fields import Scope, Integer
from xblock.fragment import Fragment
class Slider(XBlock):
    """Base XBlock with a slider interface."""

    # Slider bounds are course content; the chosen value is per-student state.
    min_value = Integer(help="Minimum value", default=0, scope=Scope.content)
    max_value = Integer(help="Maximum value", default=100, scope=Scope.content)
    value = Integer(help="Student value", default=0, scope=Scope.user_state)

    def student_view(self, context):  # pylint: disable=W0613
        """Provide the default student view."""
        html = SLIDER_TEMPLATE.format(min=self.min_value,
                                      max=self.max_value,
                                      val=self.value)
        frag = Fragment(html)
        # Bug fix: CSS declarations use "property: value" -- the original
        # "width=100px" was invalid CSS and the rule was silently dropped.
        frag.add_css("input[type=range] { width: 100px; }")
        frag.add_javascript(SLIDER_JS)
        frag.initialize_js('Slider')
        return frag

    def update(self, request):
        """Handler: persist the slider value POSTed by the browser."""
        data = json.loads(request.body)
        self.value = int(data['value'])
        return Response()
SLIDER_TEMPLATE = u"""
<input type="range" min="{min}" max="{max}" value="{val}"/> <span> {val} </span>
"""
SLIDER_JS = """
function Slider(runtime, element) {
if (!(this instanceof Slider)) {
return new Slider(runtime, element);
}
this.handlerUrl = runtime.handlerUrl(element, 'update');
this.input = $(element).children('input[type="range"]');
this.output = $(element).children('span');
var self = this;
self.input.on('change', function () {
self.output.html(this.value);
});
self.input.on('mouseup', function () {
$.post(self.handlerUrl, JSON.stringify({value: this.value}));
});
};
Slider.prototype.submit = function() {
return this.input.val();
};
Slider.prototype.handleSubmit = function(result) {
};
"""
| agpl-3.0 |
wjn740/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage string: fixed to name this script (the original said
# "syscall-counts-by-pid.py", copied from a sibling script).
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    # A numeric argument filters by pid; anything else filters by comm.
    try:
        for_pid = int(sys.argv[1])
    except ValueError:  # narrowed from a bare except
        for_comm = sys.argv[1]

# comm -> pid -> syscall id -> errno -> count (auto-vivifying dict).
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts (Python 2 print
    # statement: perf's embedded interpreter for this script is Python 2).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the per-pid error summary.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, ret):
    # Skip events that don't match the optional comm/pid filter from argv.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    # A negative return value means the syscall failed; tally it under
    # comm -> pid -> syscall id -> errno.
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # autodict leaf did not exist yet; start the counter at 1.
            syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, ret):
    # Legacy tracepoint name; forwards all arguments to the raw handler via
    # **locals(). NOTE(review): the raw handler also expects a
    # common_callchain parameter that is absent here -- presumably perf only
    # dispatches one of the two entry points; confirm before relying on this.
    raw_syscalls__sys_exit(**locals())
def print_error_totals():
    # Print failed-syscall counts grouped by comm, pid, syscall and errno.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort errno buckets by count, descending. The tuple-
                # unpacking lambda is Python-2-only syntax, as is iteritems().
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
jeromecc/docker-fhio-yakkety | tty.js/node_modules/tty.js/node_modules/pty.js/deps/winpty/misc/DebugClient.py | 42 | 1558 | #!python
# Run with native CPython. Needs pywin32 extensions.
# Copyright (c) 2011-2012 Ryan Prichard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import winerror
import win32pipe
import win32file
import win32api
import sys
import pywintypes
import time
# Require exactly one argument: the message to send to the debug server.
if len(sys.argv) != 2:
    print("Usage: %s message" % sys.argv[0])
    sys.exit(1)

# Prefix the message with a wrapped timestamp (seconds mod 100000) and the
# sending script's name so server-side logs can be correlated.
message = "[%05.3f %s]: %s" % (time.time() % 100000, sys.argv[0], sys.argv[1])

# One-shot transactional named-pipe call to the local DebugServer pipe.
# The 16 is presumably the reply-buffer size expected by CallNamedPipe --
# confirm against the pywin32 documentation.
win32pipe.CallNamedPipe(
    "\\\\.\\pipe\\DebugServer",
    message.encode(),
    16,
    win32pipe.NMPWAIT_WAIT_FOREVER)
| apache-2.0 |
jeffbryner/MozDef | tests/alerts/test_proxy_drop_executable.py | 3 | 4404 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from .positive_alert_test_case import PositiveAlertTestCase
from .negative_alert_test_case import NegativeAlertTestCase
from .alert_test_suite import AlertTestSuite
class TestAlertProxyDropExecutable(AlertTestSuite):
    """Test suite for the proxy_drop_executable alert.

    All fixtures are built at class-definition time; ``test_cases`` collects
    positive cases (alert expected) and negative cases (no alert expected).
    """

    alert_filename = "proxy_drop_executable"

    # This event is the default positive event that will cause the
    # alert to trigger
    default_event = {
        "_source": {
            "category": "proxy",
            "details": {
                "sourceipaddress": "1.2.3.4",
                "destination": "http://evil.com/evil.exe",
                "proxyaction": "TCP_DENIED",
            },
        },
    }

    # This event is an alternate destination that we'd want to aggregate
    default_event2 = AlertTestSuite.copy(default_event)
    default_event2["_source"]["details"]["destination"] = "http://evil.com/evil.sh"

    # This event is the default negative event that will not cause the
    # alert to trigger
    default_negative_event = AlertTestSuite.copy(default_event)
    default_negative_event["_source"]["details"][
        "destination"
    ] = "http://foo.mozilla.com/index.html"

    # This alert is the expected result from running this task
    default_alert = {
        "category": "squid",
        "tags": ["squid", "proxy"],
        "severity": "WARNING",
        "summary": "Suspicious Proxy DROP event(s) detected from 1.2.3.4 to the following executable file destination(s): http://evil.com/evil.exe",
    }

    # This alert is the expected result from this task against multiple matching events
    default_alert_aggregated = AlertTestSuite.copy(default_alert)
    default_alert_aggregated[
        "summary"
    ] = "Suspicious Proxy DROP event(s) detected from 1.2.3.4 to the following executable file destination(s): http://evil.com/evil.exe,http://evil.com/evil.sh"

    test_cases = []

    test_cases.append(
        PositiveAlertTestCase(
            description="Positive test with default events and default alert expected",
            events=AlertTestSuite.create_events(default_event, 1),
            expected_alert=default_alert,
        )
    )

    test_cases.append(
        PositiveAlertTestCase(
            description="Positive test with default events and default alert expected - dedup",
            events=AlertTestSuite.create_events(default_event, 2),
            expected_alert=default_alert,
        )
    )

    # Two different executable destinations from the same source should be
    # aggregated into a single alert summary.
    events1 = AlertTestSuite.create_events(default_event, 1)
    events2 = AlertTestSuite.create_events(default_event2, 1)
    test_cases.append(
        PositiveAlertTestCase(
            description="Positive test with default events and default alert expected - different dests",
            events=events1 + events2,
            expected_alert=default_alert_aggregated,
        )
    )

    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test with default negative event",
            events=AlertTestSuite.create_events(default_negative_event, 1),
        )
    )

    # Wrong category must not trigger the alert.
    events = AlertTestSuite.create_events(default_event, 10)
    for event in events:
        event["_source"]["category"] = "bad"
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with events with incorrect category",
            events=events,
        )
    )

    # Events older than the alert's search window (241 minutes) must not
    # trigger the alert.
    events = AlertTestSuite.create_events(default_event, 10)
    for event in events:
        event["_source"][
            "utctimestamp"
        ] = AlertTestSuite.subtract_from_timestamp_lambda({"minutes": 241})
        event["_source"][
            "receivedtimestamp"
        ] = AlertTestSuite.subtract_from_timestamp_lambda({"minutes": 241})
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with old timestamp", events=events
        )
    )

    # Non-executable file extensions must not trigger the alert.
    events = AlertTestSuite.create_events(default_event, 10)
    for event in events:
        event["_source"]["details"]["destination"] = "http://evil.com/evil.pdf"
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with events with non blacklisted extension",
            events=events,
        )
    )
meyerbe/pycles | generate_namelist.py | 1 | 70404 | import argparse
import json
import pprint
from sys import exit
import uuid
import ast
def main():
    """Parse the command line and write the namelist for the chosen case."""
    parser = argparse.ArgumentParser(prog='Namelist Generator')
    parser.add_argument('case_name')

    # Optional Arguments for CGILS
    parser.add_argument('--perturbed_temperature', default='False',
                        help='Specify if perturbed temperature case is to be run (CGILS) as True/False')
    parser.add_argument('--control_subsidence', default='False',
                        help='Specify if control subsidence is to be used in perturbed runs (CGILS) as True/False')
    parser.add_argument('--zgils_location', default='False',
                        help='specify location (6/11/12)')
    args = parser.parse_args()

    # Optional Arguments for CGILS (string flags parsed as Python literals)
    is_p2 = ast.literal_eval(args.perturbed_temperature)
    is_ctl_omega = ast.literal_eval(args.control_subsidence)
    zgils_loc = ast.literal_eval(args.zgils_location)
    print(zgils_loc)

    # Map each case name onto a zero-argument namelist factory.
    factories = {
        'StableBubble': StableBubble,
        'SaturatedBubble': SaturatedBubble,
        'ColdPoolDry_single_3D': lambda: ColdPoolDry_3D('single'),
        'ColdPoolDry_double_3D': lambda: ColdPoolDry_3D('double'),
        'ColdPoolDry_triple_3D': lambda: ColdPoolDry_3D('triple'),
        'SullivanPatton': SullivanPatton,
        'Bomex': Bomex,
        'Gabls': Gabls,
        'DYCOMS_RF01': DYCOMS_RF01,
        'DYCOMS_RF02': DYCOMS_RF02,
        'SMOKE': SMOKE,
        'Rico': Rico,
        'Isdac': Isdac,
        'IsdacCC': IsdacCC,
        'Mpace': Mpace,
        'Sheba': Sheba,
        'CGILS_S6': lambda: CGILS_S6(is_p2, is_ctl_omega),
        'CGILS_S11': lambda: CGILS_S11(is_p2, is_ctl_omega),
        'CGILS_S12': lambda: CGILS_S12(is_p2, is_ctl_omega),
        'ZGILS': lambda: ZGILS(zgils_loc),
        'DCBLSoares': DCBLSoares,
        'DCBLSoares_moist': DCBLSoares_moist,
    }

    try:
        namelist = factories[args.case_name]()
    except KeyError:
        print('Not a valid case name')
        exit()

    write_file(namelist)
def SullivanPatton():
    """Namelist for the Sullivan & Patton convective boundary layer case."""
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 32,
            'ny': 32,
            'nz': 32,
            'gw': 3,
            'dx': 160.0,
            'dy': 160.0,
            'dz': 64.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 10.0,
            'dt_max': 10.0,
            't_max': 7200.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_Dry',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'Smagorinsky',
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.02,
                'z_d': 500.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['TKE'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 1800.0,
            'diagnostic_fields': ['temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        'meta': {
            'simname': 'SullivanPatton',
            'casename': 'SullivanPatton',
        },
    }
    return namelist
def ColdPoolDry_3D(number, rstar=1000.0, sep=10000.0):
    """Namelist for a dry cold-pool simulation.

    Bug fix: the original body referenced undefined names (``rstar``, ``d``
    and ``np``), so every call raised NameError. They are now keyword
    parameters with defaults (backward-compatible: existing callers pass only
    ``number``), and the numpy-free ``//`` is used for integer grid indices.

    :param number: cold-pool configuration, 'single', 'double' or 'triple'.
    :param rstar: initial radius [m] of the temperature anomaly. Default
        1000 m, matching the 'dTh3K_z1000_r1000' setup noted below.
    :param sep: separation [m] between cold pools ('double'/'triple' only).
        Default is a placeholder -- confirm against the intended experiment.
    """
    namelist = {}

    namelist['grid'] = {}
    namelist['grid']['dims'] = 3
    namelist['grid']['nx'] = 200
    namelist['grid']['ny'] = 200
    namelist['grid']['nz'] = 120  # height of 12km is sufficient (for dTh3K_z1000_r1000)
    namelist['grid']['gw'] = 5
    namelist['grid']['dx'] = 100.0
    namelist['grid']['dy'] = 100.0
    namelist['grid']['dz'] = 100.0

    namelist['init'] = {}
    namelist['init']['dTh'] = 3.0     # temperature anomaly
    namelist['init']['shape'] = 1     # shape of temperature anomaly: 1 = cos2-shape
    namelist['init']['h'] = 2000.0    # initial height of temperature anomaly
    namelist['init']['r'] = rstar     # initial radius of temperature anomaly
    namelist['init']['marg'] = 500.   # width or margin (transition for temperature anomaly)
    if number == 'single':
        # Integer grid indices of the anomaly centre (domain midpoint).
        namelist['init']['ic'] = namelist['grid']['nx'] // 2
        namelist['init']['jc'] = namelist['grid']['ny'] // 2
    elif number == 'double':
        namelist['init']['sep'] = sep  # separation of CPs
        # (ic, jc): point of collision; CP coordinates: (ic+-sep/2, jc)
    elif number == 'triple':
        namelist['init']['d'] = sep    # separation of CPs in equilateral triangle
        namelist['init']['ic'] = namelist['grid']['nx'] // 2

    namelist['mpi'] = {}
    namelist['mpi']['nprocx'] = 1
    namelist['mpi']['nprocy'] = 1
    namelist['mpi']['nprocz'] = 1

    namelist['time_stepping'] = {}
    namelist['time_stepping']['ts_type'] = 3
    namelist['time_stepping']['cfl_limit'] = 0.3
    namelist['time_stepping']['dt_initial'] = 10.0
    namelist['time_stepping']['dt_max'] = 10.0
    namelist['time_stepping']['t_max'] = 3600.0

    namelist['thermodynamics'] = {}
    namelist['thermodynamics']['latentheat'] = 'constant'

    namelist['microphysics'] = {}
    namelist['microphysics']['scheme'] = 'None_Dry'
    namelist['microphysics']['phase_partitioning'] = 'liquid_only'

    namelist['sgs'] = {}
    namelist['sgs']['scheme'] = 'Smagorinsky'
    # namelist['sgs']['scheme'] = 'UniformViscosity'
    # namelist['sgs']['UniformViscosity'] = {}
    # namelist['sgs']['UniformViscosity']['viscosity'] = 0.0
    # namelist['sgs']['UniformViscosity']['diffusivity'] = 0.0

    namelist['diffusion'] = {}
    namelist['diffusion']['qt_entropy_source'] = False

    namelist['momentum_transport'] = {}
    namelist['momentum_transport']['order'] = 5

    namelist['scalar_transport'] = {}
    namelist['scalar_transport']['order'] = 5

    namelist['damping'] = {}
    namelist['damping']['scheme'] = 'Rayleigh'  # 'None'
    namelist['damping']['Rayleigh'] = {}
    namelist['damping']['Rayleigh']['gamma_r'] = 0.2
    namelist['damping']['Rayleigh']['z_d'] = 600

    namelist['output'] = {}
    namelist['output']['output_root'] = './'

    namelist['restart'] = {}
    namelist['restart']['output'] = True
    namelist['restart']['init_from'] = False
    namelist['restart']['input_path'] = './'
    namelist['restart']['frequency'] = 600.0

    namelist['conditional_stats'] = {}

    namelist['stats_io'] = {}
    namelist['stats_io']['stats_dir'] = 'stats'
    namelist['stats_io']['auxiliary'] = ['None']
    namelist['stats_io']['frequency'] = 100.0

    namelist['fields_io'] = {}
    namelist['fields_io']['fields_dir'] = 'fields'
    namelist['fields_io']['frequency'] = 100.0
    # namelist['fields_io']['diagnostic_fields'] = ['ql','temperature','buoyancy_frequency','viscosity']
    namelist['fields_io']['diagnostic_fields'] = ['temperature', 'theta']

    namelist['meta'] = {}
    if number == 'single':
        namelist['meta']['casename'] = 'ColdPoolDry_single_3D'
        namelist['meta']['simname'] = 'ColdPoolDry_single_3D'
    elif number == 'double':
        namelist['meta']['casename'] = 'ColdPoolDry_double_3D'
        namelist['meta']['simname'] = 'ColdPoolDry_double_3D'
    elif number == 'triple':
        namelist['meta']['casename'] = 'ColdPoolDry_triple_3D'
        namelist['meta']['simname'] = 'ColdPoolDry_triple_3D'

    namelist['surface'] = {}
    # schemes: 'none', 'bulk', 'const'
    namelist['surface']['scheme'] = 'none'

    namelist['visualization'] = {}
    namelist['visualization']['frequency'] = 10000.0

    namelist['tracers'] = {}
    namelist['tracers']['use_tracers'] = 'passive'
    # 1: same tracer in whole domain; 2: different tracer in initial anomaly vs. environment
    namelist['tracers']['number'] = 1
    namelist['tracers']['kmin'] = 0
    namelist['tracers']['kmax'] = 10

    return namelist
def SaturatedBubble():
    '''
    Namelist generator for the saturated (moist) warm-bubble test case.
    Dry-dynamics-free configuration: uniform (zero) SGS viscosity, no damping,
    saturation-adjustment microphysics with liquid-only partitioning.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 100,
            'ny': 5,
            'nz': 50,
            'gw': 3,
            'dx': 200.0,
            'dy': 200.0,
            'dz': 200.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.3,
            'dt_initial': 10.0,
            'dt_max': 10.0,
            't_max': 1000.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_SA',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'UniformViscosity',
            # Zero viscosity/diffusivity: effectively no SGS mixing.
            'UniformViscosity': {
                'viscosity': 0.0,
                'diffusivity': 0.0,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'None',
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'conditional_stats': {},
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['None'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 100.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'meta': {
            'casename': 'SaturatedBubble',
            'simname': 'SaturatedBubble',
        },
    }
    return namelist
def StableBubble():
    '''
    Namelist generator for the dry stable (density current) bubble case.
    Uses fixed uniform viscosity/diffusivity of 75 m^2/s and no damping layer.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 512,
            'ny': 7,
            'nz': 64,
            'gw': 3,
            'dx': 100.0,
            'dy': 100.0,
            'dz': 100.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 10.0,
            'dt_max': 10.0,
            't_max': 1000.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_Dry',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'UniformViscosity',
            'UniformViscosity': {
                'viscosity': 75.0,
                'diffusivity': 75.0,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'None',
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'conditional_stats': {},
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['None'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 100.0,
            'diagnostic_fields': ['temperature', 'buoyancy_frequency'],
        },
        'visualization': {
            'frequency': 60.0,
        },
        'meta': {
            'simname': 'StableBubble',
            'casename': 'StableBubble',
        },
    }
    return namelist
def Bomex():
    '''
    Namelist generator for the BOMEX shallow trade-wind cumulus case.
    Smagorinsky SGS with extra UniformViscosity/TKE parameter groups,
    Rayleigh damping in the upper domain, 6-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 75,
            'gw': 3,
            'dx': 100.0,
            'dy': 100.0,
            'dz': 100 / 2.5,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 10.0,
            'dt_max': 10.0,
            't_max': 21600.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_SA',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'cs': 0.17,
            },
            # Parameters below are only read if the respective scheme is selected.
            'UniformViscosity': {
                'viscosity': 1.2,
                'diffusivity': 3.6,
            },
            'TKE': {
                'ck': 0.1,
                'cn': 0.76,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 600,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['Cumulus', 'TKE'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 1800.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        'visualization': {
            'frequency': 1800.0,
        },
        'meta': {
            'simname': 'Bomex',
            'casename': 'Bomex',
        },
        'ClausiusClapeyron': {
            'temperature_min': 100.15,
            'temperature_max': 500.0,
        },
        'initialization': {
            'random_seed_factor': 1,
        },
    }
    return namelist
def Gabls():
    '''
    Namelist generator for the GABLS stable boundary layer intercomparison case.
    Fine isotropic 6.25 m grid, Smagorinsky SGS with turbulent Prandtl number 1/3,
    weak Rayleigh damping near the domain top.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 64,
            'gw': 3,
            'dx': 6.25,
            'dy': 6.25,
            'dz': 6.25,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 1.0,
            'dt_max': 2.0,
            't_max': 43200.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_Dry',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'cs': 0.17,
                'prt': 1.0 / 3.0,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.02,
                'z_d': 100.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['StableBL'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 1800.0,
            'diagnostic_fields': ['temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        'meta': {
            'simname': 'Gabls',
            'casename': 'Gabls',
        },
    }
    return namelist
def DYCOMS_RF01():
    '''
    Namelist generator for the DYCOMS-II RF01 nocturnal stratocumulus case.
    Interactive RRTM radiation, implicit-LES Smagorinsky configuration,
    no cloud droplet sedimentation, 4-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 96,
            'ny': 96,
            'nz': 300,
            'gw': 3,
            'dx': 35.0,
            'dy': 35.0,
            'dz': 5.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 1.0,
            'dt_max': 4.0,
            't_max': 4.0 * 3600.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_SA',
            'phase_partitioning': 'liquid_only',
            'cloud_sedimentation': False,
            'ccn': 100.0e6,
        },
        'radiation': {
            'use_RRTM': True,
            'RRTM': {
                'frequency': 60.0,
            },
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.002,
                'z_d': 500.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['DYCOMS', 'Flux', 'TKE'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 3600.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency'],
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        # Effectively disables visualization output.
        'visualization': {
            'frequency': 1e6,
        },
        'meta': {
            'simname': 'DYCOMS_RF01',
            'casename': 'DYCOMS_RF01',
        },
    }
    return namelist
def DYCOMS_RF02():
    '''
    Namelist generator for the DYCOMS-II RF02 drizzling stratocumulus case.
    Seifert-Beheler liquid microphysics with droplet sedimentation enabled,
    implicit-LES Smagorinsky, 6-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 128,
            'ny': 128,
            'nz': 300,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 5.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 6.0 * 3600.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'SB_Liquid',
            'phase_partitioning': 'liquid_only',
            'cloud_sedimentation': True,
            'ccn': 55.0e6,
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.002,
                'z_d': 500.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['DYCOMS', 'Flux'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 3600.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency'],
        },
        # Effectively disables visualization output.
        'visualization': {
            'frequency': 1e6,
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        'meta': {
            'simname': 'DYCOMS_RF02',
            'casename': 'DYCOMS_RF02',
        },
    }
    return namelist
def SMOKE():
    '''
    Namelist generator for the smoke cloud case:
    Bretherton, C. S., and coauthors, 1999:
    An intercomparison of radiatively-driven entrainment and turbulence in a smoke cloud,
    as simulated by different numerical models. Quart. J. Roy. Meteor. Soc., 125, 391-423.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 50,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 25.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 4.0 * 3600.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'scheme': 'None_Dry',
            'phase_partitioning': 'liquid_only',
        },
        'sgs': {
            'scheme': 'Smagorinsky',
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.002,
                'z_d': 500.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['SMOKE'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 3600.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
        'meta': {
            'simname': 'SMOKE',
            'casename': 'SMOKE',
        },
    }
    return namelist
def Rico():
    '''
    Namelist generator for the RICO (Rain In Cumulus over the Ocean)
    precipitating trade-wind cumulus case. SB_Liquid two-moment warm-rain
    microphysics, first-order sedimentation transport, 24-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 128,
            'ny': 128,
            'nz': 150,
            'gw': 3,
            'dx': 100.0,
            'dy': 100.0,
            'dz': 40.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.7,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 3600.0 * 24.0,
        },
        'thermodynamics': {
            'latentheat': 'constant',
        },
        'microphysics': {
            'phase_partitioning': 'liquid_only',
            'cloud_sedimentation': False,
            'ccn': 70.0e6,
            'scheme': 'SB_Liquid',
            'SB_Liquid': {
                'nu_droplet': 0,
                'mu_rain': 1,
            },
        },
        'sgs': {
            'scheme': 'Smagorinsky',
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
            'order_sedimentation': 1,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 800,
            },
        },
        'output': {
            'output_root': './',
        },
        'stats_io': {
            'stats_dir': 'stats',
            'auxiliary': ['Cumulus'],
            'frequency': 60.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 1800.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'meta': {
            'simname': 'Rico',
            'casename': 'Rico',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'conditional_stats': {
            'classes': ['Spectra'],
            'frequency': 600.0,
            'stats_dir': 'cond_stats',
        },
    }
    return namelist
def Isdac():
    '''
    Namelist generator for the ISDAC Arctic mixed-phase stratocumulus case.
    Arctic_1M microphysics, implicit-LES Smagorinsky, RRTM configured but
    disabled by default (use_RRTM = False), 8-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 250,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 10.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.5,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 3600.0 * 8.0,
        },
        'microphysics': {
            'scheme': 'Arctic_1M',
            'phase_partitioning': 'Arctic',
            'n0_ice': 1.0e7,
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'radiation': {
            'use_RRTM': False,
            'RRTM': {
                'frequency': 60.0,
                'buffer_points': 15,
                'patch_pressure': 600.0 * 100.0,
                'adjes': 0.0,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
            'order_sedimentation': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 600,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            # NOTE(review): plain string 'None' here, whereas most other cases
            # use the list ['None'] — preserved as-is; confirm the consumer
            # accepts both forms.
            'auxiliary': 'None',
            'frequency': 30.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 36000.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'meta': {
            'simname': 'Isdac',
            'casename': 'Isdac',
        },
    }
    return namelist
def IsdacCC():
    '''
    Namelist generator for the ISDAC climate-change perturbation experiment.
    Same model setup as Isdac, plus an 'initial' group of idealized sounding
    parameters (surface temperature, inversion strength, humidity, SST shift),
    a prescribed surface sensible heat flux, and RRTM radiation enabled.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 250,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 10.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.5,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 3600.0 * 8.0,
        },
        'microphysics': {
            'scheme': 'Arctic_1M',
            'phase_partitioning': 'Arctic',
            'n0_ice': 1.0e7,
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
            'order_sedimentation': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 600,
            },
        },
        'initial': {
            'SST': 265.0,            # initial surface temperature
            'dTi': 7.0,              # temperature jump at the inversion
            'rh0': 0.8,              # surface relative humidity
            'gamma': 5.0 / 1000.,    # free-tropospheric lapse rate
            'rh': 0.6,               # free-tropospheric relative humidity
            'z_top': 820.0,          # top of mixed layer
            'dzi': 30.0,             # inversion height
            'dSST': 8.0,             # SST change (climate change)
            'divergence': 5.0e-6,    # large-scale divergence
            'fix_dqt': True,
        },
        'surface': {
            'sensible': 0.0,         # surface sensible heat flux, W m^-2
        },
        'radiation': {
            'use_RRTM': True,
            'RRTM': {
                'frequency': 60.0,
                'buffer_points': 15,
                'patch_pressure': 600.0 * 100.0,
                'adjes': 0.0,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            # NOTE(review): plain string 'None' (not ['None']) — preserved as-is.
            'auxiliary': 'None',
            'frequency': 30.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 36000.0,
            'diagnostic_fields': ['ql', 'temperature', 'buoyancy_frequency', 'viscosity'],
        },
        'meta': {
            'simname': 'IsdacCC',
            'casename': 'IsdacCC',
        },
    }
    return namelist
def Mpace():
    '''
    Namelist generator for the M-PACE Arctic mixed-phase cloud case.
    Arctic_1M microphysics, RRTM radiation with site-specific solar geometry
    (Barrow-area latitude/longitude), 12-hour integration.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 250,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 10.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.5,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 3600.0 * 12.0,
        },
        'microphysics': {
            'scheme': 'Arctic_1M',
            'phase_partitioning': 'Arctic',
            'n0_ice': 1.0e7,
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'radiation': {
            'use_RRTM': True,
            'RRTM': {
                'frequency': 60.0,
                'buffer_points': 15,
                'patch_pressure': 600.0 * 100.0,
                'dyofyr': 283,
                'daily_mean_sw': False,
                'hourz': 17.0,
                'latitude': 71.75,
                'longitude': 151.0,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
            'order_sedimentation': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 600,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            # NOTE(review): plain string 'None' (not ['None']) — preserved as-is.
            'auxiliary': 'None',
            'frequency': 30.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 36000.0,
            'diagnostic_fields': ['ql', 'temperature'],
        },
        'meta': {
            'simname': 'Mpace',
            'casename': 'Mpace',
        },
    }
    return namelist
def Sheba():
    '''
    Namelist generator for the SHEBA Arctic cloud case.
    Arctic_1M microphysics, RRTM radiation with SHEBA-specific solar geometry,
    stretched patching column and direct-beam surface albedo, 12-hour run.
    :return: namelist dict consumed by the simulation driver
    '''
    namelist = {
        'grid': {
            'dims': 3,
            'nx': 64,
            'ny': 64,
            'nz': 250,
            'gw': 3,
            'dx': 50.0,
            'dy': 50.0,
            'dz': 10.0,
        },
        'mpi': {
            'nprocx': 1,
            'nprocy': 1,
            'nprocz': 1,
        },
        'time_stepping': {
            'ts_type': 3,
            'cfl_limit': 0.5,
            'dt_initial': 1.0,
            'dt_max': 10.0,
            't_max': 3600.0 * 12.0,
        },
        'microphysics': {
            'scheme': 'Arctic_1M',
            'phase_partitioning': 'Arctic',
            'n0_ice': 1.0e7,
        },
        'sgs': {
            'scheme': 'Smagorinsky',
            'Smagorinsky': {
                'iles': True,
            },
        },
        'radiation': {
            'use_RRTM': True,
            'RRTM': {
                'frequency': 60.0,
                'buffer_points': 15,
                'stretch_factor': 1.2,
                'patch_pressure': 500.0 * 100.0,
                'dyofyr': 127,
                'daily_mean_sw': False,
                'hourz': 12.0,
                'latitude': 76.0,
                'longitude': 195.0,
                'adir': 0.827,
            },
        },
        'diffusion': {
            'qt_entropy_source': False,
        },
        'momentum_transport': {
            'order': 5,
        },
        'scalar_transport': {
            'order': 5,
            'order_sedimentation': 5,
        },
        'damping': {
            'scheme': 'Rayleigh',
            'Rayleigh': {
                'gamma_r': 0.2,
                'z_d': 600,
            },
        },
        'output': {
            'output_root': './',
        },
        'restart': {
            'output': True,
            'init_from': False,
            'input_path': './',
            'frequency': 600.0,
        },
        'stats_io': {
            'stats_dir': 'stats',
            # NOTE(review): plain string 'None' (not ['None']) — preserved as-is.
            'auxiliary': 'None',
            'frequency': 30.0,
        },
        'fields_io': {
            'fields_dir': 'fields',
            'frequency': 36000.0,
            'diagnostic_fields': ['ql', 'temperature'],
        },
        'meta': {
            'simname': 'Sheba',
            'casename': 'Sheba',
        },
    }
    return namelist
def CGILS_S6(is_p2, is_ctl_omega):
    """Build the namelist for the CGILS S6 (shallow cumulus) case.

    :param is_p2: True selects the perturbed-climate (P2) configuration.
    :param is_ctl_omega: only meaningful when ``is_p2`` is True; True keeps
        the control subsidence (simname suffix ``_P2``), False uses the
        perturbed subsidence (suffix ``_P2S``).
    :return: nested dict of model settings, fully JSON-serializable.
    """
    namelist = {
        'grid': {'dims': 3, 'nx': 96, 'ny': 96, 'nz': 180, 'gw': 3,
                 'dx': 100.0, 'dy': 100.0, 'dz': 30.0},
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3, 'cfl_limit': 0.7, 'dt_initial': 1.0,
                          'dt_max': 10.0,
                          't_max': 3600.0 * 24.0 * 10.0},  # 10 days
        'thermodynamics': {'latentheat': 'variable'},
        'damping': {'scheme': 'Rayleigh',
                    'Rayleigh': {'gamma_r': 0.02, 'z_d': 600.0}},
        'microphysics': {'phase_partitioning': 'liquid_only',
                         'cloud_sedimentation': True,
                         'ccn': 100.0e6,
                         'scheme': 'SB_Liquid',
                         'SB_Liquid': {'nu_droplet': 0, 'mu_rain': 1}},
        # Defined once here; the original assigned this key twice with
        # identical contents, which was redundant.
        'radiation': {'RRTM': {'frequency': 90.0}},
        'sgs': {'scheme': 'Smagorinsky', 'Smagorinsky': {'iles': False}},
        'diffusion': {'qt_entropy_source': False},
        'momentum_transport': {'order': 5},
        'scalar_transport': {'order': 5, 'order_sedimentation': 1},
        'output': {'output_root': './'},
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Cumulus'],
                     'frequency': 5 * 60.0},
        'fields_io': {'fields_dir': 'fields', 'frequency': 86400.0,
                      'diagnostic_fields': ['ql', 'temperature', 'buoyancy']},
        'meta': {'casename': 'CGILS',
                 'CGILS': {'location': 6, 'P2': is_p2,
                           'CTL_omega': is_ctl_omega}},
        'restart': {'output': True, 'init_from': False, 'input_path': './',
                    'frequency': 600.0, 'delete_old': True,
                    # list() so json.dump() works under Python 3, where a
                    # bare range object is not JSON-serializable.
                    'times_retained': list(range(86400, 86400 * 11, 86400))},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 43200.0,
                              'stats_dir': 'cond_stats'},
    }
    simname = 'CGILS_S' + str(namelist['meta']['CGILS']['location'])
    if is_p2:
        simname += '_P2' if is_ctl_omega else '_P2S'
    else:
        simname += '_CTL'
    namelist['meta']['simname'] = simname
    return namelist
def CGILS_S11(is_p2, is_ctl_omega):
    """Build the namelist for the CGILS S11 (stratocumulus) case.

    :param is_p2: True selects the perturbed-climate (P2) configuration.
    :param is_ctl_omega: only meaningful when ``is_p2`` is True; True keeps
        the control subsidence (simname suffix ``_P2``), False uses the
        perturbed subsidence (suffix ``_P2S``).
    :return: nested dict of model settings, fully JSON-serializable.
    """
    namelist = {
        'grid': {'dims': 3, 'nx': 96, 'ny': 96, 'nz': 180, 'gw': 3,
                 'dx': 50.0, 'dy': 50.0, 'dz': 20.0},
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3, 'cfl_limit': 0.7, 'dt_initial': 1.0,
                          'dt_max': 10.0,
                          't_max': 3600.0 * 24.0 * 10.0},  # 10 days
        'thermodynamics': {'latentheat': 'variable'},
        'damping': {'scheme': 'Rayleigh',
                    'Rayleigh': {'gamma_r': 0.02, 'z_d': 600.0}},
        'microphysics': {'phase_partitioning': 'liquid_only',
                         'cloud_sedimentation': True,
                         'ccn': 100.0e6,
                         'scheme': 'SB_Liquid',
                         'SB_Liquid': {'nu_droplet': 0, 'mu_rain': 1}},
        'sgs': {'scheme': 'Smagorinsky', 'Smagorinsky': {'iles': False}},
        'diffusion': {'qt_entropy_source': False},
        'momentum_transport': {'order': 5},
        'scalar_transport': {'order': 5, 'order_sedimentation': 1},
        'radiation': {'RRTM': {'frequency': 90.0}},
        'output': {'output_root': './'},
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Flux'],
                     'frequency': 5 * 60.0},
        'fields_io': {'fields_dir': 'fields', 'frequency': 86400.0,
                      'diagnostic_fields': ['ql', 'temperature', 'buoyancy']},
        'meta': {'casename': 'CGILS',
                 'CGILS': {'location': 11, 'P2': is_p2,
                           'CTL_omega': is_ctl_omega}},
        'restart': {'output': True, 'init_from': False, 'input_path': './',
                    'frequency': 600.0, 'delete_old': True,
                    # list() so json.dump() works under Python 3, where a
                    # bare range object is not JSON-serializable.
                    'times_retained': list(range(86400, 86400 * 11, 86400))},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 43200.0,
                              'stats_dir': 'cond_stats'},
    }
    simname = 'CGILS_S' + str(namelist['meta']['CGILS']['location'])
    if is_p2:
        simname += '_P2' if is_ctl_omega else '_P2S'
    else:
        simname += '_CTL'
    namelist['meta']['simname'] = simname
    return namelist
def CGILS_S12(is_p2, is_ctl_omega):
    """Build the namelist for the CGILS S12 (coastal stratus) case.

    :param is_p2: True selects the perturbed-climate (P2) configuration.
    :param is_ctl_omega: only meaningful when ``is_p2`` is True; True keeps
        the control subsidence (simname suffix ``_P2``), False uses the
        perturbed subsidence (suffix ``_P2S``).
    :return: nested dict of model settings, fully JSON-serializable.
    """
    namelist = {
        'grid': {'dims': 3, 'nx': 96, 'ny': 96, 'nz': 200, 'gw': 3,
                 'dx': 25.0, 'dy': 25.0, 'dz': 10.0},
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3, 'cfl_limit': 0.7, 'dt_initial': 1.0,
                          'dt_max': 10.0,
                          't_max': 3600.0 * 24.0 * 10.0},  # 10 days
        'thermodynamics': {'latentheat': 'variable'},
        'damping': {'scheme': 'Rayleigh',
                    'Rayleigh': {'gamma_r': 0.02, 'z_d': 500.0}},
        'microphysics': {'phase_partitioning': 'liquid_only',
                         'cloud_sedimentation': True,
                         'ccn': 100.0e6,
                         'scheme': 'SB_Liquid',
                         'SB_Liquid': {'nu_droplet': 0, 'mu_rain': 1}},
        'sgs': {'scheme': 'Smagorinsky', 'Smagorinsky': {'iles': False}},
        'diffusion': {'qt_entropy_source': False},
        'momentum_transport': {'order': 5},
        'scalar_transport': {'order': 5, 'order_sedimentation': 1},
        'radiation': {'RRTM': {'frequency': 90.0}},
        'output': {'output_root': './'},
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Flux'],
                     'frequency': 5 * 60.0},
        'fields_io': {'fields_dir': 'fields', 'frequency': 86400.0,
                      'diagnostic_fields': ['ql', 'temperature', 'buoyancy']},
        'meta': {'casename': 'CGILS',
                 'CGILS': {'location': 12, 'P2': is_p2,
                           'CTL_omega': is_ctl_omega}},
        'restart': {'output': True, 'init_from': False, 'input_path': './',
                    'frequency': 600.0, 'delete_old': True,
                    # list() so json.dump() works under Python 3, where a
                    # bare range object is not JSON-serializable.
                    'times_retained': list(range(86400, 86400 * 11, 86400))},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 43200.0,
                              'stats_dir': 'cond_stats'},
    }
    simname = 'CGILS_S' + str(namelist['meta']['CGILS']['location'])
    if is_p2:
        simname += '_P2' if is_ctl_omega else '_P2S'
    else:
        simname += '_CTL'
    namelist['meta']['simname'] = simname
    return namelist
def ZGILS(zgils_loc):
    """Build the namelist for a ZGILS case at location 6, 11 or 12.

    :param zgils_loc: ZGILS location index.  For 6, 11 or 12 the matching
        prescribed ocean heat flux is set; any other value leaves
        ``surface_budget['ocean_heat_flux']`` unset (matching the original
        behavior).
    :return: nested dict of model settings, fully JSON-serializable.
    """
    # Prescribed ocean heat flux per location (W/m^2).
    ocean_heat_flux_by_loc = {12: 70.0, 11: 90.0, 6: 60.0}
    namelist = {
        'grid': {'dims': 3, 'nx': 86, 'ny': 86, 'nz': 216, 'gw': 3,
                 'dx': 75.0, 'dy': 75.0, 'dz': 20.0},
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3, 'cfl_limit': 0.7, 'dt_initial': 1.0,
                          'dt_max': 10.0,
                          't_max': 3600.0 * 24.0 * 20.0},  # 20 days
        'thermodynamics': {'latentheat': 'variable'},
        'damping': {'scheme': 'Rayleigh',
                    'Rayleigh': {'gamma_r': 0.2, 'z_d': 500.0}},
        'microphysics': {'phase_partitioning': 'liquid_only',
                         'cloud_sedimentation': True,
                         'ccn': 100.0e6,
                         'scheme': 'SB_Liquid',
                         'SB_Liquid': {'nu_droplet': 0, 'mu_rain': 1}},
        'sgs': {'scheme': 'Smagorinsky', 'Smagorinsky': {'iles': False}},
        'diffusion': {'qt_entropy_source': False},
        'momentum_transport': {'order': 5},
        'scalar_transport': {'order': 5, 'order_sedimentation': 1},
        # To run a fixed-SST case, set fixed_sst_time > t_max of the run.
        # NOTE(review): the original comment called this value "3 days
        # spinup" but 24 h * 3600 s * 30 is 30 days -- confirm intent.
        'surface_budget': {'fixed_sst_time': 24.0 * 3600.0 * 30.0},
        'radiation': {'RRTM': {'frequency': 90.0}},
        'output': {'output_root': './'},
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Flux'],
                     'frequency': 5 * 60.0},
        'fields_io': {'fields_dir': 'fields', 'frequency': 86400.0,
                      'diagnostic_fields': ['ql', 'temperature', 'buoyancy']},
        'meta': {'casename': 'ZGILS', 'ZGILS': {'location': zgils_loc}},
        'restart': {'output': True, 'init_from': False, 'input_path': './',
                    'frequency': 600.0, 'delete_old': True,
                    # list() so json.dump() works under Python 3, where a
                    # bare range object is not JSON-serializable.
                    'times_retained': list(range(86400, 86400 * 21, 86400))},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 43200.0,
                              'stats_dir': 'cond_stats'},
    }
    if zgils_loc in ocean_heat_flux_by_loc:
        namelist['surface_budget']['ocean_heat_flux'] = \
            ocean_heat_flux_by_loc[zgils_loc]
    namelist['meta']['simname'] = \
        'ZGILS_S' + str(namelist['meta']['ZGILS']['location'])
    return namelist
def DCBLSoares():
    """Namelist for the dry convective boundary layer of Soares et al.

    Adopted from "An eddy-diffusivity/mass-flux parametrization for dry and
    shallow cumulus convection", P. M. M. Soares, P. M. A. Miranda,
    A. P. Siebesma and J. Teixeira, Q. J. R. Meteorol. Soc. (2004).
    Modifications: qt initial profile and flux set to zero, since no dry
    thermodynamics without condensation is given.

    :return: nested dict of model settings, fully JSON-serializable.
    """
    namelist = {
        # Soares (2004): 6400 x 6400 m domain, ~3000 m deep, dz = 20 m.
        # Nieuwstadt: ~2400 m deep; dx = dy = 60 m, dz = 50-60 m.
        # IOP paper / old code: 6400 x 6400 m domain, 3750 m deep.
        'grid': {'dims': 3,
                 'nx': 256, 'ny': 256, 'nz': 150,  # IOP
                 'gw': 3,  # for 2nd order
                 'dx': 25.0, 'dy': 25.0, 'dz': 25.0},  # IOP
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3,
                          'cfl_limit': 0.3,  # default: 0.7; IOP: 0.3
                          'dt_initial': 10.0, 'dt_max': 10.0,
                          't_max': 6 * 3600.0},
        # 'constant' or 'variable', for Clausius-Clapeyron calculation.
        'thermodynamics': {'latentheat': 'constant'},
        # Options: 'None_Dry' (no qt as prognostic variable), 'None_SA',
        # 'SB_Liquid'.  Bomex uses 'None_SA'.
        'microphysics': {'scheme': 'None_Dry',
                         'phase_partitioning': 'liquid_only'},
        'sgs': {'scheme': 'Smagorinsky',
                'Smagorinsky': {'cs': 0.17},
                'UniformViscosity': {'viscosity': 1.2, 'diffusivity': 3.6}},
        'diffusion': {'qt_entropy_source': False},
        # 2 = second_order_m, 32 = second_order_ml_m
        'momentum_transport': {'order': 2},
        # 2 = second_order_a
        'scalar_transport': {'order': 2},
        'damping': {'scheme': 'Rayleigh',
                    # z_d: depth of the damping layer.
                    'Rayleigh': {'gamma_r': 0.02, 'z_d': 800.0}},
        'output': {'output_root': './'},
        # The original assigned 'restart' twice; only the second assignment
        # (output=False) survived, so that is what is kept here.
        'restart': {'output': False, 'init_from': False,
                    'input_path': './', 'frequency': 600.0},
        # Profile outputs.
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Flux'],
                     'frequency': 900.0},
        # Field outputs (prognostic variables go to restart files).
        'fields_io': {'fields_dir': 'fields', 'frequency': 1800.0,
                      'diagnostic_fields': ['temperature', 'viscosity']},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 600.0,
                              'stats_dir': 'cond_stats'},
        'meta': {'simname': 'DCBLSoares', 'casename': 'DCBLSoares'},
        'visualization': {'frequency': 1800.0},
        'stochastic_noise': {'flag': False, 'amplitude': 0.05},
        'tracers': {'use_tracers': 'passive', 'kmin': 0, 'kmax': 10},
    }
    return namelist
def DCBLSoares_moist():
    """Moist variant of the Soares et al. convective boundary layer case.

    Adopted from "An eddy-diffusivity/mass-flux parametrization for dry and
    shallow cumulus convection", P. M. M. Soares, P. M. A. Miranda,
    A. P. Siebesma and J. Teixeira, Q. J. R. Meteorol. Soc. (2004).
    Differs from :func:`DCBLSoares` mainly by the 'None_SA' microphysics
    scheme (qt is prognostic), 4th-order transport, TKE SGS constants and
    an explicit Clausius-Clapeyron temperature range.

    :return: nested dict of model settings, fully JSON-serializable.
    """
    namelist = {
        # Soares (2004): 6400 x 6400 m domain, ~3000 m deep, dz = 20 m.
        # IOP paper / old code: 6400 x 6400 m domain, 3750 m deep.
        'grid': {'dims': 3,
                 'nx': 256, 'ny': 256, 'nz': 150,  # IOP
                 'gw': 3,  # for 2nd order
                 'dx': 25.0, 'dy': 25.0, 'dz': 25.0},  # IOP
        'mpi': {'nprocx': 1, 'nprocy': 1, 'nprocz': 1},
        'time_stepping': {'ts_type': 3,
                          'cfl_limit': 0.3,  # default: 0.7; IOP: 0.3
                          'dt_initial': 10.0, 'dt_max': 10.0,
                          't_max': 6 * 3600.0},
        # 'constant' or 'variable', for Clausius-Clapeyron calculation.
        'thermodynamics': {'latentheat': 'constant'},
        # DCBL uses 'None_Dry'; Bomex uses 'None_SA'.
        'microphysics': {'scheme': 'None_SA',
                         'phase_partitioning': 'liquid_only'},
        'sgs': {'scheme': 'Smagorinsky',
                'Smagorinsky': {'cs': 0.17},
                'UniformViscosity': {'viscosity': 1.2, 'diffusivity': 3.6},
                'TKE': {'ck': 0.1, 'cn': 0.76}},
        'diffusion': {'qt_entropy_source': False},
        'momentum_transport': {'order': 4},
        'scalar_transport': {'order': 4},
        'damping': {'scheme': 'Rayleigh',
                    # z_d: depth of the damping layer.
                    'Rayleigh': {'gamma_r': 0.02, 'z_d': 800.0}},
        'output': {'output_root': './'},
        # The original assigned 'restart' twice; only the second assignment
        # (output=False) survived, so that is what is kept here.
        'restart': {'output': False, 'init_from': False,
                    'input_path': './', 'frequency': 600.0},
        # Profile outputs.
        'stats_io': {'stats_dir': 'stats', 'auxiliary': ['Fluxes'],
                     'frequency': 600.0},
        # Field outputs (prognostic variables go to restart files).
        'fields_io': {'fields_dir': 'fields', 'frequency': 1800.0,
                      'diagnostic_fields': ['temperature', 'viscosity']},
        'conditional_stats': {'classes': ['Spectra'], 'frequency': 600.0,
                              'stats_dir': 'cond_stats'},
        'meta': {'simname': 'DCBLSoares_moist',
                 'casename': 'DCBLSoares_moist'},
        'visualization': {'frequency': 1800.0},
        'tracers': {'use_tracers': 'passive', 'tracer_profile': 'smooth',
                    'kmin': 0, 'kmax': 10},
        'ClausiusClapeyron': {'temperature_min': 100.15,
                              'temperature_max': 500.0},
    }
    return namelist
def write_file(namelist):
    """Serialize *namelist* to '<simname>.in' as pretty-printed JSON.

    A fresh UUID is added under ``namelist['meta']['uuid']`` before
    writing, and the full dict is echoed to stdout via pprint.
    Exits the process if ``namelist['meta']['simname']`` is missing.
    """
    try:
        namelist['meta']['simname']
    except KeyError:
        # Narrowed from a bare 'except'; only a missing key is expected.
        print('Casename not specified in namelist dictionary!')
        print('FatalError')
        raise SystemExit(1)
    namelist['meta']['uuid'] = str(uuid.uuid4())
    # 'with' guarantees the handle is closed even if json.dump raises.
    with open(namelist['meta']['simname'] + '.in', 'w') as fh:
        pprint.pprint(namelist)
        json.dump(namelist, fh, sort_keys=True, indent=4)
    return
# Script entry point: ``main`` is defined earlier in this file (outside
# this excerpt) and drives the namelist generation from the command line.
if __name__ == '__main__':
    main()
| gpl-3.0 |
bcorbet/SickRage | lib/hachoir_core/field/helper.py | 90 | 1905 | from lib.hachoir_core.field import (FieldError,
RawBits, RawBytes,
PaddingBits, PaddingBytes,
NullBits, NullBytes,
GenericString, GenericInteger)
from lib.hachoir_core.stream import FileOutputStream
def createRawField(parent, size, name="raw[]", description=None):
    """Create a raw field of *size* bits under *parent*.

    Returns a RawBytes field when the size is byte-aligned, otherwise a
    RawBits field.  Raises FieldError for non-positive sizes.
    """
    if size <= 0:
        raise FieldError("Unable to create raw field of %s bits" % size)
    if (size % 8) == 0:
        # Floor division keeps the byte count an int under Python 3;
        # plain '/' would pass a float to RawBytes.
        return RawBytes(parent, name, size // 8, description)
    else:
        return RawBits(parent, name, size, description)
def createPaddingField(parent, nbits, name="padding[]", description=None):
    """Create a padding field of *nbits* bits under *parent*.

    Returns PaddingBytes when the size is byte-aligned, otherwise
    PaddingBits.  Raises FieldError for non-positive sizes.
    """
    if nbits <= 0:
        raise FieldError("Unable to create padding of %s bits" % nbits)
    if (nbits % 8) == 0:
        # Floor division keeps the byte count an int under Python 3.
        return PaddingBytes(parent, name, nbits // 8, description)
    else:
        return PaddingBits(parent, name, nbits, description)
def createNullField(parent, nbits, name="padding[]", description=None):
    """Create a null-byte padding field of *nbits* bits under *parent*.

    Returns NullBytes when the size is byte-aligned, otherwise NullBits.
    Raises FieldError for non-positive sizes.

    NOTE(review): the default name matches createPaddingField's
    "padding[]" -- possibly intentional, confirm before changing.
    """
    if nbits <= 0:
        raise FieldError("Unable to create null padding of %s bits" % nbits)
    if (nbits % 8) == 0:
        # Floor division keeps the byte count an int under Python 3.
        return NullBytes(parent, name, nbits // 8, description)
    else:
        return NullBits(parent, name, nbits, description)
def isString(field):
    """Tell whether *field* is a GenericString (or a subclass) instance."""
    return isinstance(field, GenericString)
def isInteger(field):
    """Tell whether *field* is a GenericInteger (or a subclass) instance."""
    return isinstance(field, GenericInteger)
def writeIntoFile(fieldset, filename):
    """Serialize *fieldset* into the file at *filename*."""
    out_stream = FileOutputStream(filename)
    fieldset.writeInto(out_stream)
def createOrphanField(fieldset, address, field_cls, *args, **kw):
    """
    Create an orphan field at the given *address*:
       field_cls(fieldset, *args, **kw)

    The field borrows the fieldset's properties but is never added to the
    field set itself.  The fieldset's current size is temporarily pointed
    at *address* and always restored, even if construction raises.
    """
    saved_position = fieldset._current_size
    fieldset._current_size = address
    try:
        orphan = field_cls(fieldset, *args, **kw)
    finally:
        fieldset._current_size = saved_position
    return orphan
| gpl-3.0 |
edwardslabs/CloudBot | plugins/brew.py | 2 | 1692 | import requests
from requests import HTTPError
from cloudbot import hook
api_url = "http://api.brewerydb.com/v2/search?format=json"
@hook.on_start()
def load_key(bot):
    # Cache the BreweryDB API key from the bot config at startup; stays
    # None when the key is not configured, which disables the command.
    global api_key
    api_key = bot.config.get("api_keys", {}).get("brewerydb", None)
@hook.command('brew')
def brew(text, reply):
    """<query> - returns the first brewerydb search result for <query>"""
    if not api_key:
        return "No brewerydb API key set."

    params = {'key': api_key, 'type': 'beer', 'withBreweries': 'Y', 'q': text}
    request = requests.get(api_url, params=params)
    try:
        request.raise_for_status()
    except HTTPError:
        reply("Failed to fetch info ({})".format(request.status_code))
        raise

    response = request.json()
    try:
        # Guard against zero results explicitly; the original only tested
        # key presence, so a response with totalResults == 0 crashed into
        # the parse-error branch instead of reporting "No results found."
        if not response.get('totalResults') or not response.get('data'):
            return "No results found."
        beer = response['data'][0]
        brewery = beer['breweries'][0]
        content = {
            'name': beer['nameDisplay'],
            'style': beer['style']['shortName'] if 'style' in beer else 'unknown style',
            'abv': beer.get('abv', '?.?'),
            'brewer': brewery['name'],
            'url': brewery.get('website', '[no website found]'),
        }
        return "{name} by {brewer} ({style}, {abv}% ABV) - {url}" \
            .format(**content)
    except (KeyError, IndexError, TypeError) as e:
        # Narrowed from 'except Exception'; same user-visible behavior.
        print(e)
        reply("Error parsing results.")
        raise
| gpl-3.0 |
utecuy/edx-platform | common/test/acceptance/pages/lms/discussion.py | 36 | 25473 | from contextlib import contextmanager
from bok_choy.javascript import wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from .course_page import CoursePage
class DiscussionPageMixin(object):
    """Helpers shared by the discussion page objects."""

    def is_ajax_finished(self):
        """Return True once no jQuery AJAX requests remain in flight."""
        active_requests = self.browser.execute_script("return jQuery.active")
        return active_requests == 0
class DiscussionThreadPage(PageObject, DiscussionPageMixin):
url = None
    def __init__(self, browser, thread_selector):
        """
        Initialize a page object scoped to a single thread.

        ``thread_selector`` is a CSS selector for the thread's root
        element; every query made by this page object is scoped under it.
        """
        super(DiscussionThreadPage, self).__init__(browser)
        self.thread_selector = thread_selector
def _find_within(self, selector):
"""
Returns a query corresponding to the given CSS selector within the scope
of this thread page
"""
return self.q(css=self.thread_selector + " " + selector)
    def is_browser_on_page(self):
        # The page counts as loaded once the thread's root element exists.
        return self.q(css=self.thread_selector).present
def _get_element_text(self, selector):
"""
Returns the text of the first element matching the given selector, or
None if no such element exists
"""
text_list = self._find_within(selector).text
return text_list[0] if text_list else None
def _is_element_visible(self, selector):
query = self._find_within(selector)
return query.present and query.visible
    @contextmanager
    def _secondary_action_menu_open(self, ancestor_selector):
        """
        Given the selector for an ancestor of a secondary menu, return a context
        manager that will open and close the menu
        """
        # Open the "more actions" dropdown and wait for it to render.
        self._find_within(ancestor_selector + " .action-more").click()
        EmptyPromise(
            lambda: self._is_element_visible(ancestor_selector + " .actions-dropdown"),
            "Secondary action menu opened"
        ).fulfill()
        yield
        # If the body of the 'with' did not close the menu itself (e.g. by
        # clicking one of its actions), toggle it closed and wait.
        if self._is_element_visible(ancestor_selector + " .actions-dropdown"):
            self._find_within(ancestor_selector + " .action-more").click()
            EmptyPromise(
                lambda: not self._is_element_visible(ancestor_selector + " .actions-dropdown"),
                "Secondary action menu closed"
            ).fulfill()
    def get_group_visibility_label(self):
        """
        Return the group visibility label shown for the thread, or None
        when no such label is rendered.
        """
        return self._get_element_text(".group-visibility-label")
    def get_response_total_text(self):
        """Return the response count text, or None when not present."""
        return self._get_element_text(".response-count")
def get_num_displayed_responses(self):
"""Returns the number of responses actually rendered"""
return len(self._find_within(".discussion-response"))
    def get_shown_responses_text(self):
        """Return the shown response count text, or None when not present."""
        return self._get_element_text(".response-display-count")
    def get_load_responses_button_text(self):
        """Return the "load more responses" button text, or None when absent."""
        return self._get_element_text(".load-response-button")
    def load_more_responses(self):
        """Clicks the load more responses button and waits for responses to load"""
        self._find_within(".load-response-button").click()
        # Completion is detected via jQuery.active rather than a DOM check.
        EmptyPromise(
            self.is_ajax_finished,
            "Loading more Responses"
        ).fulfill()
    def has_add_response_button(self):
        """Return True when the add-response button is rendered and visible."""
        return self._is_element_visible(".add-response-btn")
    def click_add_response_button(self):
        """
        Clicks the add response button and ensures that the response text
        field receives focus
        """
        self._find_within(".add-response-btn").first.click()
        # Waiting on the ':focus' pseudo-class confirms the editor is ready
        # for keyboard input, not merely visible.
        EmptyPromise(
            lambda: self._find_within(".discussion-reply-new textarea:focus").present,
            "Response field received focus"
        ).fulfill()
    @wait_for_js
    def is_response_editor_visible(self, response_id):
        """Return True when the edit form for the given response is shown."""
        return self._is_element_visible(".response_{} .edit-post-body".format(response_id))
    @wait_for_js
    def is_discussion_body_visible(self):
        """Return True when the thread's post body is rendered."""
        return self._is_element_visible(".post-body")
    def is_mathjax_preview_available(self):
        # An empty preview element means MathJax has taken over rendering.
        return self.q(css=".MathJax_Preview").text[0] == ""
    def is_mathjax_rendered(self):
        """Return True when a MathJax-rendered element is visible."""
        return self._is_element_visible(".MathJax")
    def is_response_visible(self, comment_id):
        """Returns true if the response is viewable onscreen"""
        return self._is_element_visible(".response_{} .response-body".format(comment_id))
    def is_response_editable(self, response_id):
        """Returns true if the edit response button is present, false otherwise"""
        # The edit action only exists inside the (closed-by-default)
        # secondary menu, so the menu must be opened to check for it.
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            return self._is_element_visible(".response_{} .discussion-response .action-edit".format(response_id))
    def get_response_body(self, response_id):
        """Return the body text of the given response, or None when absent."""
        return self._get_element_text(".response_{} .response-body".format(response_id))
    def start_response_edit(self, response_id):
        """Click the edit button for the response, loading the editing view"""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click()
            # Wait inside the context manager so the menu is only closed
            # after the editor has appeared.
            EmptyPromise(
                lambda: self.is_response_editor_visible(response_id),
                "Response edit started"
            ).fulfill()
def get_link_href(self):
"""Extracts href attribute of the referenced link"""
link_href = self._find_within(".post-body p a").attrs('href')
return link_href[0] if link_href else None
    def get_response_vote_count(self, response_id):
        """Return the vote-count text for the given response, or None."""
        return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
    def vote_response(self, response_id):
        """Vote on the response and wait until the displayed count changes."""
        # Snapshot the count first; success is detected by it changing.
        current_count = self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
        self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: current_count != self.get_response_vote_count(response_id),
            "Response is voted"
        ).fulfill()
    def is_response_reported(self, response_id):
        """Return True when the response shows the 'reported' label."""
        return self._is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id))
    def report_response(self, response_id):
        """Report the response via its secondary menu and wait for the label."""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click()
            self.wait_for_ajax()
            EmptyPromise(
                lambda: self.is_response_reported(response_id),
                "Response is reported"
            ).fulfill()
    def is_response_endorsed(self, response_id):
        """Return True if the response's posted-details text marks it as endorsed."""
        return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id))
def endorse_response(self, response_id):
self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click()
self.wait_for_ajax()
EmptyPromise(
lambda: self.is_response_endorsed(response_id),
"Response edit started"
).fulfill()
    def set_response_editor_value(self, response_id, new_body):
        """Replace the contents of the response editor"""
        # .wmd-input is the markdown editor's input area; fill() replaces its text.
        self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body)
def submit_response_edit(self, response_id, new_response_body):
"""Click the submit button on the response editor"""
self._find_within(".response_{} .discussion-response .post-update".format(response_id)).first.click()
EmptyPromise(
lambda: (
not self.is_response_editor_visible(response_id) and
self.is_response_visible(response_id) and
self.get_response_body(response_id) == new_response_body
),
"Comment edit succeeded"
).fulfill()
    def is_show_comments_visible(self, response_id):
        """Returns true if the "show comments" link is visible for a response"""
        return self._is_element_visible(".response_{} .action-show-comments".format(response_id))
    def show_comments(self, response_id):
        """Click the "show comments" link for a response"""
        self._find_within(".response_{} .action-show-comments".format(response_id)).first.click()
        EmptyPromise(
            lambda: self._is_element_visible(".response_{} .comments".format(response_id)),
            "Comments shown"
        ).fulfill()
    def is_add_comment_visible(self, response_id):
        """Returns true if the "add comment" form is visible for a response"""
        return self._is_element_visible("#wmd-input-comment-body-{}".format(response_id))
    def is_comment_visible(self, comment_id):
        """Returns true if the comment is viewable onscreen"""
        return self._is_element_visible("#comment_{} .response-body".format(comment_id))
    def get_comment_body(self, comment_id):
        """Return the visible body text of the comment with the given id."""
        return self._get_element_text("#comment_{} .response-body".format(comment_id))
    def is_comment_deletable(self, comment_id):
        """Returns true if the delete comment button is present, false otherwise"""
        # The delete action is in the secondary menu, which must be open
        # for visibility to be meaningful.
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self._is_element_visible("#comment_{} .action-delete".format(comment_id))
    def delete_comment(self, comment_id):
        """Delete the comment (accepting the confirmation alert) and wait for its removal."""
        with self.handle_alert():
            with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
                self._find_within("#comment_{} .action-delete".format(comment_id)).first.click()
        EmptyPromise(
            lambda: not self.is_comment_visible(comment_id),
            "Deleted comment was removed"
        ).fulfill()
    def is_comment_editable(self, comment_id):
        """Returns true if the edit comment button is present, false otherwise"""
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self._is_element_visible("#comment_{} .action-edit".format(comment_id))
    def is_comment_editor_visible(self, comment_id):
        """Returns true if the comment editor is present, false otherwise"""
        return self._is_element_visible(".edit-comment-body[data-id='{}']".format(comment_id))
    def _get_comment_editor_value(self, comment_id):
        """Return the current text of the comment editor's input area."""
        return self._find_within("#wmd-input-edit-comment-body-{}".format(comment_id)).text[0]
    def start_comment_edit(self, comment_id):
        """Click the edit button for the comment, loading the editing view"""
        # Capture the current body first so the promise can verify that the
        # editor opens pre-populated with it.
        old_body = self.get_comment_body(comment_id)
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            self._find_within("#comment_{} .action-edit".format(comment_id)).first.click()
            EmptyPromise(
                lambda: (
                    self.is_comment_editor_visible(comment_id) and
                    not self.is_comment_visible(comment_id) and
                    self._get_comment_editor_value(comment_id) == old_body
                ),
                "Comment edit started"
            ).fulfill()
    def set_comment_editor_value(self, comment_id, new_body):
        """Replace the contents of the comment editor"""
        self._find_within("#comment_{} .wmd-input".format(comment_id)).fill(new_body)
    def submit_comment_edit(self, comment_id, new_comment_body):
        """Click the submit button on the comment editor"""
        self._find_within("#comment_{} .post-update".format(comment_id)).first.click()
        # Succeeds once the editor is gone and the comment shows the new body.
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == new_comment_body
            ),
            "Comment edit succeeded"
        ).fulfill()
    def cancel_comment_edit(self, comment_id, original_body):
        """Click the cancel button on the comment editor"""
        self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click()
        # Succeeds once the editor is gone and the original body is restored.
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == original_body
            ),
            "Comment edit was canceled"
        ).fulfill()
class DiscussionSortPreferencePage(CoursePage):
    """
    Page object for the discussion board, exposing its sort controls.
    """

    # CSS selector for the forum sort <select> control.
    SORT_CONTROL_CSS = "body.discussion .forum-nav-sort-control"

    def __init__(self, browser, course_id):
        super(DiscussionSortPreferencePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum"

    def is_browser_on_page(self):
        """Return True once the sort control is present on the page."""
        return self.q(css=self.SORT_CONTROL_CSS).present

    def get_selected_sort_preference(self):
        """Return the value of the currently selected sort option."""
        selected = self.q(css=self.SORT_CONTROL_CSS + " option").filter(lambda el: el.is_selected())
        return selected[0].get_attribute("value")

    def change_sort_preference(self, sort_by):
        """Select the sort option whose value is ``sort_by``."""
        option_css = "{} option[value='{}']".format(self.SORT_CONTROL_CSS, sort_by)
        self.q(css=option_css).click()

    def refresh_page(self):
        """Reload the page in the browser."""
        self.browser.refresh()
class DiscussionTabSingleThreadPage(CoursePage):
    """Page object for a single thread opened from the discussion tab.

    Wraps a DiscussionThreadPage and delegates unknown attribute lookups to it
    via __getattr__, so all of its helpers are usable directly on this page.
    """
    def __init__(self, browser, course_id, discussion_id, thread_id):
        super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id)
        self.thread_page = DiscussionThreadPage(
            browser,
            "body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
        )
        self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format(
            discussion_id=discussion_id, thread_id=thread_id
        )
    def is_browser_on_page(self):
        # Delegate to the wrapped thread page's readiness check.
        return self.thread_page.is_browser_on_page()
    def __getattr__(self, name):
        # Forward any attribute not found on this page to the thread page.
        return getattr(self.thread_page, name)
    def close_open_thread(self):
        """Close the thread via the secondary action menu's "close" action."""
        # Note: self._find_within resolves through __getattr__ to the
        # wrapped thread page's helper.
        with self.thread_page._secondary_action_menu_open(".forum-thread-main-wrapper"):
            self._find_within(".forum-thread-main-wrapper .action-close").first.click()
    @wait_for_js
    def is_window_on_top(self):
        """
        Check if window's scroll is at top
        """
        return self.browser.execute_script("return $('html, body').offset().top") == 0
    def _thread_is_rendered_successfully(self, thread_id):
        # True when the thread's article element is visible.
        return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible
    def click_and_open_thread(self, thread_id):
        """
        Click specific thread on the list.
        """
        thread_selector = "li[data-id='{}']".format(thread_id)
        self.q(css=thread_selector).first.click()
        EmptyPromise(
            lambda: self._thread_is_rendered_successfully(thread_id),
            "Thread has been rendered"
        ).fulfill()
    def check_threads_rendered_successfully(self, thread_count):
        """
        Count the number of threads available on page.
        """
        return len(self.q(css=".forum-nav-thread").results) == thread_count
    def check_window_is_on_top(self):
        """
        Check window is on top of the page
        """
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
class InlineDiscussionPage(PageObject):
    """Page object for a discussion module embedded inline in courseware.

    All queries are scoped to the module identified by ``discussion_id``.
    """
    url = None
    def __init__(self, browser, discussion_id):
        super(InlineDiscussionPage, self).__init__(browser)
        # Scope selector for this particular discussion module.
        self._discussion_selector = (
            ".discussion-module[data-discussion-id='{discussion_id}'] ".format(
                discussion_id=discussion_id
            )
        )
    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this discussion page
        """
        return self.q(css=self._discussion_selector + " " + selector)
    def is_browser_on_page(self):
        self.wait_for_ajax()
        return self.q(css=self._discussion_selector).present
    def is_discussion_expanded(self):
        """Return True if the discussion body has been expanded."""
        return self._find_within(".discussion").present
    def expand_discussion(self):
        """Click the link to expand the discussion"""
        self._find_within(".discussion-show").first.click()
        EmptyPromise(
            self.is_discussion_expanded,
            "Discussion expanded"
        ).fulfill()
    def get_num_displayed_threads(self):
        """Return the number of thread elements currently displayed."""
        return len(self._find_within(".discussion-thread"))
    def has_thread(self, thread_id):
        """Returns true if this page is showing the thread with the specified id."""
        return self._find_within('.discussion-thread#thread_{}'.format(thread_id)).present
    def element_exists(self, selector):
        """Return True if ``selector`` matches within this module's scope."""
        return self.q(css=self._discussion_selector + " " + selector).present
    def is_new_post_opened(self):
        """Return True if the new-post form is visible."""
        return self._find_within(".new-post-article").visible
    def click_element(self, selector):
        """Wait for ``selector`` to be present within this module, then click it."""
        self.wait_for_element_presence(
            "{discussion} {selector}".format(discussion=self._discussion_selector, selector=selector),
            "{selector} is visible".format(selector=selector)
        )
        self._find_within(selector).click()
    def click_cancel_new_post(self):
        """Cancel the new-post form and wait for it to close."""
        self.click_element(".cancel")
        EmptyPromise(
            lambda: not self.is_new_post_opened(),
            "New post closed"
        ).fulfill()
    def click_new_post_button(self):
        """Open the new-post form and wait for it to appear."""
        self.click_element(".new-post-btn")
        EmptyPromise(
            self.is_new_post_opened,
            "New post opened"
        ).fulfill()
    @wait_for_js
    def _is_element_visible(self, selector):
        # Both present and visible within this module's scope.
        query = self._find_within(selector)
        return query.present and query.visible
class InlineDiscussionThreadPage(DiscussionThreadPage):
    """Page object for a single thread inside an inline courseware discussion."""
    def __init__(self, browser, thread_id):
        super(InlineDiscussionThreadPage, self).__init__(
            browser,
            "body.courseware .discussion-module #thread_{thread_id}".format(thread_id=thread_id)
        )
    def expand(self):
        """Clicks the link to expand the thread"""
        self._find_within(".forum-thread-expand").first.click()
        EmptyPromise(
            lambda: bool(self.get_response_total_text()),
            "Thread expanded"
        ).fulfill()
    def is_thread_anonymous(self):
        """Return True if the thread shows no poster username."""
        return not self.q(css=".posted-details > .username").present
    @wait_for_js
    def check_if_selector_is_focused(self, selector):
        """
        Check if selector is focused
        """
        return self.browser.execute_script("return $('{}').is(':focus')".format(selector))
class DiscussionUserProfilePage(CoursePage):
    """Page object for a user's discussion profile page, which lists the
    threads the user has participated in, with pagination controls.
    """
    TEXT_NEXT = u'Next >'
    TEXT_PREV = u'< Previous'
    # Pagination links carry the target page number in a data attribute.
    PAGING_SELECTOR = "a.discussion-pagination[data-page-number]"
    def __init__(self, browser, course_id, user_id, username, page=1):
        super(DiscussionUserProfilePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page)
        self.username = username
    def is_browser_on_page(self):
        # Loaded once the user's thread list is present and the sidebar
        # profile link shows the expected username.
        return (
            self.q(css='section.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)).present
            and
            self.q(css='section.user-profile a.learner-profile-link').present
            and
            self.q(css='section.user-profile a.learner-profile-link').text[0] == self.username
        )
    @wait_for_js
    def is_window_on_top(self):
        """Return True if the window is scrolled all the way to the top."""
        return self.browser.execute_script("return $('html, body').offset().top") == 0
    def get_shown_thread_ids(self):
        """Return the ids of the threads currently displayed.

        Thread element ids have the form "thread_<id>"; strip the prefix.
        """
        elems = self.q(css="article.discussion-thread")
        return [elem.get_attribute("id")[7:] for elem in elems]
    def get_current_page(self):
        """Return the page number currently highlighted by the paginator."""
        def check_func():
            try:
                current_page = int(self.q(css="nav.discussion-paginator li.current-page").text[0])
            except (IndexError, ValueError):
                # Fixed: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt and genuine bugs.  Only the expected
                # failures are retried: IndexError (paginator not rendered
                # yet, no text results) and ValueError (text not yet a
                # parseable integer).
                return False, None
            return True, current_page
        return Promise(
            check_func, 'discussion-paginator current page has text', timeout=5,
        ).fulfill()
    def _check_pager(self, text, page_number=None):
        """
        returns True if 'text' matches the text in any of the pagination elements. If
        page_number is provided, only return True if the element points to that result
        page.
        """
        elems = self.q(css=self.PAGING_SELECTOR).filter(lambda elem: elem.text == text)
        if page_number:
            elems = elems.filter(lambda elem: int(elem.get_attribute('data-page-number')) == page_number)
        return elems.present
    def get_clickable_pages(self):
        """Return the sorted page numbers of all numeric pagination links."""
        return sorted([
            int(elem.get_attribute('data-page-number'))
            for elem in self.q(css=self.PAGING_SELECTOR)
            if str(elem.text).isdigit()
        ])
    def is_prev_button_shown(self, page_number=None):
        """Return True if a "previous" pager link (optionally for ``page_number``) is shown."""
        return self._check_pager(self.TEXT_PREV, page_number)
    def is_next_button_shown(self, page_number=None):
        """Return True if a "next" pager link (optionally for ``page_number``) is shown."""
        return self._check_pager(self.TEXT_NEXT, page_number)
    def _click_pager_with_text(self, text, page_number):
        """
        click the first pagination element with whose text is `text` and ensure
        the resulting page number matches `page_number`.
        """
        targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text]
        targets[0].click()
        EmptyPromise(
            lambda: self.get_current_page() == page_number,
            "navigated to desired page"
        ).fulfill()
    def click_prev_page(self):
        """Navigate to the previous page and wait for the window to scroll to top."""
        self._click_pager_with_text(self.TEXT_PREV, self.get_current_page() - 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_next_page(self):
        """Navigate to the next page and wait for the window to scroll to top."""
        self._click_pager_with_text(self.TEXT_NEXT, self.get_current_page() + 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_on_page(self, page_number):
        """Navigate directly to ``page_number`` and wait for the window to scroll to top."""
        self._click_pager_with_text(unicode(page_number), page_number)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_on_sidebar_username(self):
        """Click the sidebar link to the user's learner profile."""
        self.wait_for_page()
        self.q(css='.learner-profile-link').first.click()
class DiscussionTabHomePage(CoursePage, DiscussionPageMixin):
    """Page object for the discussion tab's home page (search, alerts, new post)."""
    ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert"
    def __init__(self, browser, course_id):
        super(DiscussionTabHomePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/"
    def is_browser_on_page(self):
        return self.q(css=".discussion-body section.home-header").present
    def perform_search(self, text="dummy"):
        """Type ``text`` into the search box and submit, waiting for the AJAX result."""
        # chr(10) is a newline, which submits the search input.
        self.q(css=".forum-nav-search-input").fill(text + chr(10))
        EmptyPromise(
            self.is_ajax_finished,
            "waiting for server to return result"
        ).fulfill()
    def get_search_alert_messages(self):
        """Return the text of all search-alert messages."""
        return self.q(css=self.ALERT_SELECTOR + " .message").text
    def get_search_alert_links(self):
        """Return the query of "jump" links inside search alerts."""
        return self.q(css=self.ALERT_SELECTOR + " .link-jump")
    def dismiss_alert_message(self, text):
        """
        dismiss any search alert message containing the specified text.
        """
        def _match_messages(text):
            # All alerts whose text contains the given substring.
            return self.q(css=".search-alert").filter(lambda elem: text in elem.text)
        for alert_id in _match_messages(text).attrs("id"):
            self.q(css="{}#{} a.dismiss".format(self.ALERT_SELECTOR, alert_id)).click()
        EmptyPromise(
            lambda: _match_messages(text).results == [],
            "waiting for dismissed alerts to disappear"
        ).fulfill()
    def click_new_post_button(self):
        """
        Clicks the 'New Post' button.
        """
        self.new_post_button.click()
        EmptyPromise(
            lambda: (
                self.new_post_form
            ),
            "New post action succeeded"
        ).fulfill()
    @property
    def new_post_button(self):
        """
        Returns the new post button, or None if it is not uniquely visible.
        """
        elements = self.q(css="ol.course-tabs .new-post-btn")
        return elements.first if elements.visible and len(elements) == 1 else None
    @property
    def new_post_form(self):
        """
        Returns the new post form, or None if it is not uniquely visible.
        """
        elements = self.q(css=".forum-new-post-form")
        return elements[0] if elements.visible and len(elements) == 1 else None
| agpl-3.0 |
HiSPARC/station-software | user/python/Lib/site-packages/setuptools/py36compat.py | 313 | 2891 | import sys
from distutils.errors import DistutilsOptionError
from distutils.util import strtobool
from distutils.debug import DEBUG
class Distribution_parse_config_files:
    """
    Mix-in providing forward-compatibility for functionality to be
    included by default on Python 3.7.
    Do not edit the code in this class except to update functionality
    as implemented in distutils.
    """
    def parse_config_files(self, filenames=None):
        """Read distutils config files and populate command option dicts.

        Mirrors distutils' Distribution.parse_config_files as of Python 3.7;
        see the class docstring before changing anything here.
        """
        from configparser import ConfigParser

        # Ignore install directory options if we have a venv
        # (sys.prefix != sys.base_prefix only inside a virtual environment).
        if sys.prefix != sys.base_prefix:
            ignore_options = [
                'install-base', 'install-platbase', 'install-lib',
                'install-platlib', 'install-purelib', 'install-headers',
                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
                'home', 'user', 'root']
        else:
            ignore_options = []

        ignore_options = frozenset(ignore_options)

        if filenames is None:
            filenames = self.find_config_files()

        if DEBUG:
            self.announce("Distribution.parse_config_files():")

        # interpolation=None: option values are taken verbatim, no %-expansion.
        parser = ConfigParser(interpolation=None)
        for filename in filenames:
            if DEBUG:
                self.announce("  reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)

                for opt in options:
                    if opt != '__name__' and opt not in ignore_options:
                        val = parser.get(section,opt)
                        opt = opt.replace('-', '_')
                        # Record the source filename alongside the value.
                        opt_dict[opt] = (filename, val)

            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()

        # If there was a "global" section in the config file, use it
        # to set Distribution options.

        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        # Negative options invert the boolean value.
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
# Conditionally rebind Distribution_parse_config_files to a no-op mix-in
# where the override defined above is not needed.
if sys.version_info < (3,):
    # Python 2 behavior is sufficient
    class Distribution_parse_config_files:
        pass

if False:
    # When updated behavior is available upstream,
    # disable override here.
    class Distribution_parse_config_files:
        pass
| gpl-3.0 |
nthiep/global-ssh-server | lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/creation.py | 106 | 4554 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
    """Database-creation backend for PostGIS-enabled PostgreSQL databases."""
    geom_index_type = 'GIST'
    geom_index_ops = 'GIST_GEOMETRY_OPS'
    # "nd" (n-dimensional) operator class, used for >2D geometry on PostGIS 2.0+.
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'

    @cached_property
    def template_postgis(self):
        """Return the configured PostGIS template database name if it exists, else None."""
        template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
        cursor = self.connection.cursor()
        cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
        if cursor.fetchone():
            return template_postgis
        return None

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography or self.connection.ops.geometry:
                # Geography and Geometry (PostGIS 2.0+) columns are
                # created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns.
                # PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
                # we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
                # which are fast on multidimensional cases, or just plain
                # gist index for the 2d case.
                if f.geography:
                    index_ops = ''
                elif self.connection.ops.geometry:
                    if f.dim > 2:
                        index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
                    else:
                        index_ops = ''
                else:
                    index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
        return output

    def sql_table_creation_suffix(self):
        """Append a TEMPLATE clause when a PostGIS template database is configured."""
        if self.template_postgis is not None:
            return ' TEMPLATE %s' % (
                self.connection.ops.quote_name(self.template_postgis),)
        return ''

    def _create_test_db(self, verbosity, autoclobber):
        """Create the test database, installing the postgis extension when no template is used."""
        test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
        if self.template_postgis is None:
            # Connect to the test database in order to create the postgis extension
            self.connection.close()
            self.connection.settings_dict["NAME"] = test_database_name
            cursor = self.connection.cursor()
            cursor.execute("CREATE EXTENSION postgis")
            cursor.connection.commit()
        return test_database_name
| agpl-3.0 |
pkappesser/youtube-dl | youtube_dl/extractor/pornotube.py | 128 | 3704 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
int_or_none,
)
class PornotubeIE(InfoExtractor):
    """Extractor for pornotube.com clips.

    Extraction flow (all requests go to the aebn.net API):
      1. read the origin authentication key from the site's JS config,
      2. exchange it for a "primal" token,
      3. use the token to fetch the media URL and the clip metadata.
    """
    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com/(?:[^?#]*?)/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.pornotube.com/orientation/straight/video/4964/title/weird-hot-and-wet-science',
        'md5': '60fc5a4f0d93a97968fc7999d98260c9',
        'info_dict': {
            'id': '4964',
            'ext': 'mp4',
            'upload_date': '20141203',
            'title': 'Weird Hot and Wet Science',
            'description': 'md5:a8304bef7ef06cb4ab476ca6029b01b0',
            'categories': ['Adult Humor', 'Blondes'],
            'uploader': 'Alpha Blue Archives',
            'thumbnail': 're:^https?://.*\\.jpg$',
            'timestamp': 1417582800,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Fetch origin token
        js_config = self._download_webpage(
            'http://www.pornotube.com/assets/src/app/config.js', video_id,
            note='Download JS config')
        originAuthenticationSpaceKey = self._search_regex(
            r"constant\('originAuthenticationSpaceKey',\s*'([^']+)'",
            js_config, 'originAuthenticationSpaceKey')

        # Fetch actual token
        token_req_data = {
            'authenticationSpaceKey': originAuthenticationSpaceKey,
            'credentials': 'Clip Application',
        }
        token_req = compat_urllib_request.Request(
            'https://api.aebn.net/auth/v1/token/primal',
            data=json.dumps(token_req_data).encode('utf-8'))
        token_req.add_header('Content-Type', 'application/json')
        token_req.add_header('Origin', 'http://www.pornotube.com')
        token_answer = self._download_json(
            token_req, video_id, note='Requesting primal token')
        token = token_answer['tokenKey']

        # Get video URL
        delivery_req = compat_urllib_request.Request(
            'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id)
        delivery_req.add_header('Authorization', token)
        delivery_info = self._download_json(
            delivery_req, video_id, note='Downloading delivery information')
        video_url = delivery_info['mediaUrl']

        # Get additional info (title etc.)
        info_req = compat_urllib_request.Request(
            'https://api.aebn.net/content/v1/clips/%s?expand='
            'title,description,primaryImageNumber,startSecond,endSecond,'
            'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,'
            'movie.studios,stars.name,studios.name,categories.name,'
            'clipActive,movieActive,publishDate,orientations' % video_id)
        info_req.add_header('Authorization', token)
        info = self._download_json(
            info_req, video_id, note='Downloading metadata')

        # publishDate is in milliseconds.
        timestamp = int_or_none(info.get('publishDate'), scale=1000)
        uploader = info.get('studios', [{}])[0].get('name')
        movie_id = info['movie']['movieId']
        thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % (
            movie_id, movie_id, info['primaryImageNumber'])
        # Robustness fix: 'categories' may be missing from the metadata; the
        # original iterated over info.get('categories') and raised TypeError
        # on None.  Treat it as optional like the other metadata fields.
        categories = [c['name'] for c in info.get('categories') or []]

        return {
            'id': video_id,
            'url': video_url,
            'title': info['title'],
            'description': info.get('description'),
            'timestamp': timestamp,
            'uploader': uploader,
            'thumbnail': thumbnail,
            'categories': categories,
            'age_limit': 18,
        }
| unlicense |
monikagrabowska/osf.io | api/wikis/views.py | 10 | 4490 | from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from rest_framework.views import Response
from api.base import permissions as base_permissions
from api.base.exceptions import Gone
from api.base.views import JSONAPIBaseView
from api.base.renderers import PlainTextRenderer
from api.wikis.permissions import ContributorOrPublic, ExcludeWithdrawals
from api.wikis.serializers import (
WikiSerializer,
NodeWikiDetailSerializer,
RegistrationWikiDetailSerializer,
)
from framework.auth.oauth_scopes import CoreScopes
from addons.wiki.models import NodeWikiPage
class WikiMixin(object):
    """Mixin with convenience methods for retrieving the wiki page based on the
    URL. By default, fetches the wiki page based on the wiki_id kwarg.
    """
    serializer_class = WikiSerializer
    wiki_lookup_url_kwarg = 'wiki_id'

    def get_wiki(self, check_permissions=True):
        """Load the NodeWikiPage identified in the URL kwargs.

        Raises NotFound for missing or superseded (non-current) versions and
        Gone for deleted pages.  When ``check_permissions`` is True, also
        enforces the view's object permissions (may raise PermissionDenied).
        """
        pk = self.kwargs[self.wiki_lookup_url_kwarg]
        wiki = NodeWikiPage.load(pk)
        if not wiki:
            raise NotFound
        if wiki.is_deleted:
            raise Gone
        # only show current wiki versions
        if not wiki.is_current:
            raise NotFound
        if check_permissions:
            # May raise a permission denied
            self.check_object_permissions(self.request, wiki)
        return wiki
class WikiDetail(JSONAPIBaseView, generics.RetrieveAPIView, WikiMixin):
    """Details about a specific wiki. *Read-only*.
    ###Permissions
    Wiki pages on public nodes are given read-only access to everyone. Wiki pages on private nodes are only visible to
    contributors and administrators on the parent node.
    Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
    ##Attributes
    OSF wiki entities have the "wikis" `type`.
        name                        type                   description
        ======================================================================================================
        name                        string             name of the wiki pag
        path                        string             the path of the wiki page
        materialized_path           string             the path of the wiki page
        date_modified               iso8601 timestamp  timestamp when the wiki was last updated
        content_type                string             MIME-type
        current_user_can_comment    boolean            Whether the current user is allowed to post comments
        extra                       object
        version                     integer            version number of the wiki
    ##Relationships
    ###User
    The user who created the wiki.
    ###Node
    The project that the wiki page belongs to.
    ###Comments
    The comments created on the wiki page.
    ##Links
        self:  the canonical api endpoint of this wiki
        info: the canonical api endpoint of this wiki
        download: the link to retrive the contents of the wiki page
    ##Query Params
    *None*.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ContributorOrPublic,
        ExcludeWithdrawals
    )
    required_read_scopes = [CoreScopes.WIKI_BASE_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = NodeWikiDetailSerializer
    view_category = 'wikis'
    view_name = 'wiki-detail'

    def get_serializer_class(self):
        # Registration wikis are serialized with their own serializer.
        if self.get_wiki().node.is_registration:
            return RegistrationWikiDetailSerializer
        return NodeWikiDetailSerializer

    # overrides RetrieveAPIView
    def get_object(self):
        return self.get_wiki()
class WikiContent(JSONAPIBaseView, generics.RetrieveAPIView, WikiMixin):
    """ View for rendering wiki page content."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ContributorOrPublic,
        ExcludeWithdrawals
    )
    required_read_scopes = [CoreScopes.WIKI_BASE_READ]
    required_write_scopes = [CoreScopes.NULL]
    # Content is returned as plain text, not JSON-API.
    renderer_classes = (PlainTextRenderer, )
    view_category = 'wikis'
    view_name = 'wiki-content'

    def get_serializer_class(self):
        # No serializer: the raw wiki content is returned directly.
        return None

    def get(self, request, **kwargs):
        """Return the wiki page's raw content as plain text."""
        wiki = self.get_wiki()
        return Response(wiki.content)
| apache-2.0 |
audaciouscode/Books-Mac-OS-X | Versions/Books_3.0b6/Bundled plugins/Amazon (CA).plugin/Contents/Resources/amazonScript.py | 12 | 4378 | #!/usr/bin/python
from amazon import Bag
from xml.dom.minidom import Document, parse
from difflib import SequenceMatcher
from string import replace
import amazon
import sys
# Amazon locale used for all searches (Canadian store).
searchLocale = "ca"

# Maps pyamazon result attribute names to the field names expected by the
# Books application's import XML.  A value equal to the key means the
# attribute name is used unchanged.
fieldMap = {
    "Asin" : "ASIN",
    "Authors" : "Authors",
    "ImageUrlLarge" : "CoverImageURL",
    "ImageUrlMedium" : "ImageUrlMedium",
    "ImageUrlSmall" : "ImageUrlSmall",
    "Isbn" : "isbn",
    "ListPrice" : "originalValue",
    "Manufacturer" : "publisher",
    "Media" : "format",
    "OurPrice" : "presentValue",
    "UsedPrice" : "UsedPrice",
    "ProductName" : "title",
    "ReleaseDate" : "publishDate",
    "URL" : "url",
    "Reviews" : "reviews",
    "ProductDescription" : "summary",
    "Catalog" : "Catalog"
}
# Read the query fields written by the Books application, then look the book
# up on Amazon by ISBN first, falling back to a power search on
# title/author/publisher.
book = None
dom = parse ("/tmp/books-quickfill.xml")
fields = dom.getElementsByTagName ("field")
title = ""
authors = ""
publisher = ""
upc = None
isbn = None
for field in fields:
    field.normalize ()
    fieldData = None
    if (field.firstChild != None):
        # Strip characters that would break the Amazon query syntax.
        fieldData = replace (replace (replace (field.firstChild.data, "&", ""), "(", ""), ")", "");
    if (fieldData != None):
        if (field.getAttribute ("name") == "title"):
            title = fieldData
        elif (field.getAttribute ("name") == "authors"):
            authors = fieldData
        elif (field.getAttribute ("name") == "isbn"):
            isbn = fieldData
        elif (field.getAttribute ("name") == "upc"):
            upc = fieldData
        elif (field.getAttribute ("name") == "publisher"):
            publisher = fieldData
pythonBooks = None
if (isbn != None):
    # Normalize the ISBN before using it as an ASIN.
    isbn = replace (replace (isbn, "-", ""), " ", "");
    pythonBooks = amazon.searchByASIN (isbn, locale=searchLocale)
    # NOTE(review): assumes the search always returns a non-empty list;
    # an empty result would raise IndexError here — TODO confirm pyamazon's
    # behavior on no matches.
    if (pythonBooks[0] != None):
        book = pythonBooks[0]
# if (book == None and upc != None):
#     pythonBooks = amazon.searchByUPC (upc, locale=searchLocale)
#
#     if (pythonBooks[0] != None):
#         book = pythonBooks[0]
if (book == None and title != ""):
    query = "title:" + title
    if (authors != ""):
        query = query + " and author:" + authors
    if (publisher != ""):
        query = query + " and publisher:" + publisher
    pythonBooks = amazon.searchByPower (query, locale=searchLocale)
    if (pythonBooks[0] != None):
        book = pythonBooks[0]
# Build the importedData XML document from the search results and print it
# to stdout for the Books application to consume.
doc = Document ()
root = doc.createElement ("importedData")
doc.appendChild (root)
searchMode = "books"
if (searchLocale != "us"):
    searchMode = "books-" + searchLocale
if (book != None):
    collection = doc.createElement ("List")
    collection.setAttribute ("name", "Amazon Import")
    root.appendChild (collection)
    for book in pythonBooks:
        bookElement = doc.createElement ("Book")
        bookElement.setAttribute ("title", book.ProductName)
        for key in fieldMap.keys():
            name = fieldMap[key]
            if name == None:
                name = key
            value = None
            try:
                value = getattr(book, key)
            except AttributeError:
                # Not every result carries every attribute; skip missing ones.
                pass
            if (value != None):
                if (isinstance (value, Bag)):
                    # Structured (Bag) attributes need special handling.
                    if (key == "Authors"):
                        # Flatten one or many authors into a single
                        # comma-separated string.
                        authors = ""
                        if (isinstance (value.Author, list)):
                            for author in value.Author:
                                authors += author + ", "
                        else:
                            authors += value.Author
                        fieldElement = doc.createElement ("field")
                        fieldElement.setAttribute ("name", "authors");
                        textElement = doc.createTextNode (authors)
                        fieldElement.appendChild (textElement)
                        bookElement.appendChild (fieldElement)
                    elif (key == "Reviews"):
                        # Flag that reviews exist, then emit one field per review.
                        fieldElement = doc.createElement ("field")
                        fieldElement.setAttribute ("name", "hasreviews");
                        textElement = doc.createTextNode ("true")
                        fieldElement.appendChild (textElement)
                        bookElement.appendChild (fieldElement)
                        if (isinstance (value.CustomerReview, list)):
                            for review in value.CustomerReview:
                                fieldElement = doc.createElement ("field")
                                fieldElement.setAttribute ("name", "Review");
                                textElement = doc.createTextNode (review.Summary + ": " + review.Comment)
                                fieldElement.appendChild (textElement)
                                bookElement.appendChild (fieldElement)
                else:
                    # Plain string attribute: emit it directly under its mapped name.
                    fieldElement = doc.createElement ("field")
                    fieldElement.setAttribute ("name", name);
                    textElement = doc.createTextNode (value)
                    fieldElement.appendChild (textElement)
                    bookElement.appendChild (fieldElement)
        collection.appendChild (bookElement)
print doc.toprettyxml(encoding="UTF-8", indent="  ")
sys.stdout.flush()
| mit |
capitalone/cloud-custodian | c7n/resources/sagemaker.py | 1 | 23243 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
from c7n.filters.vpc import SubnetFilter, SecurityGroupFilter
from c7n.filters.kms import KmsRelatedFilter
@resources.register('sagemaker-notebook')
class NotebookInstance(QueryResourceManager):
    """Resource manager for SageMaker notebook instances."""

    class resource_type(TypeInfo):
        # API wiring: how instances are enumerated and described.
        service = 'sagemaker'
        enum_spec = ('list_notebook_instances', 'NotebookInstances', None)
        detail_spec = (
            'describe_notebook_instance', 'NotebookInstanceName',
            'NotebookInstanceName', None)
        arn = id = 'NotebookInstanceArn'
        name = 'NotebookInstanceName'
        date = 'CreationTime'
        cfn_type = 'AWS::SageMaker::NotebookInstance'

    permissions = ('sagemaker:ListTags',)

    def augment(self, resources):
        """Describe each notebook instance, then attach its tags."""
        client = local_session(self.session_factory).client('sagemaker')
        described = super(NotebookInstance, self).augment(resources)
        for resource in described:
            resource['Tags'] = self.retry(
                client.list_tags,
                ResourceArn=resource['NotebookInstanceArn'])['Tags']
        return described


NotebookInstance.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-job')
class SagemakerJob(QueryResourceManager):
    """Resource manager for SageMaker training jobs."""

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_training_jobs', 'TrainingJobSummaries', None)
        detail_spec = (
            'describe_training_job', 'TrainingJobName', 'TrainingJobName', None)
        arn = id = 'TrainingJobArn'
        name = 'TrainingJobName'
        date = 'CreationTime'
        permission_augment = (
            'sagemaker:DescribeTrainingJob', 'sagemaker:ListTags')

    def __init__(self, ctx, data):
        super(SagemakerJob, self).__init__(ctx, data)
        # Default to in-progress jobs unless the policy provides a query.
        self.queries = QueryFilter.parse(
            self.data.get('query', [{'StatusEquals': 'InProgress'}]))

    def resources(self, query=None):
        # Fold the parsed policy query clauses into the API query.
        for clause in self.queries:
            if clause is None:
                continue
            query = query or {}
            query.update(clause)
        return super(SagemakerJob, self).resources(query=query)

    def augment(self, jobs):
        """Describe each training job, then attach its tags."""
        client = local_session(self.session_factory).client('sagemaker')
        detailed = super(SagemakerJob, self).augment(jobs)
        for job in detailed:
            job['Tags'] = self.retry(
                client.list_tags, ResourceArn=job['TrainingJobArn'])['Tags']
        return detailed
@resources.register('sagemaker-transform-job')
class SagemakerTransformJob(QueryResourceManager):
    """Resource manager for SageMaker batch transform jobs."""

    class resource_type(TypeInfo):
        arn_type = "transform-job"
        service = 'sagemaker'
        enum_spec = ('list_transform_jobs', 'TransformJobSummaries', None)
        detail_spec = (
            'describe_transform_job', 'TransformJobName', 'TransformJobName', None)
        arn = id = 'TransformJobArn'
        name = 'TransformJobName'
        date = 'CreationTime'
        filter_name = 'TransformJobArn'
        permission_augment = ('sagemaker:DescribeTransformJob', 'sagemaker:ListTags')

    def __init__(self, ctx, data):
        super(SagemakerTransformJob, self).__init__(ctx, data)
        # Default to in-progress jobs unless the policy provides a query.
        self.queries = QueryFilter.parse(
            self.data.get('query', [{'StatusEquals': 'InProgress'}]))

    def resources(self, query=None):
        # Fold the parsed policy query clauses into the API query.
        for clause in self.queries:
            if clause is None:
                continue
            query = query or {}
            query.update(clause)
        return super(SagemakerTransformJob, self).resources(query=query)

    def augment(self, jobs):
        """Describe each transform job, then attach its tags."""
        client = local_session(self.session_factory).client('sagemaker')
        detailed = super(SagemakerTransformJob, self).augment(jobs)
        for job in detailed:
            job['Tags'] = self.retry(
                client.list_tags, ResourceArn=job['TransformJobArn'])['Tags']
        return detailed
class QueryFilter:
    """Validates and normalizes a policy's `query` clauses for the
    SageMaker list-jobs APIs.

    Each input clause is a single-entry mapping like
    ``{'StatusEquals': 'InProgress'}``; `parse` returns a list of such
    mappings ready to be merged into the API call's keyword arguments.
    """

    JOB_FILTERS = ('StatusEquals', 'NameContains',)

    @classmethod
    def parse(cls, data):
        """Validate raw clauses and return normalized {name: value} dicts.

        Duplicate filter names are dropped (first occurrence wins), and a
        default ``StatusEquals: InProgress`` clause is appended when the
        policy did not supply one.

        :raises PolicyValidationError: on structurally invalid clauses.
        """
        results = []
        names = set()
        for d in data:
            if not isinstance(d, dict):
                raise PolicyValidationError(
                    "Job Query Filter Invalid structure %s" % d)
            for k, v in d.items():
                if isinstance(v, list):
                    raise ValueError(
                        'Job query filter invalid structure %s' % v)
            query = cls(d).validate().query()
            if query['Name'] in names:
                # Cannot filter multiple times on the same key
                continue
            names.add(query['Name'])
            value = query['Value']
            if isinstance(value, list):
                # query() wraps scalar values in a list; unwrap for the
                # keyword-argument form the list APIs expect.
                value = value[0]
            results.append({query['Name']: value})
        if 'StatusEquals' not in names:
            # Add the default StatusEquals clause in the same {name: value}
            # shape as user-supplied clauses; the previous
            # {'Name': ..., 'Value': ...} shape would have leaked literal
            # 'Name'/'Value' keys into the API call parameters.
            results.append({'StatusEquals': 'InProgress'})
        return results

    def __init__(self, data):
        self.data = data
        self.key = None
        self.value = None

    def validate(self):
        """Check this clause is one {known-filter-name: value} pair."""
        if not len(list(self.data.keys())) == 1:
            raise PolicyValidationError(
                "Job Query Filter Invalid %s" % self.data)
        self.key = list(self.data.keys())[0]
        self.value = list(self.data.values())[0]
        if self.key not in self.JOB_FILTERS and not self.key.startswith('tag:'):
            raise PolicyValidationError(
                "Job Query Filter invalid filter name %s" % (
                    self.data))
        if self.value is None:
            raise PolicyValidationError(
                "Job Query Filters must have a value, use tag-key"
                " w/ tag name as value for tag present checks"
                " %s" % self.data)
        return self

    def query(self):
        """Return the clause as {'Name': key, 'Value': [value]}."""
        value = self.value
        if isinstance(self.value, str):
            value = [self.value]
        return {'Name': self.key, 'Value': value}
@resources.register('sagemaker-endpoint')
class SagemakerEndpoint(QueryResourceManager):
    """Resource manager for SageMaker inference endpoints."""

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_endpoints', 'Endpoints', None)
        detail_spec = (
            'describe_endpoint', 'EndpointName',
            'EndpointName', None)
        arn = id = 'EndpointArn'
        name = 'EndpointName'
        date = 'CreationTime'
        cfn_type = 'AWS::SageMaker::Endpoint'

    permissions = ('sagemaker:ListTags',)

    def augment(self, endpoints):
        """Describe each endpoint, then attach its tags."""
        client = local_session(self.session_factory).client('sagemaker')
        described = super(SagemakerEndpoint, self).augment(endpoints)
        for endpoint in described:
            endpoint['Tags'] = self.retry(
                client.list_tags, ResourceArn=endpoint['EndpointArn'])['Tags']
        return described


SagemakerEndpoint.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-endpoint-config')
class SagemakerEndpointConfig(QueryResourceManager):
    """Resource manager for SageMaker endpoint configurations."""

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_endpoint_configs', 'EndpointConfigs', None)
        detail_spec = (
            'describe_endpoint_config', 'EndpointConfigName',
            'EndpointConfigName', None)
        arn = id = 'EndpointConfigArn'
        name = 'EndpointConfigName'
        date = 'CreationTime'
        cfn_type = 'AWS::SageMaker::EndpointConfig'

    permissions = ('sagemaker:ListTags',)

    def augment(self, endpoints):
        """Describe each endpoint config, then attach its tags."""
        client = local_session(self.session_factory).client('sagemaker')
        described = super(SagemakerEndpointConfig, self).augment(endpoints)
        for config in described:
            config['Tags'] = self.retry(
                client.list_tags,
                ResourceArn=config['EndpointConfigArn'])['Tags']
        return described


SagemakerEndpointConfig.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-model')
class Model(QueryResourceManager):
    """Resource manager for SageMaker models."""

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_models', 'Models', None)
        detail_spec = (
            'describe_model', 'ModelName',
            'ModelName', None)
        arn = id = 'ModelArn'
        name = 'ModelName'
        date = 'CreationTime'
        cfn_type = 'AWS::SageMaker::Model'

    permissions = ('sagemaker:ListTags',)

    def augment(self, resources):
        """Merge each model's tags into any tags already present."""
        client = local_session(self.session_factory).client('sagemaker')
        for model in resources:
            tags = self.retry(
                client.list_tags, ResourceArn=model['ModelArn'])['Tags']
            # Extend rather than overwrite, preserving pre-existing entries.
            model.setdefault('Tags', []).extend(tags)
        return resources


Model.filter_registry.register('marked-for-op', TagActionFilter)
@SagemakerEndpoint.action_registry.register('tag')
@SagemakerEndpointConfig.action_registry.register('tag')
@NotebookInstance.action_registry.register('tag')
@SagemakerJob.action_registry.register('tag')
@SagemakerTransformJob.action_registry.register('tag')
@Model.action_registry.register('tag')
class TagNotebookInstance(Tag):
    """Create tag(s) on a SageMaker resource (notebook-instance,
    endpoint, endpoint-config, job, transform-job, model).

    The same action shape applies to every registered resource type:

    :example:

    .. code-block:: yaml

        policies:
          - name: tag-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:target-tag": absent
            actions:
              - type: tag
                key: target-tag
                value: target-value

          - name: tag-sagemaker-endpoint
            resource: sagemaker-endpoint
            filters:
              - "tag:required-tag": absent
            actions:
              - type: tag
                key: required-tag
                value: required-value
    """
    permissions = ('sagemaker:AddTags',)

    def process_resource_set(self, client, resources, tags):
        # The ARN field name differs per resource type; resolve it once.
        arn_key = self.manager.resource_type.id
        for resource in resources:
            client.add_tags(ResourceArn=resource[arn_key], Tags=tags)
@SagemakerEndpoint.action_registry.register('remove-tag')
@SagemakerEndpointConfig.action_registry.register('remove-tag')
@NotebookInstance.action_registry.register('remove-tag')
@SagemakerJob.action_registry.register('remove-tag')
@SagemakerTransformJob.action_registry.register('remove-tag')
@Model.action_registry.register('remove-tag')
class RemoveTagNotebookInstance(RemoveTag):
    """Remove tag(s) from a SageMaker resource (notebook-instance,
    endpoint, endpoint-config, job, transform-job, model).

    The same action shape applies to every registered resource type:

    :example:

    .. code-block:: yaml

        policies:
          - name: sagemaker-notebook-remove-tag
            resource: sagemaker-notebook
            filters:
              - "tag:BadTag": present
            actions:
              - type: remove-tag
                tags: ["BadTag"]

          - name: sagemaker-endpoint-remove-tag
            resource: sagemaker-endpoint
            filters:
              - "tag:expired-tag": present
            actions:
              - type: remove-tag
                tags: ["expired-tag"]
    """
    permissions = ('sagemaker:DeleteTags',)

    def process_resource_set(self, client, resources, keys):
        for resource in resources:
            client.delete_tags(ResourceArn=resource[self.id_key], TagKeys=keys)
@SagemakerEndpoint.action_registry.register('mark-for-op')
@SagemakerEndpointConfig.action_registry.register('mark-for-op')
@NotebookInstance.action_registry.register('mark-for-op')
@Model.action_registry.register('mark-for-op')
class MarkNotebookInstanceForOp(TagDelayedAction):
    """Mark SageMaker resources for deferred action
    (notebook-instance, endpoint, endpoint-config, model)

    :example:

    .. code-block:: yaml

        policies:
          - name: sagemaker-notebook-invalid-tag-stop
            resource: sagemaker-notebook
            filters:
              - "tag:InvalidTag": present
            actions:
              - type: mark-for-op
                op: stop
                days: 1

          - name: sagemaker-endpoint-failure-delete
            resource: sagemaker-endpoint
            filters:
              - 'EndpointStatus': 'Failed'
            actions:
              - type: mark-for-op
                op: delete
                days: 1

          - name: sagemaker-endpoint-config-invalid-size-delete
            resource: sagemaker-endpoint-config
            filters:
              - type: value
                key: ProductionVariants[].InstanceType
                value: 'ml.m4.10xlarge'
                op: contains
            actions:
              - type: mark-for-op
                op: delete
                days: 1
    """
    # No body: schema, permissions and process() are all inherited from
    # TagDelayedAction.
@NotebookInstance.action_registry.register('start')
class StartNotebookInstance(BaseAction):
    """Start sagemaker-notebook(s)

    :example:

    .. code-block:: yaml

        policies:
          - name: start-sagemaker-notebook
            resource: sagemaker-notebook
            actions:
              - start
    """
    schema = type_schema('start')
    permissions = ('sagemaker:StartNotebookInstance',)
    # Only instances currently stopped can be started.
    valid_origin_states = ('Stopped',)

    def process(self, resources):
        eligible = self.filter_resources(
            resources, 'NotebookInstanceStatus', self.valid_origin_states)
        if not eligible:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')
        for notebook in eligible:
            try:
                client.start_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                # Instance vanished between enumeration and action; skip.
                pass
@NotebookInstance.action_registry.register('stop')
class StopNotebookInstance(BaseAction):
    """Stop sagemaker-notebook(s)

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:DeleteMe": present
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopNotebookInstance',)
    # Only running instances can be stopped.
    valid_origin_states = ('InService',)

    def process(self, resources):
        eligible = self.filter_resources(
            resources, 'NotebookInstanceStatus', self.valid_origin_states)
        if not eligible:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')
        for notebook in eligible:
            try:
                client.stop_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                # Instance vanished between enumeration and action; skip.
                pass
@NotebookInstance.action_registry.register('delete')
class DeleteNotebookInstance(BaseAction):
    """Deletes sagemaker-notebook(s)

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:DeleteMe": present
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteNotebookInstance',)
    # Only stopped or failed instances can be deleted.
    valid_origin_states = ('Stopped', 'Failed',)

    def process(self, resources):
        eligible = self.filter_resources(
            resources, 'NotebookInstanceStatus', self.valid_origin_states)
        if not eligible:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')
        for notebook in eligible:
            try:
                client.delete_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                # Already deleted; nothing to do.
                pass
@NotebookInstance.filter_registry.register('security-group')
class NotebookSecurityGroupFilter(SecurityGroupFilter):
    # Notebook instances expose their security groups as a list of ids.
    RelatedIdsExpression = "SecurityGroups[]"
@NotebookInstance.filter_registry.register('subnet')
class NotebookSubnetFilter(SubnetFilter):
    # A notebook instance is attached to a single subnet.
    RelatedIdsExpression = "SubnetId"
@NotebookInstance.filter_registry.register('kms-key')
@SagemakerEndpointConfig.filter_registry.register('kms-key')
class NotebookKmsFilter(KmsRelatedFilter):
    """
    Filter a resource by its associated kms key and optionally the alias
    name of the kms key by using 'c7n:AliasName'

    :example:

    .. code-block:: yaml

        policies:
          - name: sagemaker-kms-key-filters
            resource: aws.sagemaker-notebook
            filters:
              - type: kms-key
                key: c7n:AliasName
                value: "^(alias/aws/sagemaker)"
                op: regex

          - name: sagemaker-endpoint-kms-key-filters
            resource: aws.sagemaker-endpoint-config
            filters:
              - type: kms-key
                key: c7n:AliasName
                value: "alias/aws/sagemaker"
    """
    # Both registered resource types expose the key under 'KmsKeyId'.
    RelatedIdsExpression = "KmsKeyId"
@Model.action_registry.register('delete')
class DeleteModel(BaseAction):
    """Deletes sagemaker-model(s)

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-model
            resource: sagemaker-model
            filters:
              - "tag:DeleteMe": present
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteModel',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for model in resources:
            try:
                client.delete_model(ModelName=model['ModelName'])
            except client.exceptions.ResourceNotFound:
                # Already deleted; nothing to do.
                pass
@SagemakerJob.action_registry.register('stop')
class SagemakerJobStop(BaseAction):
    """Stops a SageMaker job

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-ml-job
            resource: sagemaker-job
            filters:
              - TrainingJobName: ml-job-10
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopTrainingJob',)

    def process(self, jobs):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for job in jobs:
            try:
                client.stop_training_job(TrainingJobName=job['TrainingJobName'])
            except client.exceptions.ResourceNotFound:
                # Job already gone; nothing to stop.
                pass
@SagemakerEndpoint.action_registry.register('delete')
class SagemakerEndpointDelete(BaseAction):
    """Delete a SageMaker endpoint

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-endpoint
            resource: sagemaker-endpoint
            filters:
              - EndpointName: sagemaker-ep--2018-01-01-00-00-00
            actions:
              - type: delete
    """
    permissions = (
        'sagemaker:DeleteEndpoint',
        'sagemaker:DeleteEndpointConfig')
    schema = type_schema('delete')

    def process(self, endpoints):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for endpoint in endpoints:
            try:
                client.delete_endpoint(EndpointName=endpoint['EndpointName'])
            except client.exceptions.ResourceNotFound:
                # Endpoint already deleted; nothing to do.
                pass
@SagemakerEndpointConfig.action_registry.register('delete')
class SagemakerEndpointConfigDelete(BaseAction):
    """Delete a SageMaker endpoint config

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-endpoint-config
            resource: sagemaker-endpoint-config
            filters:
              - EndpointConfigName: sagemaker-2018-01-01-00-00-00-T00
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteEndpointConfig',)

    def process(self, endpoints):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for e in endpoints:
            try:
                client.delete_endpoint_config(
                    EndpointConfigName=e['EndpointConfigName'])
            except client.exceptions.ResourceNotFound:
                # Config already deleted; nothing to do.
                pass
@SagemakerTransformJob.action_registry.register('stop')
class SagemakerTransformJobStop(BaseAction):
    """Stops a SageMaker Transform job

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-transform-job
            resource: sagemaker-transform-job
            filters:
              - TransformJobName: ml-job-10
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopTransformJob',)

    def process(self, jobs):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for j in jobs:
            try:
                client.stop_transform_job(TransformJobName=j['TransformJobName'])
            except client.exceptions.ResourceNotFound:
                # Job already gone; nothing to stop.
                pass
| apache-2.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/unidecode/x08c.py | 251 | 4630 | data = (
'Yu ', # 0x00
'Shui ', # 0x01
'Shen ', # 0x02
'Diao ', # 0x03
'Chan ', # 0x04
'Liang ', # 0x05
'Zhun ', # 0x06
'Sui ', # 0x07
'Tan ', # 0x08
'Shen ', # 0x09
'Yi ', # 0x0a
'Mou ', # 0x0b
'Chen ', # 0x0c
'Die ', # 0x0d
'Huang ', # 0x0e
'Jian ', # 0x0f
'Xie ', # 0x10
'Nue ', # 0x11
'Ye ', # 0x12
'Wei ', # 0x13
'E ', # 0x14
'Yu ', # 0x15
'Xuan ', # 0x16
'Chan ', # 0x17
'Zi ', # 0x18
'An ', # 0x19
'Yan ', # 0x1a
'Di ', # 0x1b
'Mi ', # 0x1c
'Pian ', # 0x1d
'Xu ', # 0x1e
'Mo ', # 0x1f
'Dang ', # 0x20
'Su ', # 0x21
'Xie ', # 0x22
'Yao ', # 0x23
'Bang ', # 0x24
'Shi ', # 0x25
'Qian ', # 0x26
'Mi ', # 0x27
'Jin ', # 0x28
'Man ', # 0x29
'Zhe ', # 0x2a
'Jian ', # 0x2b
'Miu ', # 0x2c
'Tan ', # 0x2d
'Zen ', # 0x2e
'Qiao ', # 0x2f
'Lan ', # 0x30
'Pu ', # 0x31
'Jue ', # 0x32
'Yan ', # 0x33
'Qian ', # 0x34
'Zhan ', # 0x35
'Chen ', # 0x36
'Gu ', # 0x37
'Qian ', # 0x38
'Hong ', # 0x39
'Xia ', # 0x3a
'Jue ', # 0x3b
'Hong ', # 0x3c
'Han ', # 0x3d
'Hong ', # 0x3e
'Xi ', # 0x3f
'Xi ', # 0x40
'Huo ', # 0x41
'Liao ', # 0x42
'Han ', # 0x43
'Du ', # 0x44
'Long ', # 0x45
'Dou ', # 0x46
'Jiang ', # 0x47
'Qi ', # 0x48
'Shi ', # 0x49
'Li ', # 0x4a
'Deng ', # 0x4b
'Wan ', # 0x4c
'Bi ', # 0x4d
'Shu ', # 0x4e
'Xian ', # 0x4f
'Feng ', # 0x50
'Zhi ', # 0x51
'Zhi ', # 0x52
'Yan ', # 0x53
'Yan ', # 0x54
'Shi ', # 0x55
'Chu ', # 0x56
'Hui ', # 0x57
'Tun ', # 0x58
'Yi ', # 0x59
'Tun ', # 0x5a
'Yi ', # 0x5b
'Jian ', # 0x5c
'Ba ', # 0x5d
'Hou ', # 0x5e
'E ', # 0x5f
'Cu ', # 0x60
'Xiang ', # 0x61
'Huan ', # 0x62
'Jian ', # 0x63
'Ken ', # 0x64
'Gai ', # 0x65
'Qu ', # 0x66
'Fu ', # 0x67
'Xi ', # 0x68
'Bin ', # 0x69
'Hao ', # 0x6a
'Yu ', # 0x6b
'Zhu ', # 0x6c
'Jia ', # 0x6d
'[?] ', # 0x6e
'Xi ', # 0x6f
'Bo ', # 0x70
'Wen ', # 0x71
'Huan ', # 0x72
'Bin ', # 0x73
'Di ', # 0x74
'Zong ', # 0x75
'Fen ', # 0x76
'Yi ', # 0x77
'Zhi ', # 0x78
'Bao ', # 0x79
'Chai ', # 0x7a
'Han ', # 0x7b
'Pi ', # 0x7c
'Na ', # 0x7d
'Pi ', # 0x7e
'Gou ', # 0x7f
'Na ', # 0x80
'You ', # 0x81
'Diao ', # 0x82
'Mo ', # 0x83
'Si ', # 0x84
'Xiu ', # 0x85
'Huan ', # 0x86
'Kun ', # 0x87
'He ', # 0x88
'He ', # 0x89
'Mo ', # 0x8a
'Han ', # 0x8b
'Mao ', # 0x8c
'Li ', # 0x8d
'Ni ', # 0x8e
'Bi ', # 0x8f
'Yu ', # 0x90
'Jia ', # 0x91
'Tuan ', # 0x92
'Mao ', # 0x93
'Pi ', # 0x94
'Xi ', # 0x95
'E ', # 0x96
'Ju ', # 0x97
'Mo ', # 0x98
'Chu ', # 0x99
'Tan ', # 0x9a
'Huan ', # 0x9b
'Jue ', # 0x9c
'Bei ', # 0x9d
'Zhen ', # 0x9e
'Yuan ', # 0x9f
'Fu ', # 0xa0
'Cai ', # 0xa1
'Gong ', # 0xa2
'Te ', # 0xa3
'Yi ', # 0xa4
'Hang ', # 0xa5
'Wan ', # 0xa6
'Pin ', # 0xa7
'Huo ', # 0xa8
'Fan ', # 0xa9
'Tan ', # 0xaa
'Guan ', # 0xab
'Ze ', # 0xac
'Zhi ', # 0xad
'Er ', # 0xae
'Zhu ', # 0xaf
'Shi ', # 0xb0
'Bi ', # 0xb1
'Zi ', # 0xb2
'Er ', # 0xb3
'Gui ', # 0xb4
'Pian ', # 0xb5
'Bian ', # 0xb6
'Mai ', # 0xb7
'Dai ', # 0xb8
'Sheng ', # 0xb9
'Kuang ', # 0xba
'Fei ', # 0xbb
'Tie ', # 0xbc
'Yi ', # 0xbd
'Chi ', # 0xbe
'Mao ', # 0xbf
'He ', # 0xc0
'Bi ', # 0xc1
'Lu ', # 0xc2
'Ren ', # 0xc3
'Hui ', # 0xc4
'Gai ', # 0xc5
'Pian ', # 0xc6
'Zi ', # 0xc7
'Jia ', # 0xc8
'Xu ', # 0xc9
'Zei ', # 0xca
'Jiao ', # 0xcb
'Gai ', # 0xcc
'Zang ', # 0xcd
'Jian ', # 0xce
'Ying ', # 0xcf
'Xun ', # 0xd0
'Zhen ', # 0xd1
'She ', # 0xd2
'Bin ', # 0xd3
'Bin ', # 0xd4
'Qiu ', # 0xd5
'She ', # 0xd6
'Chuan ', # 0xd7
'Zang ', # 0xd8
'Zhou ', # 0xd9
'Lai ', # 0xda
'Zan ', # 0xdb
'Si ', # 0xdc
'Chen ', # 0xdd
'Shang ', # 0xde
'Tian ', # 0xdf
'Pei ', # 0xe0
'Geng ', # 0xe1
'Xian ', # 0xe2
'Mai ', # 0xe3
'Jian ', # 0xe4
'Sui ', # 0xe5
'Fu ', # 0xe6
'Tan ', # 0xe7
'Cong ', # 0xe8
'Cong ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Zhang ', # 0xec
'Du ', # 0xed
'Jin ', # 0xee
'Xiong ', # 0xef
'Shun ', # 0xf0
'Yun ', # 0xf1
'Bao ', # 0xf2
'Zai ', # 0xf3
'Lai ', # 0xf4
'Feng ', # 0xf5
'Cang ', # 0xf6
'Ji ', # 0xf7
'Sheng ', # 0xf8
'Ai ', # 0xf9
'Zhuan ', # 0xfa
'Fu ', # 0xfb
'Gou ', # 0xfc
'Sai ', # 0xfd
'Ze ', # 0xfe
'Liao ', # 0xff
)
| gpl-2.0 |
jvs/stride | stride/grammar.py | 1 | 12650 | from sourcer import *
__all__ = [
'Collection',
'CommandHandler',
'Contract',
'Data',
'Definition',
'Dollar',
'For',
'Func',
'If',
'Import',
'Introduction',
'KeyValuePair',
'ModifiedStatement',
'Name',
'NamedElement',
'Namespace',
'Operation',
'Parameter',
'Quantification',
'Record',
'ReturnStmt',
'Test',
'Tokens',
'TypeDeclaration',
'Update',
'Var',
'While',
'parse_program',
'reserved',
]
def _memoize_indent(f):
    """Cache a grammar factory's result per indent string.

    The factories below are invoked repeatedly with the same indent, so
    memoizing keeps parser construction proportional to the number of
    distinct indentation levels.
    """
    cache = {}

    def wrapper(indent=''):
        try:
            return cache[indent]
        except KeyError:
            value = cache[indent] = f(indent)
            return value
    return wrapper
class TokenDefs(TokenSyntax):
    """Token definitions for the Stride lexer.

    NOTE(review): `TokenSyntax`, `Skip`, `AnyString` and `AnyChar` come
    from sourcer's star import; presumably attribute definition order
    determines match priority (LongSymbol before Symbol so that e.g.
    '+=' is not split into '+' '=') — confirm against sourcer docs.
    """

    def __init__(self):
        self.Word = r'[_a-zA-Z][_a-zA-Z0-9]*'
        self.Newline = r'[\n\r]+'
        # Indentation counts only at the start of a non-blank,
        # non-comment line (lookbehind for newline, lookahead for code).
        self.Indent = r'(?<=\n) +(?=[^ \n\r#])'
        self.Space = Skip(r'[ \t]+')
        self.LongSymbol = AnyString(
            '+=', '-=', '*=', '//=', '/=',
            '==', '!=', '<=', '>=',
            ':=', '->', '//', '...',
        )
        self.Symbol = AnyChar('()[]{}.,:;=+*/-<>$@')
        self.RationalNumber = r'[0-9]*\.[0-9]+'
        self.WholeNumber = r'[0-9]+'
        self.DoubleQuotedText = r'"([^"\n\r\\]|\\[^\n\r])*"'
        self.SingleQuotedText = r"'([^'\n\r\\]|\\[^\n\r])*'"
        self.Comment = Skip(r'#[^\n\r]*')


# Singleton token table used throughout the grammar below.
Tokens = TokenDefs()
# Keywords that may never be used as identifiers.
reserved = frozenset([
    'and',
    'by',
    'assert',
    'for',
    'else',
    'if',
    'implies',
    'in',
    'is',
    'match',
    'not',
    'of',
    'opt',
    'or',
    'requires',
    'then',
    'to',
    'try',
    'using',
    'yield',
])

# An identifier: any Word token whose text is not a reserved keyword.
Name = Content(Tokens.Word) ^ (lambda x: x not in reserved)

# Literal tokens.
Number = Tokens.RationalNumber | Tokens.WholeNumber
String = Tokens.DoubleQuotedText | Tokens.SingleQuotedText

# Keywords that introduce a data definition (parsed by the Data struct).
DataKeyword = AnyOf(
    'generator',
    'interface',
    'resource',
    'struct',
    'table',
    'union',
    'val',
    'view',
)

# Keywords that introduce a function-like definition (see Func).
FuncKeyword = AnyOf(
    'command',
    'func',
    'operator',
)
class Data(Struct):
    """Parse-tree node for a data definition (struct/table/union/...)."""

    def parse(self, indent=''):
        self.keyword = DataKeyword
        self.name = Opt(Name)
        self.params = Opt(Params)
        self.body = Opt(InlineStruct(indent) | Body(indent))


def InlineStruct(indent):
    # One-line struct body: ': stmt, stmt, ...'
    return Wrap(':') >> (Statement(indent) // Comma)


class Func(Struct):
    """Parse-tree node for a function/command/operator definition."""

    def __init__(self,
                 keyword = 'func',
                 is_predicate = False,
                 name = None,
                 params = None,
                 returns = None,
                 body = None):
        self.keyword = keyword
        self.is_predicate = is_predicate
        self.name = name
        self.params = params or []
        self.returns = returns
        self.body = body

    def parse(self, indent=''):
        self.keyword = FuncKeyword
        # Optional 'is' marks the function as a predicate.
        self.is_predicate = Opt('is') * bool
        self.name = Opt(Name)
        self.params = Params
        self.returns = Opt(TypeAnnotation())
        self.body = Opt(Initializer(indent) | Body(indent))


class Parameter(Struct):
    """A formal parameter: name, optional type, optional default value."""

    def __init__(self, name, type=None, default=None):
        self.name = name
        self.type = type
        self.default = default

    def parse(self):
        self.name = Name
        self.type = Opt(TypeAnnotation())
        self.default = Opt(Initializer())
class If(Struct):
    """Conditional expression: 'if TEST then TRUE [else FALSE]'."""

    def parse(self, indent=''):
        self.test = Follow('if') >> Expression(indent)
        self.true_case = LeadingBlock('then', indent) | FlexBody(indent)
        self.false_case = Opt(TrailingBlock('else', indent))


def LeadingBlock(keyword, indent=''):
    # A keyword followed by either an indented body or an inline expression.
    return Seek(keyword) >> (FlexBody(indent) | Seek(Expression(indent)))


def TrailingBlock(keyword, indent=''):
    # Same as LeadingBlock, but the keyword must appear at (or deeper
    # than) the current indentation level on a following line.
    return MaintainIndent(indent) >> LeadingBlock(keyword, indent)


def MaintainIndent(indent):
    # Succeeds when the next line is indented at least as far as `indent`.
    skip = Tokens.Newline // Opt(Tokens.Indent)
    token = Content(Tokens.Indent) | Return('')
    return (skip >> token) ^ (lambda token: len(indent) <= len(token))


class Test(Struct):
    """A 'test' block with an optional string description."""

    def parse(self, indent=''):
        self.keyword = 'test'
        self.description = Opt(String)
        self.body = FlexBody(indent)


class Namespace(Struct):
    """'namespace a.b.c' declaration."""

    def parse(self):
        self.path = 'namespace' >> Path


class Import(Struct):
    """'import a.b.c [as alias]' statement."""

    def parse(self):
        self.path = 'import' >> Path
        self.alias = Opt(Wrap('as') >> Name)


class CommandHandler(Struct):
    """'on a.b.c(params) ...' command handler definition."""

    def parse(self, indent=''):
        self.path = 'on' >> Path
        self.params = Params
        self.body = Initializer(indent) | Body(indent)


# Combinator shorthands (operators come from sourcer's star import):
# Seek skips leading newlines, Follow skips trailing ones, Wrap both.
Seek = lambda x: OptNewlines >> x
Follow = lambda x: x << OptNewlines
Wrap = lambda x: OptNewlines >> x << OptNewlines
WrapParens = lambda x: '(' >> Wrap(x) << ')'
WrapSquare = lambda x: '[' >> Wrap(x) << ']'
OptNewlines = List(Tokens.Newline | Tokens.Indent)
Comma = Wrap(',')
Path = Content(Tokens.Word) // Wrap('.')
# 'not in' is tokenized as two words and fused into one operator here.
NotInOperator = Follow('not') >> 'in' >> Return('not in')
# An operator or identifier wrapped in parentheses, e.g. '(+)'.
WrappedName = WrapParens(
    NotInOperator
    | Content(Tokens.Word)
    | Content(Tokens.Symbol)
    | Content(Tokens.LongSymbol)
)
# Cont(f, x): invoke struct parser f with the current indent x.
Cont = lambda f, x: Return(x) ** f
class Record(Struct):
    """Parenthesized record literal: '(a: 1, b, ...)'."""

    def parse(self):
        Element = NamedElement | Expression('') | '...'
        self.elements = WrapParens(Element / Comma)


class Collection(Struct):
    """Square-bracketed collection literal: '[x, k: v, ...]'."""

    def parse(self):
        Element = KeyValuePair | Expression('')
        self.elements = WrapSquare(Element / Comma)


class For(Struct):
    """'for TARGET in SOURCE: BODY' loop."""

    def parse(self, indent=''):
        self.target = Follow('for') >> LeftHandSide(indent)
        self.source = Wrap('in') >> Expression(indent)
        self.body = LoopBody(indent)


class While(Struct):
    """'while TEST: BODY' loop."""

    def parse(self, indent=''):
        self.test = Follow('while') >> Expression(indent)
        self.body = LoopBody(indent)


class Try(Struct):
    """'try [HANDLER] BODY' expression."""

    def parse(self, indent=''):
        self.handler = 'try' >> Opt(Definition | Expression(indent))
        self.body = FlexBody(indent)


class Match(Struct):
    """'match SUBJECT' with an indented body of cases."""

    def parse(self, indent=''):
        self.subject = 'match' >> Expression(indent)
        self.cases = Body(indent)


class Case(Struct):
    """A single 'case [is] TEST: BODY' clause inside a match."""

    def parse(self, indent=''):
        self.is_predicate = 'case' >> Opt('is') * bool
        self.test = Expression(indent)
        self.body = FlexBody(indent)


class Dollar(Struct):
    """Dollar reference: '$name', '$0', or '$"..."'."""

    def parse(self):
        self.target = '$' >> (Name | Number | String)


class Quantification(Struct):
    """'exists'/'forall' quantifier with params and a body."""

    def parse(self, indent=''):
        # A bare (unparenthesized) parameter list is wrapped in one group.
        OpenParams = (Parameter / Comma) * (lambda x: [x])
        self.quantifier = Or('exists', 'forall')
        self.params = Params | OpenParams
        self.body = TypeAnnotation(indent) | Body(indent)


@_memoize_indent
def LoopBody(indent=''):
    # Loop bodies additionally allow an inline 'k: v' pair.
    InlineExpr = KeyValuePair | Expression(indent)
    InlineBody = Wrap(':') >> InlineExpr
    return InlineBody | Body(indent)


@_memoize_indent
def FlexBody(indent=''):
    # Either ': expr' inline, or an indented block.
    InlineBody = Wrap(':') >> Expression(indent)
    return InlineBody | Body(indent)
@_memoize_indent
def Expression(indent=''):
    """Build the full expression grammar for the given indent level.

    NOTE(review): relies on the builtin `reduce` and thus (with the
    `basestring` use in parse_program) makes this module Python 2 only.
    """
    # Reset re-enters the grammar at indent '' (inside brackets the
    # indentation rules restart); Recur stays at the current indent.
    Reset = ForwardRef(lambda: Expression(''))
    Recur = ForwardRef(lambda: Expression(indent))
    PrefixOps = AnyOf('assert', 'claim', 'found', 'fn', 'spawn', 'yield')
    BuildPrefix = lambda x: Operation(None, x[0], x[1])
    PrefixExpression = Transform((Follow(PrefixOps), Recur), BuildPrefix)
    # Atomic expressions.
    Basic = (
        WrappedName
        | WrapParens(Reset)
        | Record
        | Collection
        | Cont(If, indent)
        | Cont(For, indent)
        | Cont(While, indent)
        | Cont(Try, indent)
        | Cont(Match, indent)
        | Cont(Case, indent)
        | (Follow('of') >> Recur)
        | Cont(Func, indent)
        | Cont(Data, indent)
        | PrefixExpression
        | Quantification
        | Name
        | Number
        | String
        | Dollar
    )
    # Postfix applications: attribute access, indexing, and juxtaposition
    # (calling by placing an atom directly after another).
    ApplicationOps = AnyOf(
        ('.', Content(Tokens.Word)),
        (Return('[]'), WrapSquare(Reset)),
        (Return(''), Basic),
    )

    def BuildApplications(pair):
        # Left-fold the chain of applications into nested Operations.
        build = lambda x, y: Operation(x, y[0], y[1])
        return reduce(build, pair[1], pair[0])
    Application = Transform((Basic, List(ApplicationOps)), BuildApplications)
    InfixLeftW = lambda *args: InfixLeft(*(Wrap(i) for i in args))
    UnaryOps = AnyOf('borrow', 'not', 'opt', 'own')
    # Precedence table, tightest binding first.
    return OperatorPrecedence(
        Application,
        Prefix('-'),
        InfixLeftW('*', '/', '//'),
        # '-' may not be followed by a newline (would be ambiguous with
        # unary minus on the next line), hence Follow instead of Wrap.
        InfixLeft(Wrap('+'), Follow('-')),
        InfixLeftW('to'),
        InfixLeftW('by'),
        InfixLeftW('<', '<=', '>=', '>', 'is', 'in', NotInOperator),
        InfixLeftW('==', '!='),
        Prefix(Follow(UnaryOps)),
        InfixLeftW('and'),
        InfixLeftW('or'),
        InfixRight(Wrap('implies'), Wrap('->')),
    )
class NamedElement(Struct):
    """A 'name: value' element inside a record literal."""

    def parse(self):
        self.name = Name | Number | String
        self.value = Wrap(':') >> Expression('')


class KeyValuePair(Struct):
    """A 'key: value' element inside a collection literal."""

    def parse(self):
        self.key = Expression('')
        self.value = Wrap(':') >> Expression('')


# One or more parenthesized parameter groups: (a, b)(c) ...
Params = Some(WrapParens(Parameter / Comma))


@_memoize_indent
def TypeAnnotation(indent=''):
    # ': TYPE' suffix.
    return Wrap(':') >> Expression(indent)


@_memoize_indent
def Initializer(indent=''):
    # '= EXPR' clause (definition).
    return initializer_clause(indent, '=')


@_memoize_indent
def Assignment(indent=''):
    # ':= EXPR' clause (mutation).
    return initializer_clause(indent, ':=')


def initializer_clause(indent, operator):
    return Wrap(operator) >> Expression(indent)
def flatten(list_of_lists):
    """Concatenate the sub-lists of *list_of_lists* into one flat list."""
    result = []
    for sublist in list_of_lists:
        result.extend(sublist)
    return result
def Block(indent=''):
    # A block: newline-separated lines at the given indent, each line
    # holding one or more ';'-separated statements, flattened into one
    # statement list.
    Line = CurrentIndent(indent) ** InlineStatements
    return (Line // Some(Tokens.Newline)) * flatten


def InlineStatements(indent):
    # One or more statements on a single line, separated by ';'.
    return Statement(indent) / Some(';')


class AnnotatedStatement(Struct):
    """'@ANNOTATION STATEMENT' pair."""

    def parse(self, indent=''):
        self.left = '@' >> Expression(indent)
        self.right = Seek(Cont(Statement, indent))


# Visibility/extension modifiers that may prefix a statement.
Modifier = AnyOf('expose', 'extend', 'private')


class ModifiedStatement(Struct):
    """A statement prefixed by one or more modifiers."""

    def parse(self, indent=''):
        self.modifiers = Some(Modifier)
        self.statement = UnmodifiedStatement(indent)


@_memoize_indent
def UnmodifiedStatement(indent=''):
    # Statement alternatives, tried in order; a bare expression is last.
    return (Namespace
        | Import
        | Cont(ReturnStmt, indent)
        | Cont(Test, indent)
        | Cont(CommandHandler, indent)
        | Cont(Definition, indent)
        | Cont(TypeDeclaration, indent)
        | Cont(Update, indent)
        | Cont(Var, indent)
        | Cont(Contract, indent)
        | Expression(indent))


@_memoize_indent
def Statement(indent=''):
    return (Cont(AnnotatedStatement, indent)
        | Cont(ModifiedStatement, indent)
        | UnmodifiedStatement(indent))


@_memoize_indent
def LeftHandSide(indent=''):
    # Assignment target(s): one or more introductions, optionally
    # parenthesized, e.g. 'a, b: Int' or '(a, b)'.
    Intro = Cont(Introduction, indent)
    Targets = Intro / Comma
    return WrapParens(Targets) | Targets
class Definition(Struct):
    """Immutable binding: 'LHS = EXPR'."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def parse(self, indent=''):
        self.left = LeftHandSide(indent)
        self.right = Initializer(indent)


class Introduction(Struct):
    """A name with an optional type annotation."""

    def parse(self, indent=''):
        self.name = Name
        self.type = Opt(TypeAnnotation(indent))


class TypeDeclaration(Struct):
    """A name with a required type annotation (no initializer)."""

    def parse(self, indent=''):
        self.name = Name
        self.type = TypeAnnotation(indent)


class Update(Struct):
    """In-place update: 'EXPR := EXPR' or compound forms like '+='."""

    def parse(self, indent=''):
        self.left = Expression(indent)
        self.operator = Wrap(AnyOf(':=', '+=', '-=', '*=', '//=', '/='))
        self.right = Expression(indent)


class Var(Struct):
    """Mutable binding: 'var LHS = EXPR' or 'var LHS := EXPR'."""

    def parse(self, indent=''):
        self.left = 'var' >> LeftHandSide(indent)
        self.right = Initializer(indent) | Assignment(indent)


class Contract(Struct):
    """Contract clause: 'uses'/'requires'/'returns'/'yields' + EXPR."""

    def parse(self, indent=''):
        self.keyword = Follow(AnyOf('uses', 'requires', 'returns', 'yields'))
        self.body = Expression(indent)


class ReturnStmt(Struct):
    """'return [EXPR]' statement."""

    def __init__(self, value):
        self.value = value

    def parse(self, indent=''):
        self.value = 'return' >> Opt(Expression(indent))
@_memoize_indent
def Body(indent=''):
    # A body is either a '{...}' block or a more-deeply-indented block.
    return CurlyBody | (IncreaseIndent(indent) ** Block)


def CurrentIndent(indent):
    # At top level (indent ''), match nothing; otherwise require the
    # exact indent token.
    default = None >> Return('')
    return default if indent == '' else Term(indent)


def IncreaseIndent(current):
    # Succeeds when the next line is indented strictly deeper than
    # `current`, yielding the new indent string.
    token = Tokens.Newline >> Expect(Content(Tokens.Indent))
    return token ^ (lambda token: len(current) < len(token))


# '{...}' bodies: statements separated by optional ';' and newlines.
CurlyStmt = Statement() << List(Seek(';'))
CurlyBody = Seek('{') >> List(Wrap(CurlyStmt)) << Seek('}')
# Optional leading blank/indented lines before the program proper.
Heading = Some(Tokens.Newline) // Tokens.Indent
Program = Opt(Heading) >> Block() << OptNewlines
def parse_program(source):
    """Tokenize and parse complete source text into a Program AST.

    NOTE(review): ``basestring`` makes this python-2-only code.
    """
    assert isinstance(source, basestring)
    return tokenize_and_parse(Tokens, Program, source)
| mit |
Chemcy/vnpy | vn.api/vn.sgit/pyscript/generate_data_type.py | 15 | 3147 | # encoding: UTF-8
__author__ = 'CHENXY'
# C++和python类型的映射字典
# Mapping from C++ field types (as they appear in the Sgit API header) to the
# Python type names written into the generated sgit_data_type.py module.
type_dict = {
    'int': 'int',
    'char': 'char',
    'double': 'float',
    'short': 'int',
    'string': 'string'
}
def process_line(line):
    """Translate one line of the C++ header into its Python equivalent.

    Returns the generated line, or '' for lines that produce no output.
    """
    if '///' in line:        # doc comment
        return process_comment(line)
    if 'typedef' in line:    # type alias declaration
        return process_typedef(line)
    if '#define' in line:    # constant definition
        return process_define(line)
    if line == '\n':         # blank lines are preserved as-is
        return line
    return ''                # anything else is dropped
def process_comment(line):
    """Convert a C++ ``///`` doc comment into a Python ``#`` comment."""
    # The first three characters are the ``///`` marker; keep the rest.
    return '#' + line[3:]
def process_typedef(line, type_map=None):
    """Convert a C++ ``typedef`` line into a ``typedefDict`` assignment.

    Arguments:
        line: header line of the form ``typedef <cpp_type> <name>[N];``.
        type_map: optional mapping from C++ type names to Python type names;
            defaults to the module-level ``type_dict``.  Injectable so the
            conversion can be reused/tested with other mappings.

    Returns the generated Python source line (ends with a newline).
    """
    if type_map is None:
        type_map = type_dict
    content = line.split(' ')
    type_ = type_map[content[1]]
    # Fixed-size char arrays (e.g. ``char TName[21]``) become strings.
    if type_ == 'char' and '[' in line:
        type_ = 'string'
    keyword = content[2]
    if '[' in keyword:
        # Strip the array-size suffix, e.g. ``TName[21];`` -> ``TName``.
        keyword = keyword[:keyword.index('[')]
    else:
        keyword = keyword.replace(';\n', '')  # drop trailing semicolon/newline
    return 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
def process_define(line):
    """Convert a C++ ``#define`` line into a ``defineDict`` assignment.

    A bare ``#define NAME`` with no value produces no output.
    """
    parts = line.split(' ')
    name = parts[1]
    if len(parts) <= 2:
        return ''
    return 'defineDict["%s"] = %s' % (name, parts[-1])
def main():
    """Generate sgit_data_type.py from the C++ SgitFtdcUserApiDataType.h
    header.

    Fixes over the previous version: the large block of commented-out legacy
    code is removed, and both files are opened with context managers so they
    are closed even if a line fails to convert.
    """
    with open('SgitFtdcUserApiDataType.h', 'r') as fcpp, \
            open('sgit_data_type.py', 'w') as fpy:
        fpy.write('# encoding: UTF-8\n')
        fpy.write('\n')
        fpy.write('defineDict = {}\n')
        fpy.write('typedefDict = {}\n')
        fpy.write('\n')
        for line in fcpp:
            # Drop a trailing ``//`` comment on preprocessor lines so it
            # does not leak into the generated value.
            if '#' in line and '//' in line:
                n = line.index('//')
                line = line[:n] + '\n'
            py_line = process_line(line)
            if py_line:
                # The header is GBK-encoded; re-encode output as UTF-8
                # (python-2 str semantics).
                fpy.write(py_line.decode('gbk').encode('utf-8'))
# Script entry point: regenerate the data-type module in place.
if __name__ == '__main__':
    main()
| mit |
tsdmgz/ansible | contrib/inventory/openshift.py | 196 | 3274 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
- this permit to reuse playbook to setup an Openshift gear
version_added: None
author: Michael Scherer
'''
try:
import json
except ImportError:
import simplejson as json
import os
import os.path
import sys
import ConfigParser
import StringIO
from ansible.module_utils.urls import open_url
# Lazily-built parser for ~/.openshift/express.conf; populated on first use.
configparser = None


def get_from_rhc_config(variable):
    """Look up *variable* in the rhc express.conf file, or None if absent."""
    global configparser
    conf_file = os.path.expanduser('~/.openshift/express.conf')
    if not os.path.exists(conf_file):
        return None
    if configparser is None:
        # The file has no section header, so prepend a fake [root] section
        # to make it parseable by ConfigParser.
        ini_str = '[root]\n' + open(conf_file, 'r').read()
        configparser = ConfigParser.SafeConfigParser()
        configparser.readfp(StringIO.StringIO(ini_str))
    try:
        return configparser.get('root', variable)
    except ConfigParser.NoOptionError:
        return None
def get_config(env_var, config_var):
    """Resolve a setting from the environment, falling back to express.conf.

    Exits the process with an Ansible-style error message when neither
    source provides a value.
    """
    value = os.getenv(env_var)
    if not value:
        value = get_from_rhc_config(config_var)
    if not value:
        sys.exit("failed=True msg='missing %s'" % env_var)
    return value
def get_json_from_api(url, username, password):
    """GET *url* with HTTP basic auth and return the ``data`` member of the
    decoded JSON response."""
    headers = {'Accept': 'application/json; version=1.5'}
    raw = open_url(url, headers=headers, url_username=username,
                   url_password=password).read()
    return json.loads(raw)['data']
# --- module-level script body ----------------------------------------------
# Resolve credentials and broker address from the environment or express.conf.
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')

# First request lists the domains; the second lists the applications of the
# first domain only.
response = get_json_from_api(broker_url + '/domains', username, password)
response = get_json_from_api("%s/domains/%s/applications" %
                             (broker_url, response[0]['id']), username, password)

# Build one inventory group per application, keyed by the app name extracted
# from the gear hostname.
result = {}
for app in response:
    # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
    (user, host) = app['ssh_url'][6:].split('@')
    app_name = host.split('-')[0]
    result[app_name] = {}
    result[app_name]['hosts'] = []
    result[app_name]['hosts'].append(host)
    result[app_name]['vars'] = {}
    result[app_name]['vars']['ansible_ssh_user'] = user

# Standard dynamic-inventory protocol: --list dumps all groups, --host dumps
# per-host variables (none here).
if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print(json.dumps({}))
else:
    print("Need an argument, either --list or --host <host>")
| gpl-3.0 |
eammx/proyectosWeb | proyectoPython/env/lib/python3.6/site-packages/markupsafe/_constants.py | 27 | 4690 | # -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
# Mapping of HTML4 named character references (without the leading ``&`` and
# trailing ``;``) to their Unicode code points.  Note that ``apos`` is
# XML/XHTML-only and is therefore included here explicitly even though it is
# absent from some HTML4 entity tables.
HTML_ENTITIES = {
    "AElig": 198,
    "Aacute": 193,
    "Acirc": 194,
    "Agrave": 192,
    "Alpha": 913,
    "Aring": 197,
    "Atilde": 195,
    "Auml": 196,
    "Beta": 914,
    "Ccedil": 199,
    "Chi": 935,
    "Dagger": 8225,
    "Delta": 916,
    "ETH": 208,
    "Eacute": 201,
    "Ecirc": 202,
    "Egrave": 200,
    "Epsilon": 917,
    "Eta": 919,
    "Euml": 203,
    "Gamma": 915,
    "Iacute": 205,
    "Icirc": 206,
    "Igrave": 204,
    "Iota": 921,
    "Iuml": 207,
    "Kappa": 922,
    "Lambda": 923,
    "Mu": 924,
    "Ntilde": 209,
    "Nu": 925,
    "OElig": 338,
    "Oacute": 211,
    "Ocirc": 212,
    "Ograve": 210,
    "Omega": 937,
    "Omicron": 927,
    "Oslash": 216,
    "Otilde": 213,
    "Ouml": 214,
    "Phi": 934,
    "Pi": 928,
    "Prime": 8243,
    "Psi": 936,
    "Rho": 929,
    "Scaron": 352,
    "Sigma": 931,
    "THORN": 222,
    "Tau": 932,
    "Theta": 920,
    "Uacute": 218,
    "Ucirc": 219,
    "Ugrave": 217,
    "Upsilon": 933,
    "Uuml": 220,
    "Xi": 926,
    "Yacute": 221,
    "Yuml": 376,
    "Zeta": 918,
    "aacute": 225,
    "acirc": 226,
    "acute": 180,
    "aelig": 230,
    "agrave": 224,
    "alefsym": 8501,
    "alpha": 945,
    "amp": 38,
    "and": 8743,
    "ang": 8736,
    "apos": 39,
    "aring": 229,
    "asymp": 8776,
    "atilde": 227,
    "auml": 228,
    "bdquo": 8222,
    "beta": 946,
    "brvbar": 166,
    "bull": 8226,
    "cap": 8745,
    "ccedil": 231,
    "cedil": 184,
    "cent": 162,
    "chi": 967,
    "circ": 710,
    "clubs": 9827,
    "cong": 8773,
    "copy": 169,
    "crarr": 8629,
    "cup": 8746,
    "curren": 164,
    "dArr": 8659,
    "dagger": 8224,
    "darr": 8595,
    "deg": 176,
    "delta": 948,
    "diams": 9830,
    "divide": 247,
    "eacute": 233,
    "ecirc": 234,
    "egrave": 232,
    "empty": 8709,
    "emsp": 8195,
    "ensp": 8194,
    "epsilon": 949,
    "equiv": 8801,
    "eta": 951,
    "eth": 240,
    "euml": 235,
    "euro": 8364,
    "exist": 8707,
    "fnof": 402,
    "forall": 8704,
    "frac12": 189,
    "frac14": 188,
    "frac34": 190,
    "frasl": 8260,
    "gamma": 947,
    "ge": 8805,
    "gt": 62,
    "hArr": 8660,
    "harr": 8596,
    "hearts": 9829,
    "hellip": 8230,
    "iacute": 237,
    "icirc": 238,
    "iexcl": 161,
    "igrave": 236,
    "image": 8465,
    "infin": 8734,
    "int": 8747,
    "iota": 953,
    "iquest": 191,
    "isin": 8712,
    "iuml": 239,
    "kappa": 954,
    "lArr": 8656,
    "lambda": 955,
    "lang": 9001,
    "laquo": 171,
    "larr": 8592,
    "lceil": 8968,
    "ldquo": 8220,
    "le": 8804,
    "lfloor": 8970,
    "lowast": 8727,
    "loz": 9674,
    "lrm": 8206,
    "lsaquo": 8249,
    "lsquo": 8216,
    "lt": 60,
    "macr": 175,
    "mdash": 8212,
    "micro": 181,
    "middot": 183,
    "minus": 8722,
    "mu": 956,
    "nabla": 8711,
    "nbsp": 160,
    "ndash": 8211,
    "ne": 8800,
    "ni": 8715,
    "not": 172,
    "notin": 8713,
    "nsub": 8836,
    "ntilde": 241,
    "nu": 957,
    "oacute": 243,
    "ocirc": 244,
    "oelig": 339,
    "ograve": 242,
    "oline": 8254,
    "omega": 969,
    "omicron": 959,
    "oplus": 8853,
    "or": 8744,
    "ordf": 170,
    "ordm": 186,
    "oslash": 248,
    "otilde": 245,
    "otimes": 8855,
    "ouml": 246,
    "para": 182,
    "part": 8706,
    "permil": 8240,
    "perp": 8869,
    "phi": 966,
    "pi": 960,
    "piv": 982,
    "plusmn": 177,
    "pound": 163,
    "prime": 8242,
    "prod": 8719,
    "prop": 8733,
    "psi": 968,
    "quot": 34,
    "rArr": 8658,
    "radic": 8730,
    "rang": 9002,
    "raquo": 187,
    "rarr": 8594,
    "rceil": 8969,
    "rdquo": 8221,
    "real": 8476,
    "reg": 174,
    "rfloor": 8971,
    "rho": 961,
    "rlm": 8207,
    "rsaquo": 8250,
    "rsquo": 8217,
    "sbquo": 8218,
    "scaron": 353,
    "sdot": 8901,
    "sect": 167,
    "shy": 173,
    "sigma": 963,
    "sigmaf": 962,
    "sim": 8764,
    "spades": 9824,
    "sub": 8834,
    "sube": 8838,
    "sum": 8721,
    "sup": 8835,
    "sup1": 185,
    "sup2": 178,
    "sup3": 179,
    "supe": 8839,
    "szlig": 223,
    "tau": 964,
    "there4": 8756,
    "theta": 952,
    "thetasym": 977,
    "thinsp": 8201,
    "thorn": 254,
    "tilde": 732,
    "times": 215,
    "trade": 8482,
    "uArr": 8657,
    "uacute": 250,
    "uarr": 8593,
    "ucirc": 251,
    "ugrave": 249,
    "uml": 168,
    "upsih": 978,
    "upsilon": 965,
    "uuml": 252,
    "weierp": 8472,
    "xi": 958,
    "yacute": 253,
    "yen": 165,
    "yuml": 255,
    "zeta": 950,
    "zwj": 8205,
    "zwnj": 8204,
}
| mit |
msduketown/SublimeKodi | libs/polib/polib.py | 1 | 61741 | # -* coding: utf-8 -*-
#
# License: MIT (see LICENSE file provided)
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
"""
**polib** allows you to manipulate, create, modify gettext files (pot, po and
mo files). You can load existing files, iterate through it's entries, add,
modify entries, comments or metadata, etc. or create new po files from scratch.
**polib** provides a simple and pythonic API via the :func:`~polib.pofile` and
:func:`~polib.mofile` convenience functions.
"""
__author__ = 'David Jean Louis <izimobil@gmail.com>'
__version__ = '1.0.6'
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
'default_encoding', 'escape', 'unescape', 'detect_encoding', ]
import array
import codecs
import os
import re
import struct
import sys
import textwrap
try:
    import io
except ImportError:
    # Replacement of io.open() for python < 2.6: fall back to codecs.open(),
    # which accepts a compatible (path, mode, encoding) signature.
    class io(object):
        @staticmethod
        def open(fpath, mode='r', encoding=None):
            return codecs.open(fpath, mode, encoding)


# the default encoding to use when encoding cannot be detected
default_encoding = 'utf-8'

# python 2/3 compatibility helpers {{{


if sys.version_info[:2] < (3, 0):
    PY3 = False
    text_type = unicode

    def b(s):
        # byte strings are the native str type on python 2
        return s

    def u(s):
        return unicode(s, "unicode_escape")
else:
    PY3 = True
    text_type = str

    def b(s):
        # latin-1 maps code points 0-255 directly to the same byte values
        return s.encode("latin-1")

    def u(s):
        return s
# }}}
# _pofile_or_mofile {{{
def _pofile_or_mofile(f, type, **kwargs):
    """
    Internal function used by :func:`polib.pofile` and :func:`polib.mofile` to
    honor the DRY concept.
    """
    # Detect the file encoding unless the caller pinned one explicitly.
    enc = kwargs.get('encoding')
    if enc is None:
        enc = detect_encoding(f, type == 'mofile')

    # Pick the right parser class and parse the file.
    parser_class = _POFileParser if type == 'pofile' else _MOFileParser
    parser = parser_class(
        f,
        encoding=enc,
        check_for_duplicates=kwargs.get('check_for_duplicates', False),
        klass=kwargs.get('klass')
    )
    catalog = parser.parse()
    catalog.wrapwidth = kwargs.get('wrapwidth', 78)
    return catalog
# }}}
# _is_file {{{
def _is_file(filename_or_contents):
    """
    Safely returns the value of os.path.exists(filename_or_contents).

    Arguments:

    ``filename_or_contents``
        either a filename, or a string holding the contents of some file.
        In the latter case, this function will always return False.
    """
    try:
        exists = os.path.exists(filename_or_contents)
    except (ValueError, UnicodeEncodeError):
        # e.g. embedded NUL bytes or a "path" too long/odd to encode
        return False
    return exists
# }}}
# function pofile() {{{
def pofile(pofile, **kwargs):
    """
    Convenience function that parses the po or pot file ``pofile`` and returns
    a :class:`~polib.POFile` instance.

    Arguments:

    ``pofile``
        string, full or relative path to the po/pot file or its content (data).

    ``wrapwidth``
        integer, the wrap width, only useful when the ``-w`` option was passed
        to xgettext (optional, default: ``78``).

    ``encoding``
        string, the encoding to use (e.g. "utf-8") (default: ``None``, the
        encoding will be auto-detected).

    ``check_for_duplicates``
        whether to check for duplicate entries when adding entries to the
        file (optional, default: ``False``).

    ``klass``
        class which is used to instantiate the return value (optional,
        default: ``None``, the return value with be a :class:`~polib.POFile`
        instance).
    """
    # Delegates to the shared po/mo parsing helper.
    return _pofile_or_mofile(pofile, 'pofile', **kwargs)
# }}}
# function mofile() {{{
def mofile(mofile, **kwargs):
    """
    Convenience function that parses the mo file ``mofile`` and returns a
    :class:`~polib.MOFile` instance.

    Arguments:

    ``mofile``
        string, full or relative path to the mo file or its content (data).

    ``wrapwidth``
        integer, the wrap width, only useful when the ``-w`` option was passed
        to xgettext to generate the po file that was used to format the mo file
        (optional, default: ``78``).

    ``encoding``
        string, the encoding to use (e.g. "utf-8") (default: ``None``, the
        encoding will be auto-detected).

    ``check_for_duplicates``
        whether to check for duplicate entries when adding entries to the
        file (optional, default: ``False``).

    ``klass``
        class which is used to instantiate the return value (optional,
        default: ``None``, the return value with be a :class:`~polib.POFile`
        instance).
    """
    # Delegates to the shared po/mo parsing helper.
    return _pofile_or_mofile(mofile, 'mofile', **kwargs)
# }}}
# function detect_encoding() {{{
def detect_encoding(file, binary_mode=False):
    """
    Try to detect the encoding used by the ``file``. The ``file`` argument can
    be a PO or MO file path or a string containing the contents of the file.
    If the encoding cannot be detected, the function will return the value of
    ``default_encoding``.

    Arguments:

    ``file``
        string, full or relative path to the po/mo file or its content.

    ``binary_mode``
        boolean, set this to True if ``file`` is a mo file.
    """
    PATTERN = r'"?Content-Type:.+? charset=([\w_\-:\.]+)'
    text_rx = re.compile(u(PATTERN))
    bin_rx = re.compile(b(PATTERN))

    def charset_exists(charset):
        """Check whether ``charset`` is a codec known to python."""
        try:
            codecs.lookup(charset)
        except LookupError:
            return False
        return True

    if not _is_file(file):
        # ``file`` holds the catalog contents directly
        found = text_rx.search(file)
        if found:
            charset = found.group(1).strip()
            if charset_exists(charset):
                return charset
    else:
        # on python 3 the file is always scanned as bytes
        if binary_mode or PY3:
            mode, rx = 'rb', bin_rx
        else:
            mode, rx = 'r', text_rx
        f = open(file, mode)
        for line in f.readlines():
            found = rx.search(line)
            if found:
                f.close()
                charset = found.group(1).strip()
                if not isinstance(charset, text_type):
                    charset = charset.decode('utf-8')
                if charset_exists(charset):
                    return charset
        f.close()
    return default_encoding
# }}}
# function escape() {{{
def escape(st):
    """
    Escapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
    the given string ``st`` and returns it.
    """
    # Backslash must be handled first so characters escaped by the later
    # replacements are not escaped twice.
    for plain, escaped in (('\\', r'\\'),
                           ('\t', r'\t'),
                           ('\r', r'\r'),
                           ('\n', r'\n'),
                           ('\"', r'\"')):
        st = st.replace(plain, escaped)
    return st
# }}}
# function unescape() {{{
def unescape(st):
    """
    Unescapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
    the given string ``st`` and returns it.
    """
    replacements = {'n': '\n', 't': '\t', 'r': '\r', '\\': '\\'}

    def expand(match):
        char = match.group(1)
        # an escaped double quote simply maps to itself
        return replacements.get(char, char)

    return re.sub(r'\\(\\|n|t|r|")', expand, st)
# }}}
# class _BaseFile {{{
class _BaseFile(list):
    """
    Common base class for the :class:`~polib.POFile` and :class:`~polib.MOFile`
    classes. This class should **not** be instanciated directly.
    """

    def __init__(self, *args, **kwargs):
        """
        Constructor, accepts the following keyword arguments:

        ``pofile``
            string, the path to the po or mo file, or its content as a string.

        ``wrapwidth``
            integer, the wrap width, only useful when the ``-w`` option was
            passed to xgettext (optional, default: ``78``).

        ``encoding``
            string, the encoding to use, defaults to ``default_encoding``
            global variable (optional).

        ``check_for_duplicates``
            whether to check for duplicate entries when adding entries to the
            file, (optional, default: ``False``).
        """
        list.__init__(self)
        # the opened file handle
        pofile = kwargs.get('pofile', None)
        if pofile and _is_file(pofile):
            self.fpath = pofile
        else:
            self.fpath = kwargs.get('fpath')
        # the width at which lines should be wrapped
        self.wrapwidth = kwargs.get('wrapwidth', 78)
        # the file encoding
        self.encoding = kwargs.get('encoding', default_encoding)
        # whether to check for duplicate entries or not
        self.check_for_duplicates = kwargs.get('check_for_duplicates', False)
        # header
        self.header = ''
        # both po and mo files have metadata
        self.metadata = {}
        self.metadata_is_fuzzy = 0

    def __unicode__(self):
        """
        Returns the unicode representation of the file.
        """
        ret = []
        # the metadata entry is serialized first, then non-obsolete entries,
        # then obsolete ones at the end of the file
        entries = [self.metadata_as_entry()] + \
            [e for e in self if not e.obsolete]
        for entry in entries:
            ret.append(entry.__unicode__(self.wrapwidth))
        for entry in self.obsolete_entries():
            ret.append(entry.__unicode__(self.wrapwidth))
        ret = u('\n').join(ret)
        assert isinstance(ret, text_type)
        #if type(ret) != text_type:
        #    return unicode(ret, self.encoding)
        return ret

    if PY3:
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            """
            Returns the string representation of the file.
            """
            return unicode(self).encode(self.encoding)

    def __contains__(self, entry):
        """
        Overriden ``list`` method to implement the membership test (in and
        not in).
        The method considers that an entry is in the file if it finds an entry
        that has the same msgid (the test is **case sensitive**) and the same
        msgctxt (or none for both entries).

        Argument:

        ``entry``
            an instance of :class:`~polib._BaseEntry`.
        """
        return self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt) \
            is not None

    def __eq__(self, other):
        # files compare equal when their serialized representations match
        return str(self) == str(other)

    def append(self, entry):
        """
        Overriden method to check for duplicates entries, if a user tries to
        add an entry that is already in the file, the method will raise a
        ``ValueError`` exception.

        Argument:

        ``entry``
            an instance of :class:`~polib._BaseEntry`.
        """
        if self.check_for_duplicates and entry in self:
            raise ValueError('Entry "%s" already exists' % entry.msgid)
        super(_BaseFile, self).append(entry)

    def insert(self, index, entry):
        """
        Overriden method to check for duplicates entries, if a user tries to
        add an entry that is already in the file, the method will raise a
        ``ValueError`` exception.

        Arguments:

        ``index``
            index at which the entry should be inserted.

        ``entry``
            an instance of :class:`~polib._BaseEntry`.
        """
        if self.check_for_duplicates and entry in self:
            raise ValueError('Entry "%s" already exists' % entry.msgid)
        super(_BaseFile, self).insert(index, entry)

    def metadata_as_entry(self):
        """
        Returns the file metadata as a :class:`~polib.POFile` instance.
        """
        # the metadata entry is the entry with an empty msgid whose msgstr
        # holds the "Key: value" header lines
        e = POEntry(msgid='')
        mdata = self.ordered_metadata()
        if mdata:
            strs = []
            for name, value in mdata:
                # Strip whitespace off each line in a multi-line entry
                strs.append('%s: %s' % (name, value))
            e.msgstr = '\n'.join(strs) + '\n'
        if self.metadata_is_fuzzy:
            e.flags.append('fuzzy')
        return e

    def save(self, fpath=None, repr_method='__unicode__'):
        """
        Saves the po file to ``fpath``.
        If it is an existing file and no ``fpath`` is provided, then the
        existing file is rewritten with the modified data.

        Keyword arguments:

        ``fpath``
            string, full or relative path to the file.

        ``repr_method``
            string, the method to use for output.
        """
        if self.fpath is None and fpath is None:
            raise IOError('You must provide a file path to save() method')
        contents = getattr(self, repr_method)()
        if fpath is None:
            fpath = self.fpath
        if repr_method == 'to_binary':
            # mo output is raw bytes
            fhandle = open(fpath, 'wb')
        else:
            fhandle = io.open(fpath, 'w', encoding=self.encoding)
            if not isinstance(contents, text_type):
                contents = contents.decode(self.encoding)
        fhandle.write(contents)
        fhandle.close()
        # set the file path if not set
        if self.fpath is None and fpath:
            self.fpath = fpath

    def find(self, st, by='msgid', include_obsolete_entries=False,
             msgctxt=False):
        """
        Find the entry which msgid (or property identified by the ``by``
        argument) matches the string ``st``.

        Keyword arguments:

        ``st``
            string, the string to search for.

        ``by``
            string, the property to use for comparison (default: ``msgid``).

        ``include_obsolete_entries``
            boolean, whether to also search in entries that are obsolete.

        ``msgctxt``
            string, allows to specify a specific message context for the
            search.
        """
        if include_obsolete_entries:
            entries = self[:]
        else:
            entries = [e for e in self if not e.obsolete]
        for e in entries:
            if getattr(e, by) == st:
                # ``False`` acts as a sentinel here because ``None`` is a
                # valid msgctxt value
                if msgctxt is not False and e.msgctxt != msgctxt:
                    continue
                return e
        return None

    def ordered_metadata(self):
        """
        Convenience method that returns an ordered version of the metadata
        dictionary. The return value is list of tuples (metadata name,
        metadata_value).
        """
        # copy the dict first
        metadata = self.metadata.copy()
        # well-known headers are emitted first, in this canonical order
        data_order = [
            'Project-Id-Version',
            'Report-Msgid-Bugs-To',
            'POT-Creation-Date',
            'PO-Revision-Date',
            'Last-Translator',
            'Language-Team',
            'Language',
            'MIME-Version',
            'Content-Type',
            'Content-Transfer-Encoding',
            'Plural-Forms'
        ]
        ordered_data = []
        for data in data_order:
            try:
                value = metadata.pop(data)
                ordered_data.append((data, value))
            except KeyError:
                pass
        # the rest of the metadata will be alphabetically ordered since there
        # are no specs for this AFAIK
        for data in sorted(metadata.keys()):
            value = metadata[data]
            ordered_data.append((data, value))
        return ordered_data

    def to_binary(self):
        """
        Return the binary representation of the file.
        """
        offsets = []
        entries = self.translated_entries()

        # the keys are sorted in the .mo file
        def cmp(_self, other):
            # NOTE(review): this nested comparator is never referenced (the
            # sort below uses a key function instead); kept as-is.
            # msgfmt compares entries with msgctxt if it exists
            self_msgid = _self.msgctxt and _self.msgctxt or _self.msgid
            other_msgid = other.msgctxt and other.msgctxt or other.msgid
            if self_msgid > other_msgid:
                return 1
            elif self_msgid < other_msgid:
                return -1
            else:
                return 0
        # add metadata entry
        entries.sort(key=lambda o: o.msgctxt or o.msgid)
        mentry = self.metadata_as_entry()
        #mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
        entries = [mentry] + entries
        entries_len = len(entries)
        ids, strs = b(''), b('')
        for e in entries:
            # For each string, we need size and file offset. Each string is
            # NUL terminated; the NUL does not count into the size.
            msgid = b('')
            if e.msgctxt:
                # Contexts are stored by storing the concatenation of the
                # context, a <EOT> byte, and the original string
                msgid = self._encode(e.msgctxt + '\4')
            if e.msgid_plural:
                msgstr = []
                for index in sorted(e.msgstr_plural.keys()):
                    msgstr.append(e.msgstr_plural[index])
                msgid += self._encode(e.msgid + '\0' + e.msgid_plural)
                msgstr = self._encode('\0'.join(msgstr))
            else:
                msgid += self._encode(e.msgid)
                msgstr = self._encode(e.msgstr)
            offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
            ids += msgid + b('\0')
            strs += msgstr + b('\0')
        # The header is 7 32-bit unsigned integers.
        keystart = 7 * 4 + 16 * entries_len
        # and the values start after the keys
        valuestart = keystart + len(ids)
        koffsets = []
        voffsets = []
        # The string table first has the list of keys, then the list of values.
        # Each entry has first the size of the string, then the file offset.
        for o1, l1, o2, l2 in offsets:
            koffsets += [l1, o1 + keystart]
            voffsets += [l2, o2 + valuestart]
        offsets = koffsets + voffsets
        output = struct.pack(
            "Iiiiiii",
            # Magic number
            MOFile.MAGIC,
            # Version
            0,
            # number of entries
            entries_len,
            # start of key index
            7 * 4,
            # start of value index
            7 * 4 + entries_len * 8,
            # size and offset of hash table, we don't use hash tables
            0, keystart
        )
        if PY3 and sys.version_info.minor > 1:  # python 3.2 or superior
            output += array.array("i", offsets).tobytes()
        else:
            output += array.array("i", offsets).tostring()
        output += ids
        output += strs
        return output

    def _encode(self, mixed):
        """
        Encodes the given ``mixed`` argument with the file encoding if and
        only if it's an unicode string and returns the encoded string.
        """
        if isinstance(mixed, text_type):
            mixed = mixed.encode(self.encoding)
        return mixed
# }}}
# class POFile {{{
class POFile(_BaseFile):
    """
    Po (or Pot) file reader/writer.
    This class inherits the :class:`~polib._BaseFile` class and, by extension,
    the python ``list`` type.
    """

    def __unicode__(self):
        """
        Returns the unicode representation of the po file.
        """
        ret, headers = '', self.header.split('\n')
        for header in headers:
            # header lines beginning with ',' or ':' are flag/reference
            # comments and must not get a space after the '#'
            if header[:1] in [',', ':']:
                ret += '#%s\n' % header
            else:
                ret += '# %s\n' % header
        if not isinstance(ret, text_type):
            ret = ret.decode(self.encoding)
        return ret + _BaseFile.__unicode__(self)

    def save_as_mofile(self, fpath):
        """
        Saves the binary representation of the file to given ``fpath``.

        Keyword argument:

        ``fpath``
            string, full or relative path to the mo file.
        """
        _BaseFile.save(self, fpath, 'to_binary')

    def percent_translated(self):
        """
        Convenience method that returns the percentage of translated
        messages.
        """
        total = len([e for e in self if not e.obsolete])
        if total == 0:
            # an empty catalog counts as fully translated
            return 100
        translated = len(self.translated_entries())
        return int(translated * 100 / float(total))

    def translated_entries(self):
        """
        Convenience method that returns the list of translated entries.
        """
        return [e for e in self if e.translated()]

    def untranslated_entries(self):
        """
        Convenience method that returns the list of untranslated entries.
        """
        return [e for e in self if not e.translated() and not e.obsolete
                and not 'fuzzy' in e.flags]

    def fuzzy_entries(self):
        """
        Convenience method that returns the list of fuzzy entries.
        """
        return [e for e in self if 'fuzzy' in e.flags]

    def obsolete_entries(self):
        """
        Convenience method that returns the list of obsolete entries.
        """
        return [e for e in self if e.obsolete]

    def merge(self, refpot):
        """
        Convenience method that merges the current pofile with the pot file
        provided. It behaves exactly as the gettext msgmerge utility:

        * comments of this file will be preserved, but extracted comments and
          occurrences will be discarded;
        * any translations or comments in the file will be discarded, however,
          dot comments and file positions will be preserved;
        * the fuzzy flags are preserved.

        Keyword argument:

        ``refpot``
            object POFile, the reference catalog.
        """
        # Store entries in dict/set for faster access
        self_entries = dict((entry.msgid, entry) for entry in self)
        refpot_msgids = set(entry.msgid for entry in refpot)
        # Merge entries that are in the refpot
        for entry in refpot:
            e = self_entries.get(entry.msgid)
            if e is None:
                # entry is new in the reference catalog: add it
                e = POEntry()
                self.append(e)
            e.merge(entry)
        # ok, now we must "obsolete" entries that are not in the refpot anymore
        for entry in self:
            if entry.msgid not in refpot_msgids:
                entry.obsolete = True
# }}}
# class MOFile {{{
class MOFile(_BaseFile):
    """
    Mo file reader/writer.
    This class inherits the :class:`~polib._BaseFile` class and, by
    extension, the python ``list`` type.
    """
    # Magic number identifying the mo binary format, and its byte-swapped
    # (opposite endianness) counterpart.
    MAGIC = 0x950412de
    MAGIC_SWAPPED = 0xde120495

    def __init__(self, *args, **kwargs):
        """
        Constructor; accepts the same keyword arguments as the
        :class:`~polib._BaseFile` class.
        """
        _BaseFile.__init__(self, *args, **kwargs)
        self.magic_number = None
        self.version = 0

    def save_as_pofile(self, fpath):
        """
        Write this catalog as a textual po file to ``fpath``.

        Keyword argument:

        ``fpath``
            string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath)

    def save(self, fpath=None):
        """
        Write this catalog back to disk in binary mo format.

        Keyword argument:

        ``fpath``
            string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath, 'to_binary')

    # The methods below mirror the POFile API so that callers can treat po
    # and mo catalogs uniformly: a compiled mo file only ever contains
    # translated entries, hence the fixed return values.

    def percent_translated(self):
        """Always 100: every entry in a mo file is translated."""
        return 100

    def translated_entries(self):
        """All entries are translated, so the file itself is returned."""
        return self

    def untranslated_entries(self):
        """A mo file stores no untranslated entries."""
        return []

    def fuzzy_entries(self):
        """A mo file stores no fuzzy entries."""
        return []

    def obsolete_entries(self):
        """A mo file stores no obsolete entries."""
        return []
# }}}
# class _BaseEntry {{{
class _BaseEntry(object):
    """
    Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes.
    This class should **not** be instanciated directly.
    """

    def __init__(self, *args, **kwargs):
        """
        Constructor, accepts the following keyword arguments:

        ``msgid``
            string, the entry msgid.

        ``msgstr``
            string, the entry msgstr.

        ``msgid_plural``
            string, the entry msgid_plural.

        ``msgstr_plural``
            list, the entry msgstr_plural lines.

        ``msgctxt``
            string, the entry context (msgctxt).

        ``obsolete``
            bool, whether the entry is "obsolete" or not.

        ``encoding``
            string, the encoding to use, defaults to ``default_encoding``
            global variable (optional).
        """
        self.msgid = kwargs.get('msgid', '')
        self.msgstr = kwargs.get('msgstr', '')
        self.msgid_plural = kwargs.get('msgid_plural', '')
        self.msgstr_plural = kwargs.get('msgstr_plural', {})
        self.msgctxt = kwargs.get('msgctxt', None)
        self.obsolete = kwargs.get('obsolete', False)
        self.encoding = kwargs.get('encoding', default_encoding)

    def __unicode__(self, wrapwidth=78):
        """
        Returns the unicode representation of the entry.
        """
        # obsolete entries are prefixed with "#~ " on every line
        if self.obsolete:
            delflag = '#~ '
        else:
            delflag = ''
        ret = []
        # write the msgctxt if any
        if self.msgctxt is not None:
            ret += self._str_field("msgctxt", delflag, "", self.msgctxt,
                                   wrapwidth)
        # write the msgid
        ret += self._str_field("msgid", delflag, "", self.msgid, wrapwidth)
        # write the msgid_plural if any
        if self.msgid_plural:
            ret += self._str_field("msgid_plural", delflag, "",
                                   self.msgid_plural, wrapwidth)
        if self.msgstr_plural:
            # write the msgstr_plural if any, in index order
            msgstrs = self.msgstr_plural
            keys = list(msgstrs)
            keys.sort()
            for index in keys:
                msgstr = msgstrs[index]
                plural_index = '[%s]' % index
                ret += self._str_field("msgstr", delflag, plural_index, msgstr,
                                       wrapwidth)
        else:
            # otherwise write the msgstr
            ret += self._str_field("msgstr", delflag, "", self.msgstr,
                                   wrapwidth)
        ret.append('')
        ret = u('\n').join(ret)
        return ret

    if PY3:
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            """
            Returns the string representation of the entry.
            """
            return unicode(self).encode(self.encoding)

    def __eq__(self, other):
        # entries compare equal when their serialized representations match
        return str(self) == str(other)

    def _str_field(self, fieldname, delflag, plural_index, field,
                   wrapwidth=78):
        # Serialize one field ("msgid", "msgstr[0]", ...) into a list of po
        # lines, wrapping long single-line values the same way xgettext does.
        # NOTE(review): ``wrap`` here is a module-level helper defined
        # elsewhere in this file, not textwrap.wrap -- confirm.
        lines = field.splitlines(True)
        if len(lines) > 1:
            lines = [''] + lines  # start with initial empty line
        else:
            escaped_field = escape(field)
            specialchars_count = 0
            for c in ['\\', '\n', '\r', '\t', '"']:
                specialchars_count += field.count(c)
            # comparison must take into account fieldname length + one space
            # + 2 quotes (eg. msgid "<string>")
            flength = len(fieldname) + 3
            if plural_index:
                flength += len(plural_index)
            real_wrapwidth = wrapwidth - flength + specialchars_count
            if wrapwidth > 0 and len(field) > real_wrapwidth:
                # Wrap the line but take field name into account
                lines = [''] + [unescape(item) for item in wrap(
                    escaped_field,
                    wrapwidth - 2,  # 2 for quotes ""
                    drop_whitespace=False,
                    break_long_words=False
                )]
            else:
                lines = [field]
        if fieldname.startswith('previous_'):
            # quick and dirty trick to get the real field name
            fieldname = fieldname[9:]
        ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
                                escape(lines.pop(0)))]
        for mstr in lines:
            #import pdb; pdb.set_trace()
            ret.append('%s"%s"' % (delflag, escape(mstr)))
        return ret
# }}}
# class POEntry {{{
class POEntry(_BaseEntry):
    """
    Represents a po file entry: a (msgid, msgstr) pair together with its
    comments, occurrences, flags and "previous" (#|) metadata.
    """
    def __init__(self, *args, **kwargs):
        """
        Constructor, accepts the following keyword arguments:

        ``comment``
            string, the entry comment.
        ``tcomment``
            string, the entry translator comment.
        ``occurrences``
            list, the entry occurrences (as (filepath, lineno) tuples).
        ``flags``
            list, the entry flags.
        ``previous_msgctxt``
            string, the entry previous context.
        ``previous_msgid``
            string, the entry previous msgid.
        ``previous_msgid_plural``
            string, the entry previous msgid_plural.
        ``linenum``
            integer, the line number of the entry
        """
        _BaseEntry.__init__(self, *args, **kwargs)
        self.comment = kwargs.get('comment', '')
        self.tcomment = kwargs.get('tcomment', '')
        self.occurrences = kwargs.get('occurrences', [])
        self.flags = kwargs.get('flags', [])
        self.previous_msgctxt = kwargs.get('previous_msgctxt', None)
        self.previous_msgid = kwargs.get('previous_msgid', None)
        self.previous_msgid_plural = kwargs.get('previous_msgid_plural', None)
        self.linenum = kwargs.get('linenum', None)
    def __unicode__(self, wrapwidth=78):
        """
        Returns the unicode representation of the entry, i.e. the full PO
        text block (comments, occurrences, flags, previous fields, then the
        msgid/msgstr rendered by the base class).
        """
        if self.obsolete:
            # obsolete entries are rendered entirely by the base class
            return _BaseEntry.__unicode__(self, wrapwidth)
        ret = []
        # comments first, if any (with text wrapping as xgettext does)
        comments = [('comment', '#. '), ('tcomment', '# ')]
        for c in comments:
            val = getattr(self, c[0])
            if val:
                for comment in val.split('\n'):
                    if wrapwidth > 0 and len(comment) + len(c[1]) > wrapwidth:
                        ret += wrap(
                            comment,
                            wrapwidth,
                            initial_indent=c[1],
                            subsequent_indent=c[1],
                            break_long_words=False
                        )
                    else:
                        ret.append('%s%s' % (c[1], comment))
        # occurrences (with text wrapping as xgettext does)
        if self.occurrences:
            filelist = []
            for fpath, lineno in self.occurrences:
                if lineno:
                    filelist.append('%s:%s' % (fpath, lineno))
                else:
                    filelist.append(fpath)
            filestr = ' '.join(filelist)
            if wrapwidth > 0 and len(filestr) + 3 > wrapwidth:
                # textwrap split words that contain hyphen, this is not
                # what we want for filenames, so the dirty hack is to
                # temporarily replace hyphens with a char that a file cannot
                # contain, like "*"
                ret += [l.replace('*', '-') for l in wrap(
                    filestr.replace('-', '*'),
                    wrapwidth,
                    initial_indent='#: ',
                    subsequent_indent='#: ',
                    break_long_words=False
                )]
            else:
                ret.append('#: ' + filestr)
        # flags (TODO: wrapping ?)
        if self.flags:
            ret.append('#, %s' % ', '.join(self.flags))
        # previous context and previous msgid/msgid_plural (#| lines)
        fields = ['previous_msgctxt', 'previous_msgid',
                  'previous_msgid_plural']
        for f in fields:
            val = getattr(self, f)
            if val:
                ret += self._str_field(f, "#| ", "", val, wrapwidth)
        ret.append(_BaseEntry.__unicode__(self, wrapwidth))
        ret = u('\n').join(ret)
        assert isinstance(ret, text_type)
        #if type(ret) != types.UnicodeType:
        #    return unicode(ret, self.encoding)
        return ret
    def __cmp__(self, other):
        """
        Called by comparison operations if rich comparison is not defined.
        Returns -1/0/1; ordering is: obsolete entries first, then by sorted
        occurrences, finally by msgid.
        """
        # First: Obsolete test
        if self.obsolete != other.obsolete:
            if self.obsolete:
                return -1
            else:
                return 1
        # Work on a copy to protect original
        occ1 = sorted(self.occurrences[:])
        occ2 = sorted(other.occurrences[:])
        pos = 0
        for entry1 in occ1:
            try:
                entry2 = occ2[pos]
            except IndexError:
                # self has more occurrences than other -> sorts after
                return 1
            pos = pos + 1
            if entry1[0] != entry2[0]:
                if entry1[0] > entry2[0]:
                    return 1
                else:
                    return -1
            if entry1[1] != entry2[1]:
                if entry1[1] > entry2[1]:
                    return 1
                else:
                    return -1
        # Finally: Compare message ID
        if self.msgid > other.msgid:
            return 1
        elif self.msgid < other.msgid:
            return -1
        return 0
    def __gt__(self, other):
        return self.__cmp__(other) > 0
    def __lt__(self, other):
        return self.__cmp__(other) < 0
    def __ge__(self, other):
        return self.__cmp__(other) >= 0
    def __le__(self, other):
        return self.__cmp__(other) <= 0
    def __eq__(self, other):
        return self.__cmp__(other) == 0
    def __ne__(self, other):
        return self.__cmp__(other) != 0
    def translated(self):
        """
        Returns ``True`` if the entry has been translated or ``False``
        otherwise.  Obsolete and fuzzy entries are never considered
        translated; a plural entry must have all plural forms filled in.
        """
        if self.obsolete or 'fuzzy' in self.flags:
            return False
        if self.msgstr != '':
            return True
        if self.msgstr_plural:
            for pos in self.msgstr_plural:
                if self.msgstr_plural[pos] == '':
                    return False
            return True
        return False
    def merge(self, other):
        """
        Merge the current entry with the given pot entry: take all metadata
        from ``other``, but keep existing msgstr_plural translations and
        preserve this entry's 'fuzzy' flag if it was set.
        """
        self.msgid = other.msgid
        self.msgctxt = other.msgctxt
        self.occurrences = other.occurrences
        self.comment = other.comment
        fuzzy = 'fuzzy' in self.flags
        self.flags = other.flags[:]  # clone flags
        if fuzzy:
            self.flags.append('fuzzy')
        self.msgid_plural = other.msgid_plural
        self.obsolete = other.obsolete
        self.previous_msgctxt = other.previous_msgctxt
        self.previous_msgid = other.previous_msgid
        self.previous_msgid_plural = other.previous_msgid_plural
        if other.msgstr_plural:
            for pos in other.msgstr_plural:
                try:
                    # keep existing translation at pos if any
                    self.msgstr_plural[pos]
                except KeyError:
                    self.msgstr_plural[pos] = ''
    def __hash__(self):
        # NOTE(review): hashes on (msgid, msgstr) while __eq__ (via __cmp__)
        # ignores msgstr -- entries that compare equal may hash differently;
        # confirm before relying on set/dict membership semantics.
        return hash((self.msgid, self.msgstr))
# class MOEntry {{{
class MOEntry(_BaseEntry):
    """
    Represents a mo file entry.
    """
    def __init__(self, *args, **kwargs):
        """
        Constructor.  For consistency with :class:`~polib.POEntry` the
        keyword arguments ``comment``, ``tcomment``, ``occurrences``,
        ``flags``, ``previous_msgctxt``, ``previous_msgid`` and
        ``previous_msgid_plural`` are accepted, but MO files carry no such
        data, so they are all discarded and reset to empty defaults.
        """
        _BaseEntry.__init__(self, *args, **kwargs)
        # PO-only attributes: kept for interface parity, always empty.
        for attr in ('comment', 'tcomment'):
            setattr(self, attr, '')
        self.occurrences = []
        self.flags = []
        for attr in ('previous_msgctxt', 'previous_msgid',
                     'previous_msgid_plural'):
            setattr(self, attr, None)
    def __hash__(self):
        """Hash on the (msgid, msgstr) pair."""
        return hash((self.msgid, self.msgstr))
# }}}
# class _POFileParser {{{
class _POFileParser(object):
    """
    A finite state machine to parse efficiently and correctly po
    file format.  Each input line is tokenized into a two-char symbol
    and fed to process(), which dispatches to a handle_XX method via
    the transition table built in the constructor.
    """
    def __init__(self, pofile, *args, **kwargs):
        """
        Constructor.

        Keyword arguments:

        ``pofile``
            string, path to the po file or its content
        ``encoding``
            string, the encoding to use, defaults to ``default_encoding``
            global variable (optional).
        ``check_for_duplicates``
            whether to check for duplicate entries when adding entries to the
            file (optional, default: ``False``).
        """
        enc = kwargs.get('encoding', default_encoding)
        if _is_file(pofile):
            try:
                self.fhandle = io.open(pofile, 'rt', encoding=enc)
            except LookupError:
                # unknown codec name: fall back to the default encoding
                enc = default_encoding
                self.fhandle = io.open(pofile, 'rt', encoding=enc)
        else:
            # ``pofile`` is the file content itself; iterate over its lines
            self.fhandle = pofile.splitlines()
        klass = kwargs.get('klass')
        if klass is None:
            klass = POFile
        self.instance = klass(
            pofile=pofile,
            encoding=enc,
            check_for_duplicates=kwargs.get('check_for_duplicates', False)
        )
        self.transitions = {}
        self.current_line = 0
        self.current_entry = POEntry(linenum=self.current_line)
        self.current_state = 'st'
        self.current_token = None
        # two memo flags used in handlers
        self.msgstr_index = 0
        self.entry_obsolete = 0
        # Configure the state machine, by adding transitions.
        # Signification of symbols:
        #     * ST: Beginning of the file (start)
        #     * HE: Header
        #     * TC: a translation comment
        #     * GC: a generated comment
        #     * OC: a file/line occurrence
        #     * FL: a flags line
        #     * CT: a message context
        #     * PC: a previous msgctxt
        #     * PM: a previous msgid
        #     * PP: a previous msgid_plural
        #     * MI: a msgid
        #     * MP: a msgid plural
        #     * MS: a msgstr
        #     * MX: a msgstr plural
        #     * MC: a msgid or msgstr continuation line
        # NOTE: ``all`` shadows the builtin here (local scope only).
        all = ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'pc', 'pm', 'pp', 'tc',
               'ms', 'mp', 'mx', 'mi']
        self.add('tc', ['st', 'he'], 'he')
        self.add('tc', ['gc', 'oc', 'fl', 'tc', 'pc', 'pm', 'pp', 'ms',
                        'mp', 'mx', 'mi'], 'tc')
        self.add('gc', all, 'gc')
        self.add('oc', all, 'oc')
        self.add('fl', all, 'fl')
        self.add('pc', all, 'pc')
        self.add('pm', all, 'pm')
        self.add('pp', all, 'pp')
        self.add('ct', ['st', 'he', 'gc', 'oc', 'fl', 'tc', 'pc', 'pm',
                        'pp', 'ms', 'mx'], 'ct')
        self.add('mi', ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'tc', 'pc',
                 'pm', 'pp', 'ms', 'mx'], 'mi')
        self.add('mp', ['tc', 'gc', 'pc', 'pm', 'pp', 'mi'], 'mp')
        self.add('ms', ['mi', 'mp', 'tc'], 'ms')
        self.add('mx', ['mi', 'mx', 'mp', 'tc'], 'mx')
        self.add('mc', ['ct', 'mi', 'mp', 'ms', 'mx', 'pm', 'pp', 'pc'], 'mc')
    def parse(self):
        """
        Run the state machine, parse the file line by line and call process()
        with the current matched symbol.  Returns the populated POFile
        instance.  Raises IOError on syntax errors.
        """
        keywords = {
            'msgctxt': 'ct',
            'msgid': 'mi',
            'msgstr': 'ms',
            'msgid_plural': 'mp',
        }
        prev_keywords = {
            'msgid_plural': 'pp',
            'msgid': 'pm',
            'msgctxt': 'pc',
        }
        tokens = []
        for line in self.fhandle:
            self.current_line += 1
            line = line.strip()
            if line == '':
                continue
            tokens = line.split(None, 2)
            nb_tokens = len(tokens)
            if tokens[0] == '#~|':
                # obsolete "previous" lines are not preserved
                continue
            if tokens[0] == '#~' and nb_tokens > 1:
                # obsolete entry: strip the marker and remember the flag
                line = line[3:].strip()
                tokens = tokens[1:]
                nb_tokens -= 1
                self.entry_obsolete = 1
            else:
                self.entry_obsolete = 0
            # Take care of keywords like
            # msgid, msgid_plural, msgctxt & msgstr.
            if tokens[0] in keywords and nb_tokens > 1:
                line = line[len(tokens[0]):].lstrip()
                if re.search(r'([^\\]|^)"', line[1:-1]):
                    raise IOError('Syntax error in po file %s (line %s): '
                                  'unescaped double quote found' %
                                  (self.instance.fpath, self.current_line))
                self.current_token = line
                self.process(keywords[tokens[0]])
                continue
            self.current_token = line
            if tokens[0] == '#:':
                if nb_tokens <= 1:
                    continue
                # we are on a occurrences line
                self.process('oc')
            elif line[:1] == '"':
                # we are on a continuation line
                if re.search(r'([^\\]|^)"', line[1:-1]):
                    raise IOError('Syntax error in po file %s (line %s): '
                                  'unescaped double quote found' %
                                  (self.instance.fpath, self.current_line))
                self.process('mc')
            elif line[:7] == 'msgstr[':
                # we are on a msgstr plural
                self.process('mx')
            elif tokens[0] == '#,':
                if nb_tokens <= 1:
                    continue
                # we are on a flags line
                self.process('fl')
            elif tokens[0] == '#' or tokens[0].startswith('##'):
                if line == '#':
                    line += ' '
                # we are on a translator comment line
                self.process('tc')
            elif tokens[0] == '#.':
                if nb_tokens <= 1:
                    continue
                # we are on a generated comment line
                self.process('gc')
            elif tokens[0] == '#|':
                if nb_tokens <= 1:
                    raise IOError('Syntax error in po file %s (line %s)' %
                                  (self.instance.fpath, self.current_line))
                # Remove the marker and any whitespace right after that.
                line = line[2:].lstrip()
                self.current_token = line
                if tokens[1].startswith('"'):
                    # Continuation of previous metadata.
                    self.process('mc')
                    continue
                if nb_tokens == 2:
                    # Invalid continuation line.
                    raise IOError('Syntax error in po file %s (line %s): '
                                  'invalid continuation line' %
                                  (self.instance.fpath, self.current_line))
                # we are on a "previous translation" comment line,
                if tokens[1] not in prev_keywords:
                    # Unknown keyword in previous translation comment.
                    raise IOError('Syntax error in po file %s (line %s): '
                                  'unknown keyword %s' %
                                  (self.instance.fpath, self.current_line,
                                   tokens[1]))
                # Remove the keyword and any whitespace
                # between it and the starting quote.
                line = line[len(tokens[1]):].lstrip()
                self.current_token = line
                self.process(prev_keywords[tokens[1]])
            # Patch to fix parsing of Kodi po files: silently skip any other
            # comment-like line instead of raising a syntax error.
            elif tokens[0].startswith("#"):
                continue
            else:
                raise IOError('Syntax error in po file %s (line %s)' %
                              (self.instance.fpath, self.current_line))
        if self.current_entry and len(tokens) > 0 and \
           not tokens[0].startswith('#'):
            # since entries are added when another entry is found, we must add
            # the last entry here (only if there are lines). Trailing comments
            # are ignored
            self.instance.append(self.current_entry)
        # before returning the instance, check if there's metadata and if
        # so extract it in a dict
        metadataentry = self.instance.find('')
        if metadataentry:  # metadata found
            # remove the entry
            self.instance.remove(metadataentry)
            self.instance.metadata_is_fuzzy = metadataentry.flags
            key = None
            for msg in metadataentry.msgstr.splitlines():
                try:
                    key, val = msg.split(':', 1)
                    self.instance.metadata[key] = val.strip()
                except (ValueError, KeyError):
                    # line without a colon: treat as continuation of the
                    # previous metadata value
                    if key is not None:
                        self.instance.metadata[key] += '\n' + msg.strip()
        # close opened file
        if not isinstance(self.fhandle, list):  # must be file
            self.fhandle.close()
        return self.instance
    def add(self, symbol, states, next_state):
        """
        Add a transition to the state machine.

        Keywords arguments:

        ``symbol``
            string, the matched token (two chars symbol).
        ``states``
            list, a list of states (two chars symbols).
        ``next_state``
            the next state the fsm will have after the action.
        """
        for state in states:
            action = getattr(self, 'handle_%s' % next_state)
            self.transitions[(symbol, state)] = (action, next_state)
    def process(self, symbol):
        """
        Process the transition corresponding to the current state and the
        symbol provided.  Any handler failure is re-raised as IOError with
        the current line number.

        Keywords arguments:

        ``symbol``
            string, the matched token (two chars symbol).
        """
        try:
            (action, state) = self.transitions[(symbol, self.current_state)]
            if action():
                self.current_state = state
        except Exception:
            raise IOError('Syntax error in po file (line %s)' %
                          self.current_line)
    # state handlers
    def handle_he(self):
        """Handle a header comment."""
        if self.instance.header != '':
            self.instance.header += '\n'
        self.instance.header += self.current_token[2:]
        return 1
    def handle_tc(self):
        """Handle a translator comment."""
        if self.current_state in ['mc', 'ms', 'mx']:
            # previous entry is complete: store it and start a new one
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        if self.current_entry.tcomment != '':
            self.current_entry.tcomment += '\n'
        tcomment = self.current_token.lstrip('#')
        if tcomment.startswith(' '):
            tcomment = tcomment[1:]
        self.current_entry.tcomment += tcomment
        return True
    def handle_gc(self):
        """Handle a generated comment."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        if self.current_entry.comment != '':
            self.current_entry.comment += '\n'
        self.current_entry.comment += self.current_token[3:]
        return True
    def handle_oc(self):
        """Handle a file:num occurrence."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        occurrences = self.current_token[3:].split()
        for occurrence in occurrences:
            if occurrence != '':
                try:
                    fil, line = occurrence.split(':')
                    if not line.isdigit():
                        # not a line number: keep the whole token as path
                        fil = fil + line
                        line = ''
                    self.current_entry.occurrences.append((fil, line))
                except (ValueError, AttributeError):
                    self.current_entry.occurrences.append((occurrence, ''))
        return True
    def handle_fl(self):
        """Handle a flags line."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.flags += [c.strip() for c in
                                     self.current_token[3:].split(',')]
        return True
    def handle_pp(self):
        """Handle a previous msgid_plural line."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.previous_msgid_plural = \
            unescape(self.current_token[1:-1])
        return True
    def handle_pm(self):
        """Handle a previous msgid line."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.previous_msgid = \
            unescape(self.current_token[1:-1])
        return True
    def handle_pc(self):
        """Handle a previous msgctxt line."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.previous_msgctxt = \
            unescape(self.current_token[1:-1])
        return True
    def handle_ct(self):
        """Handle a msgctxt."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.msgctxt = unescape(self.current_token[1:-1])
        return True
    def handle_mi(self):
        """Handle a msgid."""
        if self.current_state in ['mc', 'ms', 'mx']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry(linenum=self.current_line)
        self.current_entry.obsolete = self.entry_obsolete
        self.current_entry.msgid = unescape(self.current_token[1:-1])
        return True
    def handle_mp(self):
        """Handle a msgid plural."""
        self.current_entry.msgid_plural = unescape(self.current_token[1:-1])
        return True
    def handle_ms(self):
        """Handle a msgstr."""
        self.current_entry.msgstr = unescape(self.current_token[1:-1])
        return True
    def handle_mx(self):
        """Handle a msgstr plural."""
        # token looks like: msgstr[N] "value" -- index is the single char
        # at position 7, value starts after the opening quote at 11
        index, value = self.current_token[7], self.current_token[11:-1]
        self.current_entry.msgstr_plural[int(index)] = unescape(value)
        self.msgstr_index = int(index)
        return True
    def handle_mc(self):
        """Handle a msgid or msgstr continuation line."""
        token = unescape(self.current_token[1:-1])
        if self.current_state == 'ct':
            self.current_entry.msgctxt += token
        elif self.current_state == 'mi':
            self.current_entry.msgid += token
        elif self.current_state == 'mp':
            self.current_entry.msgid_plural += token
        elif self.current_state == 'ms':
            self.current_entry.msgstr += token
        elif self.current_state == 'mx':
            self.current_entry.msgstr_plural[self.msgstr_index] += token
        elif self.current_state == 'pp':
            self.current_entry.previous_msgid_plural += token
        elif self.current_state == 'pm':
            self.current_entry.previous_msgid += token
        elif self.current_state == 'pc':
            self.current_entry.previous_msgctxt += token
        # don't change the current state
        return False
# }}}
# class _MOFileParser {{{
class _MOFileParser(object):
    """
    A class to parse binary mo files.
    """
    def __init__(self, mofile, *args, **kwargs):
        """
        Constructor.

        Keyword arguments:

        ``mofile``
            string, path to the mo file or its content
        ``encoding``
            string, the encoding to use, defaults to ``default_encoding``
            global variable (optional).
        ``check_for_duplicates``
            whether to check for duplicate entries when adding entries to the
            file (optional, default: ``False``).
        """
        self.fhandle = open(mofile, 'rb')
        klass = kwargs.get('klass')
        if klass is None:
            klass = MOFile
        self.instance = klass(
            fpath=mofile,
            encoding=kwargs.get('encoding', default_encoding),
            check_for_duplicates=kwargs.get('check_for_duplicates', False)
        )
    def __del__(self):
        """
        Make sure the file is closed, this prevents warnings on unclosed file
        when running tests with python >= 3.2.
        """
        # NOTE(review): if open() raised in __init__, ``fhandle`` was never
        # set and this raises AttributeError during GC -- consider a
        # getattr() guard; confirm before changing.
        if self.fhandle:
            self.fhandle.close()
    def parse(self):
        """
        Build the instance with the file handle provided in the
        constructor.  Returns the populated MOFile instance; raises
        IOError on an invalid magic number or revision.
        """
        # parse magic number
        magic_number = self._readbinary('<I', 4)
        if magic_number == MOFile.MAGIC:
            ii = '<II'
        elif magic_number == MOFile.MAGIC_SWAPPED:
            # big-endian file: swap byte order for all further reads
            ii = '>II'
        else:
            raise IOError('Invalid mo file, magic number is incorrect !')
        self.instance.magic_number = magic_number
        # parse the version number and the number of strings
        version, numofstrings = self._readbinary(ii, 8)
        # from MO file format specs: "A program seeing an unexpected major
        # revision number should stop reading the MO file entirely"
        if version not in (0, 1):
            raise IOError('Invalid mo file, unexpected major revision number')
        self.instance.version = version
        # original strings and translation strings hash table offset
        msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
        # move to msgid hash table and read length and offset of msgids
        self.fhandle.seek(msgids_hash_offset)
        msgids_index = []
        for i in range(numofstrings):
            msgids_index.append(self._readbinary(ii, 8))
        # move to msgstr hash table and read length and offset of msgstrs
        self.fhandle.seek(msgstrs_hash_offset)
        msgstrs_index = []
        for i in range(numofstrings):
            msgstrs_index.append(self._readbinary(ii, 8))
        # build entries
        encoding = self.instance.encoding
        for i in range(numofstrings):
            self.fhandle.seek(msgids_index[i][1])
            msgid = self.fhandle.read(msgids_index[i][0])
            self.fhandle.seek(msgstrs_index[i][1])
            msgstr = self.fhandle.read(msgstrs_index[i][0])
            if i == 0 and not msgid:  # metadata
                # the empty-msgid entry holds "Key: value" metadata lines
                raw_metadata, metadata = msgstr.split(b('\n')), {}
                for line in raw_metadata:
                    tokens = line.split(b(':'), 1)
                    if tokens[0] != b(''):
                        try:
                            k = tokens[0].decode(encoding)
                            v = tokens[1].decode(encoding)
                            metadata[k] = v.strip()
                        except IndexError:
                            metadata[k] = u('')
                self.instance.metadata = metadata
                continue
            # test if we have a plural entry (singular and plural msgids
            # are joined with a NUL byte)
            msgid_tokens = msgid.split(b('\0'))
            if len(msgid_tokens) > 1:
                entry = self._build_entry(
                    msgid=msgid_tokens[0],
                    msgid_plural=msgid_tokens[1],
                    msgstr_plural=dict((k, v) for k, v in
                                       enumerate(msgstr.split(b('\0'))))
                )
            else:
                entry = self._build_entry(msgid=msgid, msgstr=msgstr)
            self.instance.append(entry)
        # close opened file
        self.fhandle.close()
        return self.instance
    def _build_entry(self, msgid, msgstr=None, msgid_plural=None,
                     msgstr_plural=None):
        # A msgctxt, when present, is stored joined to the msgid with an
        # EOT (\x04) separator; split it back out before decoding.
        msgctxt_msgid = msgid.split(b('\x04'))
        encoding = self.instance.encoding
        if len(msgctxt_msgid) > 1:
            kwargs = {
                'msgctxt': msgctxt_msgid[0].decode(encoding),
                'msgid': msgctxt_msgid[1].decode(encoding),
            }
        else:
            kwargs = {'msgid': msgid.decode(encoding)}
        if msgstr:
            kwargs['msgstr'] = msgstr.decode(encoding)
        if msgid_plural:
            kwargs['msgid_plural'] = msgid_plural.decode(encoding)
        if msgstr_plural:
            for k in msgstr_plural:
                msgstr_plural[k] = msgstr_plural[k].decode(encoding)
            kwargs['msgstr_plural'] = msgstr_plural
        return MOEntry(**kwargs)
    def _readbinary(self, fmt, numbytes):
        """
        Private method that unpack n bytes of data using format <fmt>.
        It returns a tuple or a mixed value if the tuple length is 1.
        """
        # NOTE: ``bytes`` shadows the builtin (local scope only)
        bytes = self.fhandle.read(numbytes)
        tup = struct.unpack(fmt, bytes)
        if len(tup) == 1:
            return tup[0]
        return tup
# }}}
# class TextWrapper {{{
class TextWrapper(textwrap.TextWrapper):
    """
    Subclass of textwrap.TextWrapper that backport the
    drop_whitespace option (for pythons whose TextWrapper lacks it).
    """
    def __init__(self, *args, **kwargs):
        # pop the option before delegating so older TextWrapper
        # constructors don't choke on an unknown keyword
        drop_whitespace = kwargs.pop('drop_whitespace', True)
        textwrap.TextWrapper.__init__(self, *args, **kwargs)
        self.drop_whitespace = drop_whitespace
    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less. (If 'break_long_words' is false,
        some lines may be longer than this.) Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks. Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
        chunks.reverse()
        while chunks:
            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0
            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Maximum width for this line.
            width = self.width - len(indent)
            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                l = len(chunks[-1])
                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                # Nope, this line is full.
                else:
                    break
            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and not cur_line[-1].strip():
                del cur_line[-1]
            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
# }}}
# function wrap() {{{
def wrap(text, width=70, **kwargs):
    """
    Wrap a single paragraph of text, returning a list of wrapped lines.

    Delegates to :func:`textwrap.wrap` on python >= 2.6; older pythons
    use the local :class:`TextWrapper` backport so that the
    ``drop_whitespace`` keyword is supported everywhere.
    """
    if sys.version_info >= (2, 6):
        return textwrap.wrap(text, width=width, **kwargs)
    return TextWrapper(width=width, **kwargs).wrap(text)
# }}}
| gpl-3.0 |
pgmillon/ansible | lib/ansible/modules/net_tools/nios/nios_fixed_address.py | 31 | 8390 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_fixed_address
version_added: "2.8"
author: "Sumit Jaiswal (@sjaiswal)"
short_description: Configure Infoblox NIOS DHCP Fixed Address
description:
- A fixed address is a specific IP address that a DHCP server
always assigns when a lease request comes from a particular
    MAC address of the client.
- Supports both IPV4 and IPV6 internet protocols
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the hostname with which fixed DHCP ip-address is stored
for respective mac.
required: false
ipaddr:
description:
- IPV4/V6 address of the fixed address.
required: true
mac:
description:
- The MAC address of the interface.
required: true
network:
description:
- Specifies the network range in which ipaddr exists.
required: true
aliases:
- network
network_view:
description:
- Configures the name of the network view to associate with this
configured instance.
required: false
default: default
options:
description:
- Configures the set of DHCP options to be included as part of
the configured network instance. This argument accepts a list
of values (see suboptions). When configuring suboptions at
least one of C(name) or C(num) must be specified.
suboptions:
name:
description:
- The name of the DHCP option to configure
num:
description:
- The number of the DHCP option to configure
value:
description:
- The value of the DHCP option specified by C(name)
required: true
use_option:
description:
- Only applies to a subset of options (see NIOS API documentation)
type: bool
default: 'yes'
vendor_class:
description:
- The name of the space this DHCP option is associated to
default: DHCP
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure ipv4 dhcp fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: configure a ipv6 dhcp fixed address
nios_fixed_address:
name: ipv6_fixed
ipaddr: fe80::1/10
mac: 08:6d:41:e8:fd:e8
network: fe80::/64
network_view: default
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: set dhcp options for a ipv4 fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
comment: this is a test comment
options:
- name: domain-name
value: ansible.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a ipv4 dhcp fixed address
nios_fixed_address:
name: ipv4_fixed
ipaddr: 192.168.10.1
mac: 08:6d:41:e8:fd:e8
network: 192.168.10.0/24
network_view: default
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
from ansible.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
def options(module):
    ''' Transforms the module argument into a valid WAPI struct

    This function will transform the options argument into a structure that
    is a valid WAPI structure in the format of:

        {
            name: <value>,
            num: <value>,
            value: <value>,
            use_option: <value>,
            vendor_class: <value>
        }

    It will remove any options that are set to None since WAPI will error on
    that condition.  It will also verify that either `name` or `num` is
    set in the structure but does not validate the values are equal.

    The remainder of the value validation is performed by WAPI

    :param module: AnsibleModule instance; ``module.params['options']`` is
        the list of raw option dicts to transform.
    :returns: list of cleaned option dicts suitable for the WAPI request.
    '''
    options = list()
    for item in module.params['options']:
        # drop keys set to None -- WAPI errors on explicit nulls.
        # dict.items() works identically on py2 and py3, so the six
        # iteritems() compat helper is not needed here.
        opt = dict((k, v) for k, v in item.items() if v is not None)
        if 'name' not in opt and 'num' not in opt:
            module.fail_json(msg='one of `name` or `num` is required for option value')
        options.append(opt)
    return options
def validate_ip_addr_type(ip, arg_spec, module):
    '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type

    :param ip: IP address string, optionally with a ``/prefix`` suffix.
    :param arg_spec: the ib_spec dict; its ``ipaddr`` key is renamed in
        place to the protocol-specific ``ipv4addr``/``ipv6addr`` key.
    :param module: AnsibleModule instance; ``module.params`` gets the same
        key rename applied.
    :returns: tuple of (NIOS fixed-address object type, updated arg_spec,
        updated module).
    '''
    check_ip = ip.split('/')
    if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
        arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
        module.params['ipv4addr'] = module.params.pop('ipaddr')
        return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
    elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
        arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
        module.params['ipv6addr'] = module.params.pop('ipaddr')
        return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
    # Previously this fell through returning None, which made the caller's
    # three-way unpack crash with an opaque TypeError; fail explicitly with
    # a clear message instead.
    module.fail_json(msg='ipaddr %s is not a valid IPv4 or IPv6 address' % ip)
def main():
    ''' Main entry point for module execution
    '''
    option_spec = dict(
        # one of name or num is required; enforced by the function options()
        name=dict(),
        num=dict(type='int'),
        value=dict(required=True),
        use_option=dict(type='bool', default=True),
        vendor_class=dict(default='DHCP')
    )
    # spec of the infoblox-specific arguments; ib_req marks the fields used
    # to look the object up on the WAPI side
    ib_spec = dict(
        name=dict(required=True),
        # NOTE(review): aliases identical to the option name (ipaddr, mac,
        # network) are redundant; harmless, but could be dropped -- confirm
        # no playbooks rely on them before removing.
        ipaddr=dict(required=True, aliases=['ipaddr'], ib_req=True),
        mac=dict(required=True, aliases=['mac'], ib_req=True),
        network=dict(required=True, aliases=['network'], ib_req=True),
        network_view=dict(default='default', aliases=['network_view']),
        options=dict(type='list', elements='dict', options=option_spec, transform=options),
        extattrs=dict(type='dict'),
        comment=dict()
    )
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # to get the argument ipaddr
    obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
    # to modify argument based on ipaddr type i.e. IPV4/IPV6
    fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
    wapi = WapiModule(module)
    result = wapi.run(fixed_address_ip_type, ib_spec)
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
chokribr/inveniotest | modules/miscutil/lib/sequtils_cnum.py | 10 | 4473 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.sequtils import SequenceGenerator
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, create_record
from invenio.search_engine import perform_request_search
from invenio.dbquery import run_sql
class ConferenceNoStartDateError(Exception):
    """Raised when no start date can be determined for a conference record
    (see CnumSeq._next_value, which reads field 111__x)."""
    pass
class CnumSeq(SequenceGenerator):
    """
    cnum sequence generator

    Generates conference numbers (cnums) of the form ``Cyy-mm-dd`` for the
    first conference on a given start date, and ``Cyy-mm-dd.N`` for every
    subsequent one.
    """
    seq_name = 'cnum'
    def _get_record_cnums(self, value):
        """
        Get all the values that start with the base cnum

        @param value: base cnum
        @type value: string

        @return: values starting by the base cnum
        @rtype: tuple
        """
        return run_sql("""SELECT seq_value FROM seqSTORE WHERE seq_value
                       LIKE %s AND seq_name=%s""",
                       (value + "%", self.seq_name))
    def _next_value(self, recid=None, xml_record=None, start_date=None):
        """
        Returns the next cnum for the given recid

        @param recid: id of the record where the cnum will be generated
        @type recid: int
        @param xml_record: record in xml format
        @type xml_record: string
        @param start_date: use given start date
        @type start_date: string

        @return: next cnum for the given recid. Format is Cyy-mm-dd[.n]
        @rtype: string

        @raises ConferenceNoStartDateError: No date information found in the
        given recid
        """
        bibrecord = None
        if recid is None and xml_record is not None:
            bibrecord = create_record(xml_record)[0]
        elif recid is not None:
            bibrecord = get_bibrecord(recid)
        if start_date is None and bibrecord is not None:
            # conference start date lives in MARC field 111__x
            start_date = record_get_field_value(bibrecord,
                                                tag="111",
                                                ind1="",
                                                ind2="",
                                                code="x")
        if not start_date:
            raise ConferenceNoStartDateError
        base_cnum = "C" + start_date[2:]
        record_cnums = self._get_record_cnums(base_cnum)
        if not record_cnums:
            # first conference on that date: bare base cnum
            new_cnum = base_cnum
        else:
            # Existing cnums are the bare base cnum and/or revisions of the
            # form Cyy-mm-dd.N.  Collect revision numbers by filtering on
            # the '.' separator instead of skipping the first row, which
            # wrongly assumed the bare cnum is always returned first (SQL
            # row order is not guaranteed) and could raise IndexError.
            revisions = [int(row[0].split('.')[1])
                         for row in record_cnums if '.' in row[0]]
            if revisions:
                new_cnum = base_cnum + '.' + str(max(revisions) + 1)
            else:
                new_cnum = base_cnum + '.1'
        return new_cnum
# Helper functions to populate cnums from existing database records
def _cnum_exists(cnum):
    """
    Return a non-empty result iff the given cnum is already stored in
    the seqSTORE table under the 'cnum' sequence.
    """
    rows = run_sql("""select seq_value from seqSTORE where seq_value=%s and seq_name='cnum'""", (cnum, ))
    return rows
def _insert_cnum(cnum):
    """
    Store a new cnum row in the seqSTORE table.
    """
    result = run_sql("INSERT INTO seqSTORE (seq_name, seq_value) VALUES (%s, %s)", ("cnum", cnum))
    return result
def populate_cnums():
    """
    Populates table seqSTORE with the cnums present in CONFERENCE records
    """
    # First get all records from conference collection
    conf_records = perform_request_search(cc="Conferences", p="111__g:C*", rg=0)
    for recid in conf_records:
        # 111__g holds the conference number (cnum) of the record.
        cnum = record_get_field_value(get_bibrecord(recid), tag="111", ind1="", ind2="", code="g")
        if cnum:
            # Only register cnums not already tracked, so the function
            # is safe to re-run.
            if not _cnum_exists(cnum):
                _insert_cnum(cnum)
                print "cnum %s from record %s inserted" % (cnum, recid)
| gpl-2.0 |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/boto/boto/s3/tagging.py | 236 | 1732 | from boto import handler
import xml.sax
class Tag(object):
    """A single S3 bucket tag: a key/value pair.

    Instances are either created directly or filled in incrementally by
    boto's SAX parsing machinery via the startElement/endElement hooks.
    """
    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value

    def startElement(self, name, attrs, connection):
        # <Tag> has no nested container elements; nothing to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Capture the text content of the <Key> and <Value> children.
        if name == 'Key':
            self.key = value
        elif name == 'Value':
            self.value = value

    def to_xml(self):
        # NOTE(review): key/value are not XML-escaped here; presumably
        # callers never pass '<' or '&' -- TODO confirm upstream usage.
        return '<Tag><Key>%s</Key><Value>%s</Value></Tag>' % (
            self.key, self.value)

    def __eq__(self, other):
        # BUGFIX: comparing against a non-Tag used to raise AttributeError;
        # returning NotImplemented lets Python fall back sensibly.
        if not isinstance(other, Tag):
            return NotImplemented
        return (self.key == other.key and self.value == other.value)

    def __ne__(self, other):
        # BUGFIX: Python 2 does not derive __ne__ from __eq__, so
        # "t1 != t2" was always True, even for equal tags.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class TagSet(list):
    """A list of Tag objects forming one tag set of a bucket's tagging
    configuration."""
    def startElement(self, name, attrs, connection):
        # SAX hook: each nested <Tag> element is parsed by its own
        # freshly appended Tag handler.
        if name != 'Tag':
            return None
        child = Tag()
        self.append(child)
        return child
    def endElement(self, name, value, connection):
        # Record any other closing element as an attribute on this set.
        setattr(self, name, value)
    def add_tag(self, key, value):
        """Append a new Tag built from the given key/value pair."""
        self.append(Tag(key, value))
    def to_xml(self):
        """Serialize this tag set (and all contained tags) to XML."""
        inner = ''.join(tag.to_xml() for tag in self)
        return '<TagSet>' + inner + '</TagSet>'
class Tags(list):
    """A container for the tags associated with a bucket."""
    def startElement(self, name, attrs, connection):
        # SAX hook: each <TagSet> element is parsed by its own freshly
        # appended TagSet handler.
        if name != 'TagSet':
            return None
        child = TagSet()
        self.append(child)
        return child
    def endElement(self, name, value, connection):
        # Record any other closing element as an attribute.
        setattr(self, name, value)
    def to_xml(self):
        """Serialize the full tagging configuration to XML."""
        body = ''.join(tag_set.to_xml() for tag_set in self)
        return '<Tagging>' + body + '</Tagging>'
    def add_tag_set(self, tag_set):
        """Append an already-built tag set to this container."""
        self.append(tag_set)
| bsd-3-clause |
dracos/QGIS | python/plugins/db_manager/dlg_create_index.py | 6 | 3022 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QDialog, QMessageBox, QApplication
from .db_plugins.plugin import DbError
from .dlg_db_error import DlgDbError
from .db_plugins.plugin import TableIndex
from .ui.ui_DlgCreateIndex import Ui_DbManagerDlgCreateIndex as Ui_Dialog
class DlgCreateIndex(QDialog, Ui_Dialog):
    """Dialog that lets the user create a single-column index on a table."""
    def __init__(self, parent=None, table=None, db=None):
        QDialog.__init__(self, parent)
        self.table = table
        # Prefer the table's own database connection; fall back to the
        # explicitly supplied one.
        self.db = self.table.database() if self.table and self.table.database() else db
        self.setupUi(self)
        # Wire dialog acceptance to index creation and column selection
        # to the automatic index-name suggestion.
        self.connect(self.buttonBox, SIGNAL("accepted()"), self.createIndex)
        self.connect(self.cboColumn, SIGNAL("currentIndexChanged(int)"), self.columnChanged)
        self.populateColumns()
    def populateColumns(self):
        """Fill the column combo box with the table's field names."""
        self.cboColumn.clear()
        for fld in self.table.fields():
            self.cboColumn.addItem(fld.name)
    def columnChanged(self):
        """Suggest an index name of the form idx_<table>_<column>."""
        self.editName.setText(u"idx_%s_%s" % (self.table.name, self.cboColumn.currentText()))
    def createIndex(self):
        """Validate the input and create the index on the database."""
        idx = self.getIndex()
        if idx.name == "":
            QMessageBox.critical(self, self.tr("Error"), self.tr("Please enter some name for the index"))
            return
        # now create the index
        QApplication.setOverrideCursor(Qt.WaitCursor)
        try:
            self.table.addIndex(idx)
        except DbError, e:
            DlgDbError.showError(e, self)
            return
        finally:
            # Always restore the cursor, even when creation fails.
            QApplication.restoreOverrideCursor()
        self.accept()
    def getIndex(self):
        """Build a TableIndex object from the current dialog state."""
        idx = TableIndex(self.table)
        idx.name = self.editName.text()
        idx.columns = []
        colname = self.cboColumn.currentText()
        # Resolve the selected column name back to its field number.
        for fld in self.table.fields():
            if fld.name == colname:
                idx.columns.append(fld.num)
                break
        return idx
| gpl-2.0 |
google-research/language | language/boolq/utils/ops_test.py | 1 | 2375 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.boolq.utils import ops
import tensorflow.compat.v1 as tf
class OpsTest(tf.test.TestCase):
  """Unit tests for the custom TF ops in language.boolq.utils.ops."""
  def test_lowercase(self):
    """lowercase_op should match Python str.lower on ASCII input."""
    with self.test_session() as sess:
      test_str = [["Abc%@||", "DZ dzD", ""]]
      self.assertEqual(
          sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
          [[x.lower() for x in test_str[0]]])
  def test_lowercase_unicode(self):
    """lowercase_op should also lowercase non-ASCII (unicode) text."""
    with self.test_session() as sess:
      test_str = ["ŠČŽɬЩЮɦ"]
      self.assertEqual(
          sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
          [test_str[0].lower()])
  def test_bucket_by_quantiles(self):
    """bucket_by_quantiles should learn length quantiles and batch by them."""
    with self.test_session() as sess:
      # Infinite stream of the lengths 0..9.
      data = tf.data.Dataset.from_tensor_slices(list(range(10))).repeat()
      data = data.apply(ops.bucket_by_quantiles(
          len_fn=lambda x: x, batch_size=4, n_buckets=2,
          hist_bounds=[2, 4, 6, 8]))
      it = data.make_initializable_iterator()
      sess.run(it.initializer)
      # The histogram counters live in local variables.
      sess.run(tf.local_variables_initializer())
      next_op = it.get_next()
      # Let the model gather statistics, it sees 4*5=20 = 2 epochs,
      # so each bin should have a count of 4
      for _ in range(5):
        sess.run(next_op)
      counts = sess.run(tf.local_variables()[0])
      # Cumulative histogram after two epochs over 0..9.
      self.assertEqual(counts.tolist(), [4, 8, 12, 16, 20])
      # At this point the model should perfectly quantize the input
      for _ in range(4):
        out = sess.run(next_op)
        if out[0] < 5:
          self.assertAllInRange(out, 0, 5)
        else:
          self.assertAllInRange(out, 5, 10)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
rhdedgar/openshift-tools | openshift_tools/monitoring/pminfo_parse.py | 13 | 4733 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
PMInfo - Get metrics from Performance CoPilot
This will parse the output of pminfo and return a dictionary of
the metrics and their values
This requires that pminfo is installed on the host.
pminfo will query the localhost host, not another host.
NOTE: This does the same thing as pminfo.py, but uses the binary pminfo
file to call pminfo, and parse the output. pminfo.py should be used
instead.
Examples:
# Collect known values:
import pminfo
metrics = ['quota.project.files.soft', 'kernel.all.load',
'kernel.percpu.interrupts.THR', 'kernel.all.cpu.irq.hard']
metric_dict = pminfo.get_metrics(metrics)
# Collect all values:
metric_dict = pminfo.get_metrics()
'''
import subprocess
import re
def get_metrics(metrics=None):
    '''
    Collect Performance CoPilot metrics via pminfo.

    Pass a list of metric names to restrict the result; with no argument
    every metric reported by pminfo is returned. The return value is a
    dict of { metric_name: value }.
    '''
    parser = PMInfo()
    resolved = PMInfo.get_pminfo_metrics(metrics)
    parser.fetch_pminfo_metrics(resolved)
    parser.build_metric_regex(resolved)
    parser.create_metric_dict()
    parser.parse_pminfo()
    return parser.metric_dict
class PMInfo(object):
    '''
    PMINFOParser: Performance CoPilot pminfo output parser
    '''
    def __init__(self):
        # Raw `pminfo -f` output, filled in by fetch_pminfo_metrics().
        self.data = None
        # Compiled regex used to split self.data per metric name.
        self.metric_regex = None
        # Mapping of metric name -> raw value block, then parsed values.
        self.metric_dict = {}
    def metric_print(self):
        '''
        Print the metric_dict
        '''
        for key, value in self.metric_dict.items():
            print key
            print " %s" % value
    def build_metric_regex(self, metrics):
        '''
        Build the metric regex
        '''
        # Match any of the metric names when it appears on its own line;
        # the capturing group makes re.split() keep the names.
        joined_metrics = '|'.join(metrics)
        metric_str = '\n(' + joined_metrics + ')\n'
        self.metric_regex = re.compile(metric_str)
    @staticmethod
    def get_pminfo_metrics(metrics):
        '''
        Get a list of metrics from pminfo. Return them in a list
        '''
        # Without metric_keys pminfo lists every known metric name.
        metrics = PMInfo.run_pminfo(metric_keys=metrics).strip().split('\n')
        metrics = [s.strip() for s in metrics]
        return metrics
    def fetch_pminfo_metrics(self, metrics):
        '''
        This function calls the pminfo binary with the -f switch.
        The -f switch 'fetches' the values from pminfo.
        '''
        self.data = PMInfo.run_pminfo(['-f'], metrics)
    @staticmethod
    def run_pminfo(args=None, metric_keys=None):
        '''
        Function to run pminfo command with a list of metrics
        '''
        cmd = ['/usr/bin/pminfo']
        if args:
            cmd += args
        if metric_keys:
            cmd += metric_keys
        # stderr is inherited; only stdout is captured and returned.
        process = subprocess.Popen(cmd, stderr=None, stdout=subprocess.PIPE)
        return process.stdout.read()
    def create_metric_dict(self):
        '''
        Build a metric dict that will be used to collect the metrics and values
        '''
        # re.split with a capturing group yields
        # [prefix, name, value, name, value, ...]; names are at the odd
        # indexes, their value blocks immediately follow.
        split_data = re.split(self.metric_regex, self.data)
        exception_list = ['No value(s) available!',
                          'Error: Metric not supported',
                         ]
        for i in range(1, len(split_data), 2):
            # Skip metrics pminfo could not provide a value for.
            if any([exce in split_data[i+1] for exce in exception_list]):
                continue
            self.metric_dict[split_data[i]] = split_data[i+1]
    def parse_pminfo(self):
        '''
        Function to parse pminfo and return a dict of { metric: metric_value(s) }
        '''
        results = {}
        # Instance lines look like: inst [N or "label"] value V
        inst_line = re.compile(r'\[\d+ or')
        for metric, metric_value in self.metric_dict.items():
            if metric_value.startswith(' value'):
                # Single-valued metric: "value V"
                metric_value = metric_value.strip()
                value = metric_value.split()[1]
                results[metric] = value
            elif metric_value.startswith(' inst'):
                # Multi-instance metric: one entry per instance, keyed as
                # <metric>.<instance label or number>.
                insts = metric_value.split(' inst ')
                for inst in insts:
                    if not inst:
                        continue
                    metric_subname = None
                    if inst_line.match(inst):
                        # Use the quoted label, spaces replaced for key safety.
                        metric_subname = inst.split('"')[1].replace(" ", "_")
                    else:
                        # No label: fall back to the numeric instance id.
                        metric_subname = inst.split('[')[1].split(']')[0]
                    metric_value = inst.split('] value ')[1].strip()
                    metric_name = metric + "." + metric_subname
                    results[metric_name] = metric_value
            else:
                print "PMINFOParser: Unknown metric key and value: %s : %s" \
                    % (metric, metric_value)
        self.metric_dict = results
| apache-2.0 |
bratsche/Neutron-Drive | google_appengine/lib/jinja2/jinja2/meta.py | 406 | 4144 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection."""
    def __init__(self, environment):
        # Dummy name/filename: the generated source is never emitted.
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        # Names that will be looked up from the runtime context,
        # collected as a side effect of visiting the AST.
        self.undeclared_identifiers = set()
    def write(self, x):
        """Don't write."""
        # Intentionally a no-op: only the traversal side effects matter.
    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Return the set of variable names that *ast* will look up from the
    template context at runtime.

    Since the execution path is unknown at compile time, every variable
    that *could* be resolved from the context is included.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast)
    set(['bar'])

    .. admonition:: Implementation

       Internally the code generator is used for finding undeclared
       variables, so this function may raise a
       :exc:`TemplateAssertionError` during compilation just like the
       code generator itself.
    """
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST. This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports. If dynamic inheritance or inclusion is used, `None` will be
    yielded.
    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]
    This function is useful for dependency tracking. For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # All node types whose `template` attribute can name another template.
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, basestring):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, basestring):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings. We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
             isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, basestring):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
| bsd-3-clause |
alfredoavanzosc/odoo-addons | project_partner_event_registration/models/event.py | 2 | 1660 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class EventEvent(models.Model):
    _inherit = 'event.event'
    @api.one
    def assign_partners(self):
        """Create an event registration for every partner of the linked
        project that is not registered yet (idempotent on re-run)."""
        registry_obj = self.env['event.registration']
        for partner in self.project_id.partners:
            # Skip partners that already have a registration for this event.
            if not registry_obj.search_count(
                    [('event_id', '=', self.id),
                     ('partner_id', '=', partner.id)]):
                registry_obj.create({
                    'partner_id': partner.id,
                    'event_id': self.id,
                    'name': partner.name,
                    'email': partner.email,
                    'phone': partner.phone,
                    # (4, id) links the partner and the responsible user
                    # as followers of the new registration.
                    'message_follower_ids': [
                        (4, partner.id),
                        (4, self.user_id.partner_id.id)]})
| agpl-3.0 |
yephper/django | django/db/models/sql/datastructures.py | 1 | 5769 | """
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
from django.db.models.sql.constants import INNER, LOUTER
class EmptyResultSet(Exception):
    """Signals that a query would match no rows, so SQL need not be run."""
    pass
class MultiJoin(Exception):
    """
    Used by join construction code to indicate the point at which a
    multi-valued join was attempted (if the caller wants to treat that
    exceptionally).
    """
    def __init__(self, names_pos, path_with_names):
        # Position in the field-name list where the multi-valued join occurred.
        self.level = names_pos
        # The path travelled, this includes the path to the multijoin.
        self.names_with_path = path_with_names
class Empty(object):
    """Marker/placeholder class with no behavior of its own."""
    pass
class Join(object):
    """
    Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the
    FROM entry. For example, the SQL generated could be
    LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id")
    This class is primarily used in Query.alias_map. All entries in alias_map
    must be Join compatible by providing the following attributes and methods:
    - table_name (string)
    - table_alias (possible alias for the table, can be None)
    - join_type (can be None for those entries that aren't joined from
    anything)
    - parent_alias (which table is this join's parent, can be None similarly
    to join_type)
    - as_sql()
    - relabeled_clone()
    """
    def __init__(self, table_name, parent_alias, table_alias, join_type,
                 join_field, nullable):
        # Join table
        self.table_name = table_name
        self.parent_alias = parent_alias
        # Note: table_alias is not necessarily known at instantiation time.
        self.table_alias = table_alias
        # LOUTER or INNER
        self.join_type = join_type
        # A list of 2-tuples to use in the ON clause of the JOIN.
        # Each 2-tuple will create one join condition in the ON clause.
        self.join_cols = join_field.get_joining_columns()
        # Along which field (or ForeignObjectRel in the reverse join case)
        self.join_field = join_field
        # Is this join nullabled?
        self.nullable = nullable
    def as_sql(self, compiler, connection):
        """
        Generates the full
        LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params
        clause for this join.
        """
        join_conditions = []
        params = []
        qn = compiler.quote_name_unless_alias
        qn2 = connection.ops.quote_name
        # Add a join condition for each pair of joining columns.
        for index, (lhs_col, rhs_col) in enumerate(self.join_cols):
            join_conditions.append('%s.%s = %s.%s' % (
                qn(self.parent_alias),
                qn2(lhs_col),
                qn(self.table_alias),
                qn2(rhs_col),
            ))
        # Add a single condition inside parentheses for whatever
        # get_extra_restriction() returns.
        extra_cond = self.join_field.get_extra_restriction(
            compiler.query.where_class, self.table_alias, self.parent_alias)
        if extra_cond:
            extra_sql, extra_params = compiler.compile(extra_cond)
            join_conditions.append('(%s)' % extra_sql)
            params.extend(extra_params)
        if not join_conditions:
            # This might be a rel on the other end of an actual declared field.
            declared_field = getattr(self.join_field, 'field', self.join_field)
            raise ValueError(
                "Join generated an empty ON clause. %s did not yield either "
                "joining columns or extra restrictions." % declared_field.__class__
            )
        on_clause_sql = ' AND '.join(join_conditions)
        # Omit the alias when it equals the table name (no "AS" needed).
        alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)
        sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql)
        return sql, params
    def relabeled_clone(self, change_map):
        # Produce a copy with parent/table aliases renamed per change_map.
        new_parent_alias = change_map.get(self.parent_alias, self.parent_alias)
        new_table_alias = change_map.get(self.table_alias, self.table_alias)
        return self.__class__(
            self.table_name, new_parent_alias, new_table_alias, self.join_type,
            self.join_field, self.nullable)
    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances unhashable
        # on Python 3 -- confirm whether Join is ever used as a dict/set key.
        if isinstance(other, self.__class__):
            return (
                self.table_name == other.table_name and
                self.parent_alias == other.parent_alias and
                self.join_field == other.join_field
            )
        return False
    def demote(self):
        # Turn a LEFT OUTER JOIN into an INNER JOIN.
        new = self.relabeled_clone({})
        new.join_type = INNER
        return new
    def promote(self):
        # Turn an INNER JOIN into a LEFT OUTER JOIN.
        new = self.relabeled_clone({})
        new.join_type = LOUTER
        return new
class BaseTable(object):
    """
    The BaseTable class is used for base table references in FROM clause. For
    example, the SQL "foo" in
    SELECT * FROM "foo" WHERE somecond
    could be generated by this class.
    """
    # Base tables are not joined from anything, so both are always None.
    join_type = None
    parent_alias = None
    def __init__(self, table_name, alias):
        self.table_name = table_name
        self.table_alias = alias
    def as_sql(self, compiler, connection):
        """Return the (sql, params) pair for this FROM-clause entry."""
        base_sql = compiler.quote_name_unless_alias(self.table_name)
        if self.table_alias == self.table_name:
            # No alias needed when it matches the table name.
            return base_sql, []
        return '%s %s' % (base_sql, self.table_alias), []
    def relabeled_clone(self, change_map):
        """Return a copy with the alias renamed per change_map."""
        new_alias = change_map.get(self.table_alias, self.table_alias)
        return self.__class__(self.table_name, new_alias)
| bsd-3-clause |
Zephrys/monica | monica/monica.py | 1 | 8780 | r"""
monica is a command line chef that brings you tasty food
Usage:
monica surprise
monica restaurant <restaurant-id>
monica search [QUERY ...]
monica reviews <restaurant-id>
monica budget <budget>
monica cuisine (<cuisine-id>| list)
monica configure
monica (-h |--help)
monica
Options:
-h --help Show this screen.
--version Show version.
"""
import requests
from docopt import docopt
import json
from config import configure
try:
from config import config
except:
print 'No Configuration File Found'
from config import flag
from tabulate import tabulate
import random
# Package version surfaced by docopt's --version handling in main().
__version__ = '0.1.0'
# Zomato v2.1 API headers: the key goes in 'user_key'; the curl User-Agent
# is presumably needed so the API accepts the client -- TODO confirm.
headers = {'Accept' : 'application/json', 'user_key': config['api_key'], 'User-Agent': 'curl/7.35.0'}
def url_shorten(longurl):
    """Shorten *longurl* via the Google URL shortener API; on any failure
    return a human-readable fallback string instead of raising."""
    endpoint = 'https://www.googleapis.com/urlshortener/v1/url?key=AIzaSyA76APOb611GHyJS_7ly_l-0Btvr798LcE'
    try:
        payload = json.dumps({'longUrl': longurl})
        response = requests.post(endpoint, headers = {'Content-Type' : 'application/json'}, data = payload)
        if response.status_code != 200:
            return "Couldn't Shorten"
        return response.json()['id']
    except:
        return "Couldnt Shorten"
def surprise():
    """Pick a random nearby restaurant within the configured budget and
    print it as a table."""
    url = 'https://developers.zomato.com/api/v2.1/search?lat=%s&lon=%s&count=100' %(config['lat'], config['lon'])
    try:
        response =requests.get(url, headers = headers)
        if response.status_code == 200:
            data = response.json()
            restaurants = data['restaurants']
            # Keep drawing random candidates until one fits the budget
            # (per person = average_cost_for_two / 2) or none remain.
            while True:
                if restaurants == []:
                    print 'Sorry nothing in your budget :('
                    return
                choice = random.choice(restaurants)
                budget = choice['restaurant']['average_cost_for_two']
                if float(budget)/2 <= config['budget']:
                    restaurant = choice['restaurant']
                    break
                else:
                    restaurants.remove(choice)
            table = [[restaurant["id"] , restaurant["name"], restaurant["currency"] + " " + str(float(restaurant['average_cost_for_two'])/2) , restaurant["user_rating"]["aggregate_rating"], restaurant["location"]["locality"]]]
            print tabulate(table, headers=["ID", "Name", "Budget", "Rating", "Locality"], tablefmt='fancy_grid')
        else:
            print 'Api Issues!'
    except:
        print 'Network Issues!'
def cuisine(cuisine):
if cuisine == 'list':
url = "https://developers.zomato.com/api/v2.1/cuisines?city_id=%s&lat%s&lon=%s" %(config['city_id'], config['lat'], config['lon'])
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
data = response.json()
cuisines = data['cuisines']
cuisine_list = []
for cuisine in cuisines:
cuisine = cuisine['cuisine']
cuisine_list.append([cuisine["cuisine_id"], cuisine["cuisine_name"]])
print tabulate(cuisine_list, headers=["ID", "Cuisine Name"],tablefmt='fancy_grid')
else:
print 'Api Error'
except:
print 'Network Error'
return
else:
url = "https://developers.zomato.com/api/v2.1/search?count=10&lat=%s&lon=%s&cuisines=%s&sort=cost" %(config['lat'], config['lon'], cuisine)
try:
response= requests.get(url, headers=headers)
if response.status_code == 200:
data = response.json()
count = data['results_found']
if count == 0:
print "Nothing Found!"
else:
restaurants = data["restaurants"]
restaurants_list = []
for restaurant in restaurants:
restaurant = restaurant['restaurant']
restaurants_list.append([restaurant["id"] , restaurant["name"], restaurant["currency"]
+ " " + str(float(restaurant['average_cost_for_two'])/2) , restaurant["user_rating"]["aggregate_rating"], restaurant["location"]["locality"]])
print tabulate(restaurants_list, headers=["ID", "Name", "Budget", "Rating", "Locality"],tablefmt='fancy_grid')
else:
print "API Issues"
except:
print 'Network Issues'
def restaurant(resid):
    """Print details for one restaurant id, plus a shortened menu URL."""
    try:
        url = 'https://developers.zomato.com/api/v2.1/restaurant?res_id=' + str(resid)
        r = requests.get(url,headers=headers)
        restaurants = []
        if r.status_code != 200:
            print "API Issues"
            return
        res = r.json()
        # Flatten the API response into the fields shown in the table.
        rest = {}
        rest['id'] = res['id']
        rest['name'] = res['name']
        rest['budget'] = float(res['average_cost_for_two'])/2
        rest['menu'] = url_shorten(res['menu_url'])
        rest['rating'] = res['user_rating']['aggregate_rating']
        rest['locality'] = res['location']['locality']
        restaurants.append(rest)
        print tabulate([[i['id'], i['name'], i['budget'], i['rating'], i['locality']] for i in restaurants], headers=['ID', 'Name', 'Budget', 'Rating', 'Locality'],tablefmt='fancy_grid')
        print "Find the menu at:\t", rest['menu']
    except:
        print "Network Issues!"
        return
def reviews(id):
    """Print up to five user reviews for the given restaurant id."""
    url = "https://developers.zomato.com/api/v2.1/reviews?res_id=%s&count=5"%(id)
    try:
        response = requests.get(url, headers=headers)
    except:
        print 'Network Issues!'
        return
    if response.status_code == 200:
        data = response.json()
        count= data["reviews_count"]
        if count == 0:
            print 'No Reviews!'
        else:
            for review in data["user_reviews"]:
                review = review["review"]
                print review["rating"]
                print review["review_text"]
                # Trailing comma keeps the timestamp on the same line.
                print "Posted: ",
                print review["review_time_friendly"]
                print "--------------"
    else:
        print 'Api Issues'
def search(query):
    """Search nearby restaurants matching the query words and print a table."""
    try:
        url = 'https://developers.zomato.com/api/v2.1/search?q=' + str(" ".join(query)) + '&count=10&lat=' + str(config['lat']) + '&lon=' + str(config['lon'])
        r = requests.get(url,headers=headers)
        restaurants = []
        if r.status_code != 200:
            print "Api Issues"
            return
        if len(r.json()['restaurants']) <= 0:
            print "Api Issues"
            return
        for res in r.json()['restaurants']:
            # Flatten each result into the fields shown in the table.
            rest = {}
            rest['id'] = res['restaurant']['id']
            rest['name'] = res['restaurant']['name']
            rest['budget'] = res['restaurant']['currency'] + ' ' + str(float(res['restaurant']['average_cost_for_two'])/2)
            rest['rating'] = res['restaurant']['user_rating']['aggregate_rating']
            rest['locality'] = res['restaurant']['location']['locality']
            restaurants.append(rest)
        print tabulate([[i['id'], i['name'], i['budget'], i['rating'], i['locality']] for i in restaurants], headers=['ID', 'Name', 'Budget', 'Rating', 'Locality'],tablefmt='fancy_grid')
    except:
        print "Network Error!"
def budget(max_budget):
    """Print up to 10 nearby restaurants whose per-person cost is within
    max_budget, combining cheapest- and priciest-sorted result pages."""
    try:
        # NOTE(review): ' &sort=cost' contains a stray space before '&';
        # looks like a typo in the query string -- TODO confirm.
        url1 = 'https://developers.zomato.com/api/v2.1/search?q=&count=100&lat=' + str(config['lat']) + '&lon=' + str(config['lon']) +' &sort=cost&order=desc'
        url2 = 'https://developers.zomato.com/api/v2.1/search?q=&count=100&lat=' + str(config['lat']) + '&lon=' + str(config['lon']) +' &sort=cost&order=asc'
        r1 = requests.get(url1,headers=headers)
        r2 = requests.get(url2, headers=headers)
        restaurants = []
        if r1.status_code != 200 or r2.status_code !=200:
            print "API Issues"
            return
        if len(r1.json()['restaurants']) <= 0 and len(r2.json()['restaurants']) <= 0:
            print "API Issues"
            return
        data = r1.json()['restaurants'] + r2.json()['restaurants']
        for res in data:
            # Per-person cost is half of average_cost_for_two.
            if float(res['restaurant']['average_cost_for_two'])/2 <= int(max_budget):
                rest = {}
                rest['id'] = res['restaurant']['id']
                rest['name'] = res['restaurant']['name']
                rest['budget'] = res['restaurant']['currency'] + ' ' + str(float(res['restaurant']['average_cost_for_two'])/2)
                rest['rating'] = res['restaurant']['user_rating']['aggregate_rating']
                rest['locality'] = res['restaurant']['location']['locality']
                restaurants.append(rest)
            else:
                continue
        print tabulate([[i['id'], i['name'], i['budget'], i['rating'], i['locality']] for i in restaurants][:10], headers=['ID', 'Name', 'Budget', 'Rating', 'Locality'],tablefmt='fancy_grid')
    except:
        print "Network Issues"
        return
def main():
    '''monica helps you order food from the timeline'''
    # Parse the CLI per the module docstring's usage patterns.
    arguments = docopt(__doc__, version=__version__)
    # 'flag' (from config) gates whether (re)configuration is allowed.
    if arguments['configure'] and flag:
        configure()
    if arguments['cuisine']:
        if arguments['list']:
            cuisine('list')
        else:
            cuisine(arguments['<cuisine-id>'])
    elif arguments['surprise']:
        surprise()
    elif arguments['reviews']:
        reviews(arguments['<restaurant-id>'])
    elif arguments['search']:
        search(arguments['QUERY'])
    elif arguments['budget']:
        try:
            money = arguments['<budget>']
            money = float(money)
            budget(money)
        except:
            print 'Budget should be a number!'
    elif arguments['restaurant']:
        restaurant(arguments['<restaurant-id>'])
    else:
        # No recognized command: show usage.
        print (__doc__)
# Script entry point guard.
if __name__ == '__main__':
    main()
Lh4cKg/sl4a | python/src/Lib/lib2to3/refactor.py | 49 | 19094 | #!/usr/bin/env python2.5
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import difflib
import logging
import operator
from collections import defaultdict
from itertools import chain
# Local imports
from .pgen2 import driver
from .pgen2 import tokenize
from . import pytree
from . import patcomp
from . import fixes
from . import pygram
def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package.

    A fix module is any ``fix_*.py`` file in the package directory. With
    *remove_prefix* the leading ``fix_`` is stripped from each name; the
    ``.py`` extension is always dropped.
    """
    package = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(package.__file__)
    prefix_len = 4 if remove_prefix else 0
    return [entry[prefix_len:-3]
            for entry in sorted(os.listdir(fixer_dir))
            if entry.startswith("fix_") and entry.endswith(".py")]
def get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
        of the pattern types which will match first. """
    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatters must either have no type and no content
        #   or a type and content -- so they don't get any farther
        # Always return leafs
        return set([pat.type])
    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return get_head_types(pat.content)
        return set([None]) # Negated Patterns don't have a type
    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(get_head_types(x))
        return r
    # Any other pattern class is unexpected here.
    raise Exception("Oh no! I don't understand pattern %s" %(pat))
def get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
        of head node type --> fixer list.

        Fixers without a pattern are filed under the None key so they
        are consulted for every node. """
    head_nodes = defaultdict(list)
    for fixer in fixer_list:
        if fixer.pattern:
            for node_type in get_head_types(fixer.pattern):
                head_nodes[node_type].append(fixer)
        else:
            head_nodes[None].append(fixer)
    return head_nodes
def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    # Keep the "fix_" prefix so names can be imported as modules directly.
    module_names = get_all_fix_names(pkg_name, False)
    return [pkg_name + "." + module_name for module_name in module_names]
class FixerError(Exception):
    """A fixer could not be loaded."""
    # Raised when a fixer module exists but does not define the expected
    # Fix* class (see RefactoringTool.get_fixers).
class RefactoringTool(object):
_default_options = {"print_function": False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: an dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
if self.options["print_function"]:
del pygram.python_grammar.keywords["print"]
self.driver = driver.Driver(pygram.python_grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.pre_order_heads = get_headnode_dict(self.pre_order)
self.post_order_heads = get_headnode_dict(self.post_order)
self.files = [] # List of files that were or should be modified
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, lines):
"""Called with lines of output to give to the user."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if not name.startswith(".") and name.endswith("py"):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
try:
f = open(filename)
except IOError, err:
self.log_error("Can't open %s: %s", filename, err)
return
try:
input = f.read() + "\n" # Silence certain parse errors
finally:
f.close()
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if output != input:
self.processed_file(output, filename, input, write=write)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if tree and tree.was_changed:
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename, write=write)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
try:
tree = self.driver.parse_string(data)
except Exception, err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if tree and tree.was_changed:
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
self.traverse_by(self.pre_order_heads, tree.pre_order())
self.traverse_by(self.post_order_heads, tree.post_order())
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type] + fixers[None]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None and (new != node or
str(new) != str(node)):
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False):
"""
Called when a file has been refactored, and there are changes.
"""
self.files.append(filename)
if old_text is None:
try:
f = open(filename, "r")
except IOError, err:
self.log_error("Can't read %s: %s", filename, err)
return
try:
old_text = f.read()
finally:
f.close()
if old_text == new_text:
self.log_debug("No changes to %s", filename)
return
self.print_output(diff_texts(old_text, new_text, filename))
if write:
self.write_file(new_text, filename, old_text)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = open(filename, "w")
except os.error, err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(new_text)
except os.error, err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception, err:
if self.log.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
return self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
def diff_texts(a, b, filename):
    """Return a unified diff of two strings."""
    old_lines = a.splitlines()
    new_lines = b.splitlines()
    return difflib.unified_diff(old_lines, new_lines,
                                filename, filename,
                                "(original)", "(refactored)",
                                lineterm="")
| apache-2.0 |
redhat-openstack/nova | nova/tests/api/openstack/compute/contrib/test_fping.py | 14 | 3754 | # Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.contrib import fping
from nova.api.openstack import extensions
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
import nova.utils
FAKE_UUID = fakes.FAKE_UUID
def execute(*cmd, **args):
    """Fake nova.utils.execute: pretend every pinged IP answered.

    cmd[0] is the fping binary name and is ignored; each remaining
    argument is reported alive, mimicking fping's output format.
    """
    return "".join("%s is alive" % ip for ip in cmd[1:])
class FpingTest(test.TestCase):
    """Tests for the os-fping API extension (fping.FpingController)."""
    def setUp(self):
        super(FpingTest, self).setUp()
        self.flags(verbose=True, use_ipv6=False)
        # Stub the instance DB accessors and nova.utils.execute so the
        # controller runs against fake data without shelling out to fping.
        return_server = fakes.fake_instance_get()
        return_servers = fakes.fake_instance_get_all_by_filters()
        self.stubs.Set(nova.db, "instance_get_all_by_filters",
                       return_servers)
        self.stubs.Set(nova.db, "instance_get_by_uuid",
                       return_server)
        self.stubs.Set(nova.utils, "execute",
                       execute)
        # Skip the check that the fping binary is installed.
        self.stubs.Set(fping.FpingController, "check_fping",
                       lambda self: None)
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = fping.FpingController(self.ext_mgr)
    def test_fping_index(self):
        # Index should report project_id/id/alive for every server.
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
        res_dict = self.controller.index(req)
        self.assertIn("servers", res_dict)
        for srv in res_dict["servers"]:
            for key in "project_id", "id", "alive":
                self.assertIn(key, srv)
    def test_fping_index_policy(self):
        # all_tenants=1 is admin-only: forbidden for a plain user,
        # allowed once the context is flagged as admin.
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
        self.assertRaises(exception.Forbidden, self.controller.index, req)
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
        req.environ["nova.context"].is_admin = True
        res_dict = self.controller.index(req)
        self.assertIn("servers", res_dict)
    def test_fping_index_include(self):
        # ?include=<id> restricts the result to exactly that server.
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
        res_dict = self.controller.index(req)
        ids = [srv["id"] for srv in res_dict["servers"]]
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping?include=%s" % ids[0])
        res_dict = self.controller.index(req)
        self.assertEqual(len(res_dict["servers"]), 1)
        self.assertEqual(res_dict["servers"][0]["id"], ids[0])
    def test_fping_index_exclude(self):
        # ?exclude=<all but one> leaves only the first server.
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
        res_dict = self.controller.index(req)
        ids = [srv["id"] for srv in res_dict["servers"]]
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping?exclude=%s" %
                                      ",".join(ids[1:]))
        res_dict = self.controller.index(req)
        self.assertEqual(len(res_dict["servers"]), 1)
        self.assertEqual(res_dict["servers"][0]["id"], ids[0])
    def test_fping_show(self):
        req = fakes.HTTPRequest.blank("/v2/1234/os-fping/%s" % FAKE_UUID)
        res_dict = self.controller.show(req, FAKE_UUID)
        self.assertIn("server", res_dict)
        srv = res_dict["server"]
        for key in "project_id", "id", "alive":
            self.assertIn(key, srv)
rivimey/rwmapmaker | zziplib/docs/zzipdoc/functionheader.py | 14 | 3669 | from match import Match
class FunctionHeader:
    """ parsing the comment block that is usually presented before
    a function prototype - the prototype part is passed along
    for further parsing through => FunctionPrototype """
    def __init__(self, functionheaderlist, comment, prototype):
        self.parent = functionheaderlist
        self.comment = comment
        self.prototype = prototype
        # Parsed lazily by the parse_* helpers below; None means "not yet".
        self.firstline = None
        self.otherlines = None
        self.titleline = None
        self.alsolist = []
    def get_filename(self):
        # Delegates to the owning FunctionHeaderList.
        return self.parent.get_filename()
    def parse_firstline(self):
        # Split the comment at the first newline into firstline/otherlines.
        # Returns False when there is no comment at all.
        if not self.comment: return False
        x = self.comment.find("\n")
        if x > 0:
            self.firstline = self.comment[:x]
            self.otherlines = self.comment[x:]
        elif x == 0:
            # Comment starts with a newline: use a placeholder title.
            # NOTE(review): otherlines becomes comment[1:0] == "" here;
            # presumably comment[1:] was intended -- confirm.
            self.firstline = "..."
            self.otherlines = self.comment[1:x]
        else:
            # Single-line comment, no newline at all.
            self.firstline = self.comment
            self.otherlines = ""
        return True
    def get_firstline(self):
        # Lazy accessor; returns "" when parsing fails.
        if self.firstline is None:
            if not self.parse_firstline(): return ""
        return self.firstline
    def get_otherlines(self):
        # Lazy accessor; returns "" when parsing fails.
        if self.firstline is None:
            if not self.parse_firstline(): return ""
        return self.otherlines
    def parse_titleline(self):
        """ split extra-notes from the firstline - keep only titleline """
        line = self.get_firstline()
        if line is None: return False
        self.titleline = line
        self.alsolist = []
        # "also:" introduces a comma-separated see-also list.
        x = line.find("also:")
        if x > 0:
            self.titleline = line[:x]
            for also in line[x+5:].split(","):
                self.alsolist += [ also.strip() ]
        # NOTE(review): purpose of this alias is unclear; kept as-is.
        self._alsolist = self.alsolist
        return True
    def get_alsolist(self):
        """ gets the see-also notes from the firstline """
        # Returns None when title parsing fails.
        if self.titleline is None:
            if not self.parse_titleline(): return None
        return self.alsolist
    def get_titleline(self):
        """ gets firstline with see-also notes removed """
        # NOTE(review): returns False (not "") on failure, unlike
        # get_firstline -- callers appear to rely on truthiness only.
        if self.titleline is None:
            if not self.parse_titleline(): return False
        return self.titleline
    def get_title(self):
        """ gets titleline unless that is a redirect """
        # Match supports the '&' operator for matching (see match.Match);
        # "=>" redirects and "<link>" entries yield an empty title.
        titleline = self.get_titleline()
        if titleline & Match(r"^\s*=>"): return ""
        if titleline & Match(r"^\s*<link>"): return ""
        return titleline
    def get_prototype(self):
        return self.prototype
class FunctionHeaderList:
    """ scan for comment blocks in the source file that are followed by
    something quite like a C definition (probably a function definition).
    Unpack the occurrences and fill self.comment and self.prototype. """
    def __init__(self, textfile = None):
        self.textfile = textfile # TextFile
        self.children = None # src'style; None means "not parsed yet"
    def parse(self, textfile = None):
        # Returns True when at least one header/prototype pair was found.
        if textfile is not None:
            self.textfile = textfile
        if self.textfile is None:
            return False
        text = self.textfile.get_src_text()
        # Group 1: the body of a /** ... */ doc comment (non-greedy).
        # Group 2: the following code up to '{' or ';', i.e. a probable
        # function prototype (excluding '/', braces, ';' and '#').
        m = Match(r"(?s)\/\*[*]+(?=\s)"
                  r"((?:.(?!\*\/))*.)\*\/"
                  r"([^/\{\}\;\#]+)[\{\;]")
        self.children = []
        for found in m.finditer(text):
            child = FunctionHeader(self, found.group(1), found.group(2))
            self.children += [ child ]
        return len(self.children) > 0
    def get_filename(self):
        return self.textfile.get_filename()
    def get_children(self):
        # Lazy accessor; returns [] when parsing is impossible/finds nothing.
        if self.children is None:
            if not self.parse(): return []
        return self.children
| gpl-3.0 |
netvl/contrib-python-qubell-client | qubell/api/private/environment.py | 1 | 9095 | # Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from qubell.api.globals import ZONE_NAME, DEFAULT_ENV_NAME
from qubell.api.tools import lazyproperty
__author__ = "Vasyl Khomenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "vkhomenko@qubell.com"
import logging as log
import simplejson as json
import copy
from qubell.api.private import exceptions
from qubell.api.private.common import QubellEntityList, Entity
from qubell.api.provider.router import ROUTER as router
class Environment(Entity):
    """A Qubell environment that belongs to an organization.

    Most accessors re-fetch the environment JSON from the backend via
    ``self.json()`` on every call; the ``add_*``/``remove_*`` helpers
    mutate that JSON and PUT it back.
    """

    def __init__(self, organization, id):
        self.organization = organization
        self.organizationId = self.organization.organizationId
        self.environmentId = self.id = id
        # todo: make as properties -- these lists are local caches that are
        # only appended to by add_marker/add_policy/add_property and are
        # not kept in sync with the backend.
        self.policies = []
        self.markers = []
        self.properties = []

    @lazyproperty
    def zoneId(self):
        # The environment's backend id is its zone id.
        return self.json()['backend']

    @lazyproperty
    def services(self):
        # Imported here to avoid a circular import with the instance module.
        from qubell.api.private.instance import InstanceList
        return InstanceList(list_json_method=self.list_services_json, organization=self)

    @property
    def name(self):
        return self.json()['name']

    @property
    def isDefault(self):
        return self.json()['isDefault']

    def __getattr__(self, key):
        """Fall back to the raw environment JSON for unknown attributes.

        Raises NotFoundError when the key is absent; falsy JSON values are
        normalized to False.
        """
        resp = self.json()
        # Fixed: dict.has_key() is Python 2 only; 'in' works on both 2 and 3.
        if key not in resp:
            raise exceptions.NotFoundError('Cannot get property %s' % key)
        return resp[key] or False

    @staticmethod
    def new(organization, name, zone_id=None, default=False):
        """Create a new environment in *organization* and return it.

        Falls back to the organization's default zone when *zone_id* is
        not given.
        """
        log.info("Creating environment: %s" % name)
        if not zone_id:
            zone_id = organization.zone.zoneId
        data = {'isDefault': default,
                'name': name,
                'backend': zone_id,
                'organizationId': organization.organizationId}
        log.debug(data)
        resp = router.post_organization_environment(org_id=organization.organizationId, data=json.dumps(data)).json()
        env = Environment(organization, id=resp['id'])
        log.info("Environment created: %s (%s)" % (name, env.environmentId))
        return env

    def restore(self, config, clean=False, timeout=10):
        """Bring the environment to the state described by *config*.

        *config* may contain 'markers', 'policies', 'properties' and
        'services'; existing entities are kept unless *clean* is set, in
        which case the service list is wiped first.  *timeout* is kept for
        interface compatibility but is not used here.
        """
        config = copy.deepcopy(config)
        if clean:
            self.clean()
        for marker in config.pop('markers', []):
            self.add_marker(marker)
        for policy in config.pop('policies', []):
            self.add_policy(policy)
        for prop in config.pop('properties', []):
            self.add_property(**prop)
        for service in config.pop('services', []):
            # 'type' may appear in configs but is not needed for the lookup;
            # pop it so it does not leak into later processing.
            service.pop('type', None)
            serv = self.organization.get_service(id=service.pop('id', None), name=service.pop('name'))
            if serv not in self.services:
                self.add_service(serv)
        # Wait for every attached service to come up.
        for service in self.services:
            service.ready()

    def json(self):
        """Return the raw environment JSON from the backend."""
        return router.get_environment(org_id=self.organizationId, env_id=self.environmentId).json()

    def delete(self):
        router.delete_environment(org_id=self.organizationId, env_id=self.environmentId)
        return True

    def set_as_default(self):
        """Mark this environment as the organization's default."""
        data = json.dumps({'environmentId': self.id})
        return router.put_organization_default_environment(org_id=self.organizationId, data=data).json()

    def list_available_services_json(self):
        return router.get_environment_available_services(org_id=self.organizationId, env_id=self.environmentId).json()

    def list_services_json(self):
        return self.json()['services']

    def _put_environment(self, data):
        # PEP8: a def instead of a lambda assigned to a name (E731).
        return router.put_environment(org_id=self.organizationId, env_id=self.environmentId, data=data)

    def add_service(self, service):
        """Attach *service* to the environment (no-op when already attached).

        For secure-vault services a provisionVms/publicKeyId policy is
        added using the service's default (or freshly regenerated) key.
        Returns the PUT response JSON, or None when nothing was done.
        """
        resp = None
        if service not in self.services:
            time.sleep(3)  # TODO: Need to wait until strategy comes up
            data = self.json()
            data['serviceIds'].append(service.instanceId)
            data['services'].append(service.json())
            log.info("Adding service %s (%s) to environment %s (%s)" % (service.name, service.id, self.name, self.id))
            resp = self._put_environment(data=json.dumps(data))

            if service.is_secure_vault:
                user_data = service.userData
                if 'defaultKey' in user_data:
                    key = user_data['defaultKey']
                else:
                    key = service.regenerate()['id']
                self.add_policy(
                    {"action": "provisionVms",
                     "parameter": "publicKeyId",
                     "value": key})
        return resp.json() if resp else None

    def remove_service(self, service):
        """Detach *service* from the environment and return the PUT response."""
        data = self.json()
        data['serviceIds'].remove(service.instanceId)
        data['services'] = [s for s in data['services'] if s['id'] != service.id]
        log.info("Removing service %s (%s) from environment %s (%s)" % (service.name, service.id, self.name, self.id))
        resp = self._put_environment(data=json.dumps(data))
        return resp.json()

    def add_marker(self, marker):
        time.sleep(0.5)  # TODO: Need to wait until strategy comes up
        data = self.json()
        data['markers'].append({'name': marker})

        log.info("Adding marker %s to environment %s (%s)" % (marker, self.name, self.id))
        resp = self._put_environment(data=json.dumps(data))
        self.markers.append(marker)
        return resp.json()

    def remove_marker(self, marker):
        data = self.json()
        data['markers'].remove({'name': marker})

        log.info("Removing marker %s from environment %s (%s)" % (marker, self.name, self.id))
        resp = self._put_environment(data=json.dumps(data))
        self.markers.remove(marker)
        return resp.json()

    def add_property(self, name, type, value):
        time.sleep(0.5)  # TODO: Need to wait until strategy comes up
        data = self.json()
        data['properties'].append({'name': name, 'type': type, 'value': value})

        log.info("Adding property %s to environment %s (%s)" % (name, self.name, self.id))
        resp = self._put_environment(data=json.dumps(data))
        self.properties.append({'name': name, 'type': type, 'value': value})
        return resp.json()
    set_property = add_property

    def remove_property(self, name):
        """Remove the named property; returns None when it does not exist."""
        data = self.json()
        matches = [p for p in data['properties'] if p['name'] == name]
        if not matches:
            # Fixed: previously only logged and then fell through to
            # matches[0], raising IndexError instead of returning.
            log.error('Unable to remove property %s. Not found.' % name)
            return None
        data['properties'].remove(matches[0])

        log.info("Removing property %s from environment %s (%s)" % (name, self.name, self.id))
        return self._put_environment(data=json.dumps(data)).json()

    def clean(self):
        """Detach every service from the environment."""
        data = self.json()
        data['serviceIds'] = []
        data['services'] = []

        log.info("Cleaning environment %s (%s)" % (self.name, self.id))
        return self._put_environment(data=json.dumps(data)).json()

    def add_policy(self, new):
        time.sleep(0.5)  # TODO: Need to wait until strategy comes up
        data = self.json()
        data['policies'].append(new)

        log.info("Adding policy %s.%s to environment %s (%s)" % (new.get('action'), new.get('parameter'), self.name, self.id))
        resp = self._put_environment(data=json.dumps(data))
        self.policies.append(new)
        return resp.json()

    def remove_policy(self):
        raise NotImplementedError

    def set_backend(self, zone):
        raise exceptions.ApiError("Change environment backend is not supported, since 24.x")
class EnvironmentList(QubellEntityList):
    """List of Environment entities for one organization."""
    base_clz = Environment

    @property
    def default(self):
        """
        Returns environment marked as default.
        When Zone is set marked default makes no sense, special env with proper Zone is returned.
        """
        if ZONE_NAME:
            log.info("Getting or creating default environment for zone with name '{0}'".format(DEFAULT_ENV_NAME()))
            zone_id = self.organization.zones[ZONE_NAME].id
            return self.organization.get_or_create_environment(name=DEFAULT_ENV_NAME(), zone=zone_id)
        # Fixed idiom: use truthiness instead of comparing '== True'.
        def_envs = [env_j["id"] for env_j in self.json() if env_j["isDefault"]]
        if len(def_envs) > 1:
            log.warning('Found more than one default environment. Picking last.')
            return self[def_envs[-1]]
        if len(def_envs) == 1:
            return self[def_envs[0]]
        raise exceptions.NotFoundError('Unable to get default environment')
| apache-2.0 |
pombredanne/SourceForge-Allura | scripts/migrations/009-set_landing_page.py | 3 | 3217 | from allura.model import Neighborhood
from ming.orm import ThreadLocalORMSession
# Inline HTML/CSS used as the landing page of the "Projects" neighborhood.
# NOTE: this is a runtime string written verbatim into the database below;
# do not reformat it.
homepage = """<style type="text/css">
ul.ui-tab { display: none; }
div.content {
font-family: Helvetica;
}
div.content div.row > div.column {
width: 100%
}
div.welcome { margin: 2em 0; }
div.welcome p {
display: block;
position: relative;
left: 8em;
width: 80%;
}
div.welcome a {
display: inline-block;
font-weight: 600;
color: white;
margin-left: 1.5em;
padding: 0.5em 1.5em 0.45em 1.5em;
text-decoration: none;
-webkit-border-radius: 5px;
-moz-border-radius: 5px;
background: rgb(0,0,0);
background-image: -webkit-gradient(linear, 0% 0%, 0% 100%, to(rgb(0,0,0)), from(rgb(90,90,90)));
background-image: -moz-linear-gradient(100% 100% 90deg, rgb(0,0,0), rgb(90,90,90) 100%);
border: 1px solid black;
}
div.inner-row {
display: block;
position: relative;
padding: 1em 1em 1em 10em;
}
div.inner-row + div.inner-row { padding-top: 4.8em; }
div.tool {
display: inline-block;
position: relative;
width: 30%;
padding: 0 1em 3em 0;
}
div.tool img {
position: absolute;
left: -64px;
top: 0;
}
div.tool h1, div.welcome {
font-size:18px;
font-weight: 300;
}
div.tool h1 {
position: relative;
top: -15px;
}
div.tool p {
display: block;
font-size: 13px;
line-height: 18px;
position: absolute;
padding-right: 6em;
top: 12px;
}
</style>
<div class="welcome">
<p>We provide the tools. You create great open source software.
<a href="/p/add_project">Start Your Project</a>
</p>
</div>
<div class="inner-row">
<div class="tool">
<img src="/nf/allura/images/wiki_48.png" alt=""/>
<h1>Wikis</h1>
<p>
Documentation is key to your project and the wiki tool helps make it easy for anyone to contribute.
</p>
</div>
<div class="tool">
<img src="/nf/allura/images/code_48.png" alt=""/>
<h1>Code</h1>
<p>
SVN, Git and Mercurial will help you keep track of your changes.
</p>
</div>
<div class="tool">
<img src="/nf/allura/images/tickets_48.png" alt=""/>
<h1>Tickets</h1>
<p>
Bugs, enhancements, tasks, etc., will help you plan and manage your development.
</p>
</div>
</div>
<div class="inner-row">
<div class="tool">
<img src="/nf/allura/images/downloads_48.png" alt=""/>
<h1>Downloads</h1>
<p>
Use the largest free, managed, global mirror network to distribute your files.
</p>
</div>
<div class="tool">
<img src="/nf/allura/images/stats_48.png" alt=""/>
<h1>Stats</h1>
<p>
Follow the download trends that enable you to develop better software.
</p>
</div>
<div class="tool">
<img src="/nf/allura/images/forums_48.png" alt=""/>
<h1>Forums</h1>
<p>
Collaborate with your community in your forums.
</p>
</div>
</div>
"""
# Migration body: set the landing page of the 'Projects' neighborhood and
# flush the ORM session so the change is persisted.
projects_neighborhood = Neighborhood.query.find(dict(name='Projects')).first()
projects_neighborhood.homepage = homepage
ThreadLocalORMSession.flush_all()
| apache-2.0 |
slash-testing/backslash-python | backslash/api_object.py | 2 | 1450 |
class APIObject(object):
    """Lightweight wrapper around a raw JSON object returned by the API.

    Attribute reads that are not found on the instance itself are
    forwarded to the underlying JSON dict.
    """

    def __init__(self, client, json_data):
        super(APIObject, self).__init__()
        self.client = client
        self._data = json_data

    @property
    def api_url(self):
        return self.client.url.add_path(self.api_path)

    @property
    def ui_url(self):
        raise NotImplementedError()  # pragma: no cover

    def __eq__(self, other):
        if not isinstance(other, APIObject):
            return NotImplemented
        # Same client instance and equal payloads means "same object".
        return self.client is other.client and self._data == other._data  # pylint: disable=protected-access

    def __ne__(self, other):
        return not (self == other)  # pylint: disable=superfluous-parens

    def __getattr__(self, name):
        data = self.__dict__.get('_data', {})
        if name in data:
            return data[name]
        raise AttributeError(name)

    def refresh(self):
        """Re-fetch the payload from the API; the id must stay stable."""
        previous_id = self.id
        self._data = self._fetch()
        assert self.id == previous_id
        return self

    def _fetch(self):
        raw = self.client.api.get(self.api_path, raw=True)
        return raw[self._data['type']]

    def __repr__(self):
        return '<API:%s:%s>' % (self._data['type'], self._data['id'])

    def without_fields(self, field_names):
        """Return a copy of this object with the given fields dropped."""
        remaining = {key: value
                     for key, value in self._data.items()
                     if key not in field_names}
        return type(self)(self.client, remaining)
| bsd-3-clause |
tsheets/api_python | tsheets/model.py | 1 | 5132 | import pytz
from . import helpers
import dateutil.parser
from datetime import datetime, date
class Model(object):
    """Base class for API models with declaratively typed fields.

    Fields are registered per-class via add_field(); raw JSON values are
    converted to Python types with cast_raw() and back with cast_to_raw().
    Keys that were never declared are still assigned (as "dynamic"
    accessors) but are excluded from to_raw().
    """

    # Shared registry: class -> list of {'name', 'type', 'exclude'} dicts.
    _accessors = {}
    # Type used for keys that were never declared ("anything" == pass-through).
    _default_type = "anything"

    def __init__(self, **kwargs):
        self._dynamic_accessors = []
        if kwargs:
            self.__class__.mass_assign(self, kwargs)

    @classmethod
    def add_field(cls, fname, type_f, options=None):
        """Declare field *fname* with type *type_f* on *cls*.

        options may contain 'exclude': a list of serialization modes in
        which the field is skipped by get_attributes()/to_raw().
        """
        if options is None:  # fixed: mutable default argument ({})
            options = {}
        setattr(cls, fname, None)
        if cls not in Model._accessors:
            Model._accessors[cls] = []
        exclude = options.get('exclude', [])
        Model._accessors[cls].append({'name': fname, 'type': type_f, 'exclude': exclude})

    @classmethod
    def add_default_type(cls, data_type):
        cls._default_type = data_type

    @classmethod
    def from_raw(cls, hash):
        """Build an instance from a raw JSON dict."""
        instance = cls()
        return cls.mass_assign(instance, hash)

    @classmethod
    def mass_assign(cls, instance, hash):
        """Assign every key of *hash* onto *instance*, casting values.

        Keys not already present on the instance/class are recorded as
        dynamic accessors.
        """
        dynamic = instance._dynamic_accessors
        for k, v in hash.items():
            casted = cls.cast_raw(v, k)
            # Record unknown keys before the setattr makes hasattr() true.
            if not hasattr(instance, k):
                dynamic.append({'name': k})
            setattr(instance, k, casted)
        instance._dynamic_accessors = dynamic
        return instance

    @classmethod
    def type_for(cls, field_name):
        """Return the declared type of *field_name*, or the default type."""
        for accessor in Model._accessors.get(cls, []):
            if accessor["name"] == field_name:
                return accessor["type"]
        return cls._default_type

    @classmethod
    def type_for_key(cls, key):
        return cls.type_for(key)

    @classmethod
    def cast_raw(cls, value, key, type=None):
        """Convert a raw JSON *value* into the Python type declared for *key*.

        A list type like [int] casts element-wise; unparsable dates and
        datetimes yield None; unknown model types are delegated to
        helpers.to_class(...).from_raw().
        """
        if value is None:
            return None
        type_symbol = type if type else cls.type_for_key(key)
        if isinstance(type_symbol, list):
            return [cls.cast_raw(i, key, type_symbol[0]) for i in value]
        elif type_symbol == str:
            return value
        elif type_symbol == int:
            return int(value)
        elif type_symbol == datetime:
            try:
                return dateutil.parser.parse(value)
            except Exception:  # fixed: bare except also caught SystemExit etc.
                return None
        elif type_symbol == date:
            try:
                return datetime.strptime(value, "%Y-%m-%d").date()
            except Exception:
                return None
        elif type_symbol == bool:
            return value == True
        elif type_symbol == dict:
            return value
        elif type_symbol == float:
            return float(value)
        elif type_symbol == object:
            if not value:
                return {}
            return value
        elif type_symbol == "anything":
            return value
        else:
            return helpers.to_class(type_symbol)().from_raw(value)

    def cast_to_raw(self, value, key, type=None):
        """Convert a Python *value* back into its raw JSON representation."""
        type_symbol = type or self.__class__.type_for_key(key)
        if isinstance(type_symbol, list):
            return [self.cast_to_raw(i, key, type_symbol[0]) for i in value]
        elif type_symbol == str:
            return value
        elif type_symbol == int:
            return value
        elif type_symbol == datetime:
            if not value:
                return ""
            try:
                # Naive datetimes are treated as UTC, truncated to seconds.
                if not value.tzinfo:
                    return value.replace(tzinfo=pytz.UTC).replace(microsecond=0).isoformat()
                return value.isoformat()
            except Exception:
                return None
        elif type_symbol == date:
            if not value:
                return ""
            try:
                return value.strftime("%Y-%m-%d")
            except Exception:
                return None
        elif type_symbol == bool:
            return value
        elif type_symbol == dict:
            return value
        elif type_symbol == float:
            return value
        elif type_symbol == object:
            if not value:
                return ""
            return value
        elif type_symbol == "anything":
            return value
        else:
            if not value:
                return None
            return value.to_raw()

    def to_raw(self, mode=None):
        """Serialize the declared (non-dynamic) fields to a raw JSON dict."""
        attributes = self.get_attributes(mode)
        obj = {}
        for k, v in attributes.items():
            obj[k] = self.cast_to_raw(v, k)
        return obj

    def allowed_for_mode(self, mode, acc):
        """True when accessor *acc* should be serialized in *mode*."""
        return (mode is None) or (not bool(acc['exclude'])) or not (mode in acc['exclude'])

    def attribute_for_accessors(self, accessor):
        """Return {name: current value} for the given accessor dicts."""
        sum = {}
        for acc in accessor:
            sum[acc['name']] = self.__getattribute__(acc['name'])
        return sum

    def get_attributes(self, mode=None):
        """Return {field: value} for declared accessors allowed in *mode*."""
        _accessors = Model._accessors.get(self.__class__, [])
        # Fixed: the original computed this list twice (the first result
        # was immediately overwritten by an equivalent loop).
        acc = [a for a in _accessors if self.allowed_for_mode(mode, a)]
        return self.attribute_for_accessors(acc)
| mit |
dharmabumstead/ansible | lib/ansible/utils/module_docs_fragments/vmware.py | 12 | 1998 | # Copyright: (c) 2016, Charles Paul <cpaul@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    # Shared documentation fragment with the standard connection parameters
    # for VMware modules.  Modules pull this in via
    # ``extends_documentation_fragment: vmware``; the string below must stay
    # valid YAML because ansible-doc parses it at runtime.

    # Parameters for VMware modules
    DOCUMENTATION = '''
options:
    hostname:
        description:
        - The hostname or IP address of the vSphere vCenter or ESXi server.
        - If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
        - Environment variable supported added in version 2.6.
        required: False
    username:
        description:
        - The username of the vSphere vCenter or ESXi server.
        - If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
        - Environment variable supported added in version 2.6.
        required: False
        aliases: ['user', 'admin']
    password:
        description:
        - The password of the vSphere vCenter or ESXi server.
        - If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
        - Environment variable supported added in version 2.6.
        required: False
        aliases: ['pass', 'pwd']
    validate_certs:
        description:
        - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
        - If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
        - Environment variable supported added in version 2.6.
        default: 'True'
        type: bool
    port:
        description:
        - The port number of the vSphere vCenter or ESXi server.
        - If the value is not specified in the task, the value of environment variable C(VMWARE_PORT) will be used instead.
        - Environment variable supported added in version 2.6.
        required: False
        default: 443
        version_added: 2.5
'''
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/jinja2/sandbox.py | 130 | 16707 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from collections import Mapping
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2
from jinja2.utils import Markup
from markupsafe import EscapeFormatter
from string import Formatter
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
if PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                      'func_defaults', 'func_globals'])
else:
    # On versions > python 2 the special attributes on functions are gone,
    # but they remain on methods and generators for whatever reason.
    UNSAFE_FUNCTION_ATTRIBUTES = set()

#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])

#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])

#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

# Builtin mutable types; the user-module and ABC variants are appended below.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # NOTE(review): UserList is appended to the *set* types tuple here —
    # looks like it was meant for _mutable_sequence_types; preserved as-is.
    _mutable_set_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
#: Maps each group of mutable types to the names of its mutating methods;
#: consulted by modifies_known_mutable().
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
class _MagicFormatMapping(Mapping):
    """Mapping wrapper around positional and keyword format arguments.

    Works around a bug in the stdlib string formatting machinery where
    auto-numbered fields (``'{}'``) are looked up with an empty key; see
    http://bugs.python.org/issue13598 for the details.
    """

    def __init__(self, args, kwargs):
        self._args = args
        self._kwargs = kwargs
        self._last_index = 0

    def __getitem__(self, key):
        # Non-empty keys are plain keyword lookups.
        if key != '':
            return self._kwargs[key]
        # Empty key: auto-numbering — consume the next positional argument,
        # falling back to a keyword whose name is the stringified position.
        position = self._last_index
        self._last_index += 1
        try:
            return self._args[position]
        except LookupError:
            return self._kwargs[str(position)]

    def __iter__(self):
        return iter(self._kwargs)

    def __len__(self):
        return len(self._kwargs)
def inspect_format_method(callable):
    """Return the receiver string when *callable* is the bound ``format``
    method of a string, otherwise ``None``.
    """
    if not isinstance(callable, (types.MethodType,
                                 types.BuiltinMethodType)):
        return None
    if callable.__name__ != 'format':
        return None
    receiver = callable.__self__
    if isinstance(receiver, string_types):
        return receiver
    return None
def safe_range(*args):
    """Like :func:`range`, but refuses to produce a range longer than
    ``MAX_RANGE`` items so templates cannot allocate huge sequences.
    """
    result = range(*args)
    if len(result) > MAX_RANGE:
        raise OverflowError('range too big, maximum size for range is %d' %
                            MAX_RANGE)
    return result
def unsafe(f):
    """Decorator that flags a function or method as unsafe so the sandbox
    will refuse to call it.

    ::

        @unsafe
        def delete(self):
            pass
    """
    f.unsafe_callable = True
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Order matters: each elif branch handles one kind of object, checked
    # from most to least specific.
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        # Methods expose both the function attributes and method attributes.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        if attr == 'mro':
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Interpreter internals are never safe to expose at all.
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # Anything dunder-prefixed is considered internal as well.
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Return ``True`` when calling ``attr`` on the builtin mutable object
    *obj* (list, dict, set or deque) would mutate it.  The "user"-versions
    of the objects (`sets.Set`, `UserDict.*` etc.) and, from Python 2.6 on,
    the abstract base classes `MutableSet`, `MutableMapping`, and
    `MutableSequence` are supported as well.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    Unsupported objects (such as unicode) always yield ``False``.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for known_types, mutating_methods in _mutable_spec:
        if isinstance(obj, known_types):
            return attr in mutating_methods
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation from the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation from the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Replace the builtin range with a bounded variant so templates
        # cannot allocate arbitrarily large ranges.
        self.globals['range'] = safe_range
        # Per-instance copies so subclasses/users can customize safely.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Item lookup failed: for string keys, fall back to (sandboxed)
            # attribute access so ``foo['bar']`` can also find ``foo.bar``.
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Attribute lookup failed: fall back to item access so
            # ``foo.bar`` can also find ``foo['bar']``.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def format_string(self, s, args, kwargs):
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, s.escape)
        else:
            formatter = SandboxedFormatter(self)
        kwargs = _MagicFormatMapping(args, kwargs)
        rv = formatter.vformat(s, args, kwargs)
        # Preserve the markup-ness of the input (str stays str, Markup
        # stays Markup).
        return type(s)(rv)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs)

        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Behaves exactly like :class:`SandboxedEnvironment`, but additionally
    forbids in-place modification of the builtin mutable objects `list`,
    `set`, and `dict` by consulting :func:`modifies_known_mutable`.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Safe only if the base sandbox allows it AND the attribute is not
        # a known mutating method of a builtin mutable container.
        return (SandboxedEnvironment.is_safe_attribute(self, obj, attr, value)
                and not modifies_known_mutable(obj, attr))
# This really is not a public API apparently.
# CPython exposes the C helper used by str.format field parsing in the
# private _string module; fall back to the (equally private) string method
# on implementations that lack it.
try:
    from _string import formatter_field_name_split
except ImportError:
    def formatter_field_name_split(field_name):
        return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
    """Mixin that routes :class:`string.Formatter` field lookups through a
    sandboxed environment's ``getattr``/``getitem`` methods.
    """

    def __init__(self, env):
        self._env = env

    def get_field(self, field_name, args, kwargs):
        root, path = formatter_field_name_split(field_name)
        current = self.get_value(root, args, kwargs)
        for is_attribute, part in path:
            # Attribute segments go through the sandboxed getattr,
            # subscript segments through the sandboxed getitem.
            accessor = self._env.getattr if is_attribute else self._env.getitem
            current = accessor(current, part)
        return current, root
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
    # Plain-text string.Formatter whose field lookups go through the sandbox
    # (via SandboxedFormatterMixin.get_field).
    def __init__(self, env):
        # Initialize both bases explicitly; the mixin records the environment.
        SandboxedFormatterMixin.__init__(self, env)
        Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
    # Markup variant: pairs the sandbox mixin with markupsafe's
    # EscapeFormatter, passing the escape callable through to it.
    def __init__(self, env, escape):
        SandboxedFormatterMixin.__init__(self, env)
        EscapeFormatter.__init__(self, escape)
| apache-2.0 |
rupakc/Kaggle-Compendium | Santas Stolen Sleigh/SantaUtil.py | 1 | 6924 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 23:21:29 2016
Defines a set of utility functions to be used for prediction
@author: Rupak Chakraborty
"""
import math
from trip import Trip
from gift import Gift
import random
import time
import pandas as pd
import operator
# Mean Earth radius in kilometres, used by haversineDistance().
# FIX: was 6773, which is not any standard Earth radius (mean radius is
# ~6371 km, equatorial ~6378 km); 6773 overestimated every distance.
RADIUS_EARTH = 6371.0
NORTH_POLE_LAT = 90    # Latitude of the North Pole (start/end of every trip)
NORTH_POLE_LONG = 0    # Longitude at the pole (any value works there)
EMPTY_SLEIGH_WEIGHT = 10   # Base weight of the empty sleigh for the return leg
SLEIGH_CAPACITY = 1000     # Maximum total gift weight per trip
random.seed(time.time())   # Re-seed the shared RNG once at import time
gift_filename = "Santa's Stolen Sleigh/gifts.csv"
"""
Calculates the haversine distance between two given points
The two points are the values of the latitude and longitude
in degrees
Params:
--------
lat_first - Latitude of the first point
long_first - Longitude of the first point
lat_second - Latitude of second point
long_second - Longitude of the second point
Returns:
---------
The haversine distance between the two given points i.e. a float
"""
def haversineDistance(lat_first, long_first, lat_second, long_second):
    """Great-circle distance between two points via the haversine formula.

    All four arguments are latitudes/longitudes in degrees; the result is in
    the units of RADIUS_EARTH (kilometres).
    """
    phi_a, lam_a, phi_b, lam_b = (
        math.radians(v) for v in (lat_first, long_first, lat_second, long_second)
    )
    half_chord_lat = math.sin((phi_a - phi_b) / 2.0) ** 2
    half_chord_long = math.sin((lam_a - lam_b) / 2.0) ** 2
    chord = half_chord_lat + math.cos(phi_a) * math.cos(phi_b) * half_chord_long
    return 2 * RADIUS_EARTH * math.asin(math.sqrt(chord))
"""
Defines the fitness function for the trip list i.e. all deliveries
The total fitness is defined as the weighted sum of distances
Params:
--------
trip_list: A List of trips which Santa needs to take (A list containing the trip object)
Returns:
---------
Total Cost of the given trip list (i.e. Fitness)
"""
def tripFitness(trip_list):
    """Total fitness of a delivery plan: the sum of each trip's weighted
    distance (``trip_cost``) over all trips."""
    return sum(trip.trip_cost for trip in trip_list)
"""
Given a list of gifts calculates the cost of the trip (i.e. Weighted Distance)
Params:
--------
gift_list: A list of gifts in the order in which they have to be delivered
Returns:
---------
Cost of the trip with the given order of gifts (i.e. A Floating point number)
"""
def tripCost(gift_list):
    """Weighted-distance cost of delivering ``gift_list`` in order.

    Each leg's distance is weighted by the gift weight still on the sleigh;
    the return leg to the North Pole is weighted by EMPTY_SLEIGH_WEIGHT.

    FIX: the original recomputed the remaining weight with tripWeightUtil()
    for every leg (O(n^2)); this keeps a running total instead (O(n)).
    """
    gift_size = len(gift_list)
    # All gifts are on board for the first leg from the North Pole.
    remaining_weight = sum(gift.weight for gift in gift_list)
    weighted_distance = remaining_weight * haversineDistance(
        NORTH_POLE_LAT, NORTH_POLE_LONG,
        gift_list[0].latitude, gift_list[0].longitude)
    for i in range(gift_size - 1):
        # Gift i has just been delivered; drop its weight before leg i -> i+1.
        remaining_weight -= gift_list[i].weight
        leg = haversineDistance(
            gift_list[i].latitude, gift_list[i].longitude,
            gift_list[i + 1].latitude, gift_list[i + 1].longitude)
        weighted_distance += remaining_weight * leg
    # Empty sleigh returns to the North Pole.
    returning_distance = haversineDistance(
        gift_list[gift_size - 1].latitude, gift_list[gift_size - 1].longitude,
        NORTH_POLE_LAT, NORTH_POLE_LONG)
    weighted_distance += EMPTY_SLEIGH_WEIGHT * returning_distance
    return weighted_distance
"""
Utility function to calculate the cumulative weight of gifts in a given range
Both ends of the range are included
Params:
--------
gift_list : List of gift objects
start_index : Starting index for gift list
end_index : Ending index of the gift list
Returns:
---------
Returns the sum of weights in a given range
"""
def tripWeightUtil(gift_list, start_index, end_index):
    """Sum of gift weights for indices start_index..end_index, both ends
    inclusive; 0 when the range is empty."""
    return sum(gift.weight for gift in gift_list[start_index:end_index + 1])
"""
Applies the mutation operator on trip list i.e. swaps two trips
Params:
-------
trip_list: List containing the trips taken by Santa
Returns:
--------
A new list containing the trip list with values swapped
"""
def mutateTripList(trip_list):
    """Mutation operator: swap two randomly chosen trips in place and
    return the (mutated) list."""
    first, second = generateSwapIndices(len(trip_list))
    trip_list[first], trip_list[second] = trip_list[second], trip_list[first]
    return trip_list
"""
Applies the mutation operator on the gift list i.e. swaps two gifts in a list
Params:
-------
gift_list: List containing the gifts taken by Santa
Returns:
--------
A new list containing the gift list with values swapped
"""
def mutateGiftList(gift_list):
    """Mutation operator: swap two randomly chosen gifts in place and
    return the (mutated) list."""
    first, second = generateSwapIndices(len(gift_list))
    gift_list[first], gift_list[second] = gift_list[second], gift_list[first]
    return gift_list
"""
Utility function to generate two distinct random integers from zero to a given range
Params:
--------
max_size: Integer containing the maximum limit for generation of the random integers
Returns:
--------
Two distinct random integers between 0 and a given max_size
"""
def generateSwapIndices(max_size):
    """Return two *distinct* random integers in [0, max_size - 1].

    FIX: the original looped ``while b != a`` (regenerating until the two
    indices were EQUAL, the opposite of the docstring) and regenerated with
    ``randint(0, max_size)``, which could return an out-of-range index.
    ``max_size`` must be >= 2 for two distinct values to exist.
    """
    a = random.randint(0, max_size - 1)
    b = random.randint(0, max_size - 1)
    while b == a:
        b = random.randint(0, max_size - 1)
    return a, b
"""
Returns the dataFrame containing the gift information
Params:
-------
String containing the filename from which the information is to be extracted
Returns:
--------
Pandas Dataframe object containing the gift information
"""
def getGiftList(filename):
    """Read the gifts CSV at ``filename`` and return a list of Gift objects
    (one per row, in file order)."""
    gift_frame = pd.read_csv(filename)
    gifts = []
    for _, row in gift_frame.iterrows():
        gifts.append(Gift(row.GiftId, row.Latitude, row.Longitude, row.Weight))
    return gifts
"""
Sorts a given map by the values and returns a list containing th sorted tuples
"""
def sortMapByValues(map_to_sort):
    """Return the (key, value) pairs of ``map_to_sort`` as a list sorted by
    value in ascending order."""
    return sorted(map_to_sort.items(), key=operator.itemgetter(1))
"""
Sorts the given population by its fitness value
Params:
-------
initial_population: List containing the initial population
Returns:
--------
List of tuples containing the indices of the initial population and its fitness
"""
def sortPopulationByFitness(initial_population):
    """Rank a population by fitness.

    Returns a list of (population_index, fitness) tuples sorted by fitness
    ascending (lower trip cost is better).
    """
    fitness_by_index = {
        index: tripFitness(trip_gene)
        for index, trip_gene in enumerate(initial_population)
    }
    return sortMapByValues(fitness_by_index)
"""
Given all the trips in a list returns the one with the maximum cost and its index
Params:
---------
trip_list: List of trips to be taken for delivery
Returns:
--------
The trip with the maximum cost and its corresponding index
"""
def maximumTripCost(trip_list):
    """Return (index, trip) of the trip with the highest ``trip_cost``.

    FIX: the original compared ``trip.trip_cost`` against a Trip *object*
    (never a cost), overwrote the tracked maximum with a float, and returned
    the loop's last ``trip`` instead of the maximal one.
    """
    index = 0
    max_trip = trip_list[0]
    for i, trip in enumerate(trip_list):
        # Strict > keeps the earliest trip on ties.
        if trip.trip_cost > max_trip.trip_cost:
            max_trip = trip
            index = i
    return index, max_trip
| mit |
Kilhog/odoo | addons/l10n_cr/__init__.py | 438 | 2045 | # -*- encoding: utf-8 -*-
##############################################################################
#
# __init__.py
# l10n_cr_account
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jens-G/thrift | contrib/zeromq/TZmqServer.py | 43 | 2709 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import zmq
import thrift.server.TServer
import thrift.transport.TTransport
class TZmqServer(thrift.server.TServer.TServer):
    # Thrift server transported over a single ZeroMQ socket.  Each incoming
    # ZeroMQ message is treated as one complete serialized Thrift request.

    def __init__(self, processor, ctx, endpoint, sock_type):
        # No Thrift server transport is used; ZeroMQ replaces it (hence None).
        thrift.server.TServer.TServer.__init__(self, processor, None)
        self.zmq_type = sock_type
        self.socket = ctx.socket(sock_type)
        self.socket.bind(endpoint)

    def serveOne(self):
        # Handle exactly one request: message in -> processor -> reply out.
        msg = self.socket.recv()
        itrans = thrift.transport.TTransport.TMemoryBuffer(msg)
        otrans = thrift.transport.TTransport.TMemoryBuffer()
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(oprot) if False else self.outputProtocolFactory.getProtocol(otrans)
        try:
            self.processor.process(iprot, oprot)
        except Exception:
            logging.exception("Exception while processing request")
            # Fall through and send back a response, even if empty or incomplete.
        if self.zmq_type == zmq.REP:
            # REP sockets must send exactly one reply per received request.
            msg = otrans.getvalue()
            self.socket.send(msg)

    def serve(self):
        # Process requests forever, one at a time.
        while True:
            self.serveOne()
class TZmqMultiServer(object):
    """Multiplexes several TZmqServer instances over one ZeroMQ poller.

    Populate :attr:`servers` with TZmqServer objects, then call
    :meth:`serveOne` or :meth:`serveForever`.
    """

    def __init__(self):
        self.servers = []

    def serveOne(self, timeout=-1):
        """Poll once and dispatch every ready server."""
        self._serveActive(self._setupPoll(), timeout)

    def serveForever(self):
        """Poll and dispatch in an endless loop."""
        poll_info = self._setupPoll()
        while True:
            self._serveActive(poll_info, -1)

    def _setupPoll(self):
        # Build the socket -> server lookup and register every socket
        # for read events.
        socket_to_server = {server.socket: server for server in self.servers}
        poller = zmq.Poller()
        for sock in socket_to_server:
            poller.register(sock, zmq.POLLIN)
        return (socket_to_server, poller)

    def _serveActive(self, poll_info, timeout):
        # NOTE(review): the timeout parameter is accepted but never passed to
        # poller.poll() — preserved as-is; confirm whether that is intended.
        (socket_to_server, poller) = poll_info
        for sock, state in dict(poller.poll()).items():
            assert (state & zmq.POLLIN) != 0
            socket_to_server[sock].serveOne()
| apache-2.0 |
michhar/ms-pythonbot | msbot/__init__.py | 1 | 1583 | """
The flask application package.
"""
#####################################################################
# Create the Flask app
#####################################################################
from flask import Flask
from .callback_utils import Callbacks
import os
from flask_pyoidc.flask_pyoidc import OIDCAuthentication
app = Flask(__name__)

### Flask-pyoidc ###

# Server port from the environment; currently only referenced by the
# commented-out SERVER_NAME suffix below.
PORT = os.getenv('SERVER_PORT', '3978')

config = {
    'SERVER_NAME': os.getenv('SERVER_NAME', 'localhost'),  # + ':' + PORT,
    'SECRET_KEY': 'dev',
    'PREFERRED_URL_SCHEME': 'https',
    'DEBUG': True
}
app.config.update(config)

# OAuth client credentials for the Bot Framework, read from the environment
# (placeholder fallbacks for local development).
client_info = {
    'client_id': os.getenv('MICROSOFT_CLIENT_ID', 'foo'),
    'client_secret': os.getenv('MICROSOFT_CLIENT_SECRET', 'bar'),
    'scope': 'https://api.botframework.com/.default'
}

provider_config = {
    'issuer': 'https://api.botframework.com',
    # NOTE(review): authorization_endpoint points at the same URL as
    # token_endpoint — looks copy-pasted; confirm against the Bot Framework
    # OAuth documentation.
    'authorization_endpoint': 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token',
    'token_endpoint': 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token',
    # 'userinfo_endpoint': 'https://login.microsoftonline.com/common/oauth2/v2.0/userinfo',
    # 'grant_type': 'client_credentials',
    'scope': 'https://api.botframework.com/.default'
}

# Wire OIDC authentication into the Flask app.
auth = OIDCAuthentication(app,
                          provider_configuration_info=provider_config,
                          client_registration_info=client_info)

app_backend = Callbacks()
import msbot.views
| mit |
DataONEorg/d1_python | lib_common/src/d1_common/system_metadata.py | 1 | 14840 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE SystemMetadata type.
DataONE API methods such as `MNStorage.create()` require a Science Object and System
Metadata pair.
Examples:
Example v2 SystemMetadata XML document with all optional values included:
::
<v2:systemMetadata xmlns:v2="http://ns.dataone.org/service/types/v2.0">
<!--Optional:-->
<serialVersion>11</serialVersion>
<identifier>string</identifier>
<formatId>string</formatId>
<size>11</size>
<checksum algorithm="string">string</checksum>
<!--Optional:-->
<submitter>string</submitter>
<rightsHolder>string</rightsHolder>
<!--Optional:-->
<accessPolicy>
<!--1 or more repetitions:-->
<allow>
<!--1 or more repetitions:-->
<subject>string</subject>
<!--1 or more repetitions:-->
<permission>read</permission>
</allow>
</accessPolicy>
<!--Optional:-->
<replicationPolicy replicationAllowed="true" numberReplicas="3">
<!--Zero or more repetitions:-->
<preferredMemberNode>string</preferredMemberNode>
<!--Zero or more repetitions:-->
<blockedMemberNode>string</blockedMemberNode>
</replicationPolicy>
<!--Optional:-->
<obsoletes>string</obsoletes>
<obsoletedBy>string</obsoletedBy>
<archived>true</archived>
<dateUploaded>2014-09-18T17:18:33</dateUploaded>
<dateSysMetadataModified>2006-08-19T11:27:14-06:00</dateSysMetadataModified>
<originMemberNode>string</originMemberNode>
<authoritativeMemberNode>string</authoritativeMemberNode>
<!--Zero or more repetitions:-->
<replica>
<replicaMemberNode>string</replicaMemberNode>
<replicationStatus>failed</replicationStatus>
<replicaVerified>2013-05-21T19:02:49-06:00</replicaVerified>
</replica>
<!--Optional:-->
<seriesId>string</seriesId>
<!--Optional:-->
<mediaType name="string">
<!--Zero or more repetitions:-->
<property name="string">string</property>
</mediaType>
<!--Optional:-->
<fileName>string</fileName>
</v2:systemMetadata>
"""
import datetime
import logging
import os
import d1_common.checksum
import d1_common.date_time
import d1_common.type_conversions
import d1_common.types.dataoneTypes
import d1_common.wrap.access_policy
import d1_common.xml
logger = logging.getLogger(__name__)
# Names of the elements that may appear directly under the SystemMetadata root,
# in schema order. update_elements() validates its element list against this.
SYSMETA_ROOT_CHILD_LIST = [
    "serialVersion",
    "identifier",
    "formatId",
    "size",
    "checksum",
    "submitter",
    "rightsHolder",
    "accessPolicy",
    "replicationPolicy",
    "obsoletes",
    "obsoletedBy",
    "archived",
    "dateUploaded",
    "dateSysMetadataModified",
    "originMemberNode",
    "authoritativeMemberNode",
    "replica",
    "seriesId",
    "mediaType",
    "fileName",
]
def is_sysmeta_pyxb(sysmeta_pyxb):
    """Check if an object is a SystemMetadata PyXB object.

    Args:
      sysmeta_pyxb: Object that may or may not be a SystemMetadata PyXB object.

    Returns:
      bool: ``True`` only when ``sysmeta_pyxb`` is a PyXB object whose type name
      is ``SystemMetadata``. ``False`` for non-PyXB objects and for PyXB objects
      of any other type.
    """
    # Guard first: the type name lookup is only valid for DataONE PyXB objects.
    if not d1_common.type_conversions.is_pyxb_d1_type(sysmeta_pyxb):
        return False
    type_name_str = d1_common.type_conversions.pyxb_get_type_name(sysmeta_pyxb)
    return type_name_str == "SystemMetadata"
def normalize_in_place(sysmeta_pyxb, reset_timestamps=False, reset_filename=False):
    """Normalize SystemMetadata PyXB object in-place.

    Args:
      sysmeta_pyxb:
        SystemMetadata PyXB object to normalize. The object is modified
        directly; nothing is returned.

      reset_timestamps: bool
        ``True``: Timestamps in the SystemMetadata are set to a standard value so that
        objects that are compared after normalization register as equivalent if only
        their timestamps differ.
        ``False``: Timestamps are passed through ``round_to_nearest`` instead of
        being reset (presumably to drop sub-precision jitter -- see
        d1_common.date_time.round_to_nearest).

      reset_filename: bool
        ``True``: The fileName element is cleared, so objects that differ only
        in fileName register as equivalent.

    Notes:
      The SystemMetadata is normalized by removing any redundant information and
      ordering all sections where there are no semantics associated with the order. The
      normalized SystemMetadata is intended to be semantically equivalent to the
      un-normalized one.
    """
    # Replace the AccessPolicy with a normalized version (dedup/ordering done
    # by the access_policy wrapper).
    if sysmeta_pyxb.accessPolicy is not None:
        sysmeta_pyxb.accessPolicy = d1_common.wrap.access_policy.get_normalized_pyxb(
            sysmeta_pyxb.accessPolicy
        )
    # Sort list sections whose element order carries no meaning.
    if getattr(sysmeta_pyxb, "mediaType", False):
        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.mediaType.property_)
    if getattr(sysmeta_pyxb, "replicationPolicy", False):
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.preferredMemberNode
        )
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.blockedMemberNode
        )
    d1_common.xml.sort_elements_by_child_values(
        sysmeta_pyxb.replica,
        ["replicaVerified", "replicaMemberNode", "replicationStatus"],
    )
    # Coerce archived to a plain bool (e.g. a missing/None value becomes False).
    sysmeta_pyxb.archived = bool(sysmeta_pyxb.archived)
    if reset_timestamps:
        # Pin every timestamp (including per-replica ones) to the Unix epoch.
        epoch_dt = datetime.datetime(1970, 1, 1, tzinfo=d1_common.date_time.UTC())
        sysmeta_pyxb.dateUploaded = epoch_dt
        sysmeta_pyxb.dateSysMetadataModified = epoch_dt
        for replica_pyxb in getattr(sysmeta_pyxb, "replica", []):
            replica_pyxb.replicaVerified = epoch_dt
    else:
        sysmeta_pyxb.dateUploaded = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateUploaded
        )
        sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateSysMetadataModified
        )
        for replica_pyxb in getattr(sysmeta_pyxb, "replica", []):
            replica_pyxb.replicaVerified = d1_common.date_time.round_to_nearest(
                replica_pyxb.replicaVerified
            )
    if reset_filename:
        sysmeta_pyxb.fileName = None
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False, ignore_filename=False):
    """Determine if SystemMetadata PyXB objects are semantically equivalent.

    Normalize then compare SystemMetadata PyXB objects for equivalency.

    Args:
      a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare. NOTE: both are
        normalized in place as a side effect of the comparison.

      ignore_timestamps: bool
        ``True``: Timestamps are ignored during the comparison.

      ignore_filename: bool
        ``True``: FileName elements are ignored during the comparison.

        This is necessary in cases where GMN returns a generated filename because one
        was not provided in the SysMeta.

    Returns: bool:
      ``True`` if SystemMetadata PyXB objects are semantically equivalent.

    Notes:
      The SystemMetadata is normalized by removing any redundant information and
      ordering all sections where there are no semantics associated with the order. The
      normalized SystemMetadata is intended to be semantically equivalent to the
      un-normalized one.
    """
    # Normalize both sides, then compare their serialized XML forms.
    for sysmeta_pyxb in (a_pyxb, b_pyxb):
        normalize_in_place(sysmeta_pyxb, ignore_timestamps, ignore_filename)
    a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
    b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
    is_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
    if not is_equivalent:
        # Log a diff to help track down which elements differ.
        logger.debug("XML documents not equivalent:")
        logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
    return is_equivalent
def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False):
    """Determine if two SystemMetadata XML docs are semantically equivalent.

    Normalize then compare SystemMetadata XML docs for equivalency.

    Args:
      a_xml, b_xml: bytes
        UTF-8 encoded SystemMetadata XML docs to compare.

      ignore_timestamps: bool
        ``True``: Timestamps in the SystemMetadata are ignored so that objects that are
        compared register as equivalent if only their timestamps differ.

    Returns: bool:
      ``True`` if SystemMetadata XML docs are semantically equivalent.

    Notes:
      The SystemMetadata is normalized by removing any redundant information and
      ordering all sections where there are no semantics associated with the order. The
      normalized SystemMetadata is intended to be semantically equivalent to the
      un-normalized one.
    """
    # Fix: removed a second, dead string literal that followed the docstring
    # (a no-op expression duplicating the documentation above).
    # Deserialize both docs and delegate the comparison to the PyXB variant.
    return are_equivalent_pyxb(
        d1_common.xml.deserialize(a_xml),
        d1_common.xml.deserialize(b_xml),
        ignore_timestamps,
    )
def clear_elements(sysmeta_pyxb, clear_replica=True, clear_serial_version=True):
    """Remove volatile elements from a SystemMetadata PyXB object in-place.

    ``clear_replica`` causes any replica information to be removed from the object.
    This allows comparisons to ignore differences in replica information, as this
    information is often different between MN and CN.

    ``clear_serial_version`` removes the serialVersion element.

    The replicationPolicy element is always removed.
    """
    # Each flag guards its own element; replicationPolicy is cleared regardless.
    if clear_replica:
        sysmeta_pyxb.replica = None
    if clear_serial_version:
        sysmeta_pyxb.serialVersion = None
    sysmeta_pyxb.replicationPolicy = None
def update_elements(dst_pyxb, src_pyxb, el_list):
    """Copy elements specified in ``el_list`` from ``src_pyxb`` to ``dst_pyxb``.

    Only elements that are children of root are supported. See
    SYSMETA_ROOT_CHILD_LIST.

    If an element in ``el_list`` does not exist in ``src_pyxb``, it is removed from
    ``dst_pyxb`` (set to None).

    Raises:
      ValueError: If ``el_list`` names an element that is not a known root child.
    """
    # Reject unknown element names up front so a typo cannot silently clear
    # attributes on the destination.
    unknown_el_set = set(el_list) - set(SYSMETA_ROOT_CHILD_LIST)
    if unknown_el_set:
        raise ValueError(
            'Passed one or more invalid elements. invalid="{}"'.format(
                ", ".join(sorted(list(unknown_el_set)))
            )
        )
    for el_name in el_list:
        # Missing source elements map to None, which removes them on dst.
        setattr(dst_pyxb, el_name, getattr(src_pyxb, el_name, None))
def generate_system_metadata_pyxb(
    pid,
    format_id,
    sciobj_stream,
    submitter_str,
    rights_holder_str,
    authoritative_mn_urn,
    # SeriesID and obsolescence
    sid=None,
    obsoletes_pid=None,
    obsoleted_by_pid=None,
    is_archived=False,
    #
    serial_version=1,
    uploaded_datetime=None,
    modified_datetime=None,
    file_name=None,
    origin_mn_urn=None,
    # Access Policy
    is_private=False,
    access_list=None,
    # Media Type
    media_name=None,
    media_property_list=None,
    # Replication Policy
    is_replication_allowed=False,
    preferred_mn_list=None,
    blocked_mn_list=None,
    #
    pyxb_binding=None,
):
    """Generate a System Metadata PyXB object.

    Args:
      pid: Persistent Identifier, stored in the identifier element.
      format_id: Stored in the formatId element.
      sciobj_stream: Seekable stream holding the Science Object bytes; used
        only to calculate the checksum and size (rewound afterwards).
      submitter_str: Stored in the submitter element.
      rights_holder_str: Stored in the rightsHolder element.
      authoritative_mn_urn: Stored in authoritativeMemberNode; also used as
        originMemberNode when ``origin_mn_urn`` is not given.
      pyxb_binding: PyXB binding module to use; defaults to
        d1_common.types.dataoneTypes.
      sid: Optional Series Identifier (seriesId element).
      obsoletes_pid: PID of the object this object obsoletes.
      obsoleted_by_pid: PID of the object that obsoletes this object.
      is_archived: Stored in the archived element.
      serial_version: Stored in the serialVersion element (defaults to 1).
      uploaded_datetime: dateUploaded; defaults to the current UTC time.
      modified_datetime: dateSysMetadataModified; defaults to dateUploaded.
      file_name: Stored in the fileName element.
      origin_mn_urn: Stored in originMemberNode when given.
      access_list: Sequence of (subject, permission) pairs for the AccessPolicy.
      is_private: ``False``: a public-read rule is added to the AccessPolicy.
      media_name: MediaType name attribute.
      media_property_list: Sequence of (name, value) MediaType properties.
      is_replication_allowed: ReplicationPolicy replicationAllowed attribute.
      preferred_mn_list: ReplicationPolicy preferredMemberNode elements.
      blocked_mn_list: ReplicationPolicy blockedMemberNode elements.

    Returns:
      systemMetadata PyXB object
    """
    # Fall back to the default v2 binding when none is supplied.
    pyxb_binding = pyxb_binding or d1_common.types.dataoneTypes
    sysmeta_pyxb = pyxb_binding.systemMetadata()
    sysmeta_pyxb.identifier = pid
    sysmeta_pyxb.seriesId = sid
    sysmeta_pyxb.formatId = format_id
    # Checksum and size are derived from the object bytes themselves.
    sysmeta_pyxb.checksum, sysmeta_pyxb.size = gen_checksum_and_size(sciobj_stream)
    sysmeta_pyxb.submitter = submitter_str
    sysmeta_pyxb.rightsHolder = rights_holder_str
    sysmeta_pyxb.authoritativeMemberNode = authoritative_mn_urn
    # Origin defaults to the authoritative MN when not given explicitly.
    sysmeta_pyxb.originMemberNode = origin_mn_urn or authoritative_mn_urn
    sysmeta_pyxb.obsoletes = obsoletes_pid
    sysmeta_pyxb.obsoletedBy = obsoleted_by_pid
    sysmeta_pyxb.archived = is_archived
    sysmeta_pyxb.serialVersion = serial_version
    sysmeta_pyxb.dateUploaded = uploaded_datetime or d1_common.date_time.utc_now()
    # Modified time defaults to the upload time.
    sysmeta_pyxb.dateSysMetadataModified = (
        modified_datetime or sysmeta_pyxb.dateUploaded
    )
    sysmeta_pyxb.fileName = file_name
    sysmeta_pyxb.replica = None
    gen_access_policy(pyxb_binding, sysmeta_pyxb, is_private, access_list)
    sysmeta_pyxb.replicationPolicy = gen_replication_policy(
        pyxb_binding, preferred_mn_list, blocked_mn_list, is_replication_allowed
    )
    # MediaType is only included when at least one of its inputs was supplied.
    if media_name or media_property_list:
        sysmeta_pyxb.mediaType = gen_media_type(
            pyxb_binding, media_name, media_property_list
        )
    return sysmeta_pyxb
def gen_checksum_and_size(sciobj_stream):
    """Calculate the checksum and size of the object in ``sciobj_stream``.

    The stream is rewound to position 0 before returning.

    Returns:
      2-tuple of (Checksum PyXB object, size in bytes).
    """
    # Determine the total size first, then rewind and checksum the full stream.
    sciobj_stream.seek(0, os.SEEK_END)
    sciobj_size = sciobj_stream.tell()
    sciobj_stream.seek(0)
    checksum_pyxb = d1_common.checksum.create_checksum_object_from_stream(sciobj_stream)
    # Leave the stream rewound for the caller.
    sciobj_stream.seek(0)
    return checksum_pyxb, sciobj_size
def gen_access_policy(pyxb_binding, sysmeta_pyxb, is_private, access_list):
    """Generate an AccessPolicy on ``sysmeta_pyxb`` in-place.

    Args:
      pyxb_binding: PyXB binding module used to build the AccessPolicy types.
      sysmeta_pyxb: SystemMetadata PyXB object to attach the AccessPolicy to.
      is_private: ``False``: a public-read rule is added.
      access_list: Optional sequence of (subject, permission) pairs.
    """
    with d1_common.wrap.access_policy.wrap_sysmeta_pyxb(
        sysmeta_pyxb, pyxb_binding
    ) as ap:
        if not is_private:
            ap.add_public_read()
        if access_list is not None:
            for subj_str, perm_str in access_list:
                ap.add_perm(subj_str, perm_str)
        # Write the accumulated rules back onto sysmeta_pyxb.
        ap.update()
def gen_replication_policy(
    pyxb_binding,
    preferred_mn_list=None,
    blocked_mn_list=None,
    is_replication_allowed=False,
):
    """Generate a ReplicationPolicy PyXB object.

    Args:
      pyxb_binding: PyXB binding module used to build the ReplicationPolicy.
      preferred_mn_list: preferredMemberNode elements.
      blocked_mn_list: blockedMemberNode elements.
      is_replication_allowed: replicationAllowed attribute; when True,
        numberReplicas is set to 3, otherwise to 0.

    Returns:
      ReplicationPolicy PyXB object.
    """
    policy_pyxb = pyxb_binding.replicationPolicy()
    policy_pyxb.replicationAllowed = is_replication_allowed
    # A disabled policy carries zero replicas; an enabled one defaults to 3.
    policy_pyxb.numberReplicas = 3 if is_replication_allowed else 0
    policy_pyxb.preferredMemberNode = preferred_mn_list
    policy_pyxb.blockedMemberNode = blocked_mn_list
    return policy_pyxb
def gen_media_type(pyxb_binding, media_name, media_property_list=None):
    """Generate a MediaType PyXB object.

    Args:
      pyxb_binding: PyXB binding module used to build the MediaType.
      media_name: MediaType name attribute. Must not be None.
      media_property_list: Optional sequence of (name, value) property pairs.

    Returns:
      MediaType PyXB object.
    """
    # NOTE: validation via assert is stripped under `python -O`; kept for
    # behavioral compatibility with existing callers.
    assert (
        media_name is not None
    ), "When a media_property_list is set, the media_name must also be set"
    mt_pyxb = pyxb_binding.MediaType(name=media_name)
    prop_pair_list = media_property_list or []
    for prop_name_str, prop_value_str in prop_pair_list:
        mt_pyxb.property_.append(
            pyxb_binding.MediaTypeProperty(prop_value_str, name=prop_name_str)
        )
    return mt_pyxb
| apache-2.0 |
renegelinas/mi-instrument | mi/dataset/parser/cg_stc_eng_stc.py | 5 | 71530 | #!/usr/bin/env python
"""
@package mi.dataset.parser.cg_stc_eng_stc
@file marine-integrations/mi/dataset/parser/cg_stc_eng_stc.py
@author Mike Nicoletti
@brief Parser for the cg_stc_eng_stc dataset driver
Release notes:
Starting the cg_stc_eng_stc driver
"""
__author__ = 'Mike Nicoletti'
__license__ = 'Apache 2.0'
import copy
import re
import ntplib
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import SampleException, DatasetParserException, SampleEncodingException
from mi.dataset.dataset_parser import Parser
from mi.dataset.param_dict import DatasetParameterDict
class CgDataParticleType(BaseEnum):
    """Stream (particle) names produced by the cg_stc_eng_stc parser."""
    TELEMETERED = 'cg_stc_eng_stc'
    RECOVERED = 'cg_stc_eng_stc_recovered'
class CgStcEngStcParserDataParticleKey(BaseEnum):
    """Particle value IDs for the cg_stc_eng_stc engineering data.

    Each constant names one field extracted from a CG STC engineering status
    file. The regex and encoding for each field are defined in
    _build_param_dict().
    """
    # Platform timestamps
    CG_ENG_PLATFORM_TIME = 'cg_eng_platform_time'
    CG_ENG_PLATFORM_UTIME = 'cg_eng_platform_utime'
    # STATUS.msg_cnts message counters per controller process
    CG_ENG_MSG_CNTS_C_GPS = 'cg_eng_msg_cnts_c_gps'
    CG_ENG_MSG_CNTS_C_NTP = 'cg_eng_msg_cnts_c_ntp'
    CG_ENG_MSG_CNTS_C_PPS = 'cg_eng_msg_cnts_c_pps'
    CG_ENG_MSG_CNTS_C_POWER_SYS = 'cg_eng_msg_cnts_c_power_sys'
    CG_ENG_MSG_CNTS_C_SUPERV = 'cg_eng_msg_cnts_c_superv'
    CG_ENG_MSG_CNTS_C_TELEM = 'cg_eng_msg_cnts_c_telem'
    # STATUS.err_cnts error counters per process
    CG_ENG_ERR_C_GPS = 'cg_eng_err_c_gps'
    CG_ENG_ERR_C_PPS = 'cg_eng_err_c_pps'
    CG_ENG_ERR_C_CTL = 'cg_eng_err_c_ctl'
    CG_ENG_ERR_C_STATUS = 'cg_eng_err_c_status'
    CG_ENG_ERR_SUPERV = 'cg_eng_err_superv'
    CG_ENG_ERR_C_POWER_SYS = 'cg_eng_err_c_power_sys'
    CG_ENG_ERR_C_TELEM_SYS = 'cg_eng_err_c_telem_sys'
    CG_ENG_ERR_C_IRID = 'cg_eng_err_c_irid'
    CG_ENG_ERR_C_IMM = 'cg_eng_err_c_imm'
    CG_ENG_ERR_CPM1 = 'cg_eng_err_cpm1'
    CG_ENG_ERR_D_CTL = 'cg_eng_err_d_ctl'
    CG_ENG_ERR_D_STATUS = 'cg_eng_err_d_status'
    CG_ENG_ERR_DLOG_MGR = 'cg_eng_err_dlog_mgr'
    CG_ENG_ERR_DLOGP1 = 'cg_eng_err_dlogp1'
    CG_ENG_ERR_DLOGP2 = 'cg_eng_err_dlogp2'
    CG_ENG_ERR_DLOGP3 = 'cg_eng_err_dlogp3'
    CG_ENG_ERR_DLOGP4 = 'cg_eng_err_dlogp4'
    CG_ENG_ERR_DLOGP5 = 'cg_eng_err_dlogp5'
    CG_ENG_ERR_DLOGP6 = 'cg_eng_err_dlogp6'
    CG_ENG_ERR_DLOGP7 = 'cg_eng_err_dlogp7'
    CG_ENG_ERR_DLOGP8 = 'cg_eng_err_dlogp8'
    CG_ENG_ERR_RCMD = 'cg_eng_err_rcmd'
    CG_ENG_ERR_BCMD = 'cg_eng_err_bcmd'
    # Last error message strings per process
    CG_ENG_ERRMSG_C_GPS = 'cg_eng_errmsg_c_gps'
    CG_ENG_ERRMSG_C_PPS = 'cg_eng_errmsg_c_pps'
    CG_ENG_ERRMSG_C_CTL = 'cg_eng_errmsg_c_ctl'
    CG_ENG_ERRMSG_C_STATUS = 'cg_eng_errmsg_c_status'
    CG_ENG_ERRMSG_SUPERV = 'cg_eng_errmsg_superv'
    CG_ENG_ERRMSG_C_POWER_SYS = 'cg_eng_errmsg_c_power_sys'
    CG_ENG_ERRMSG_C_TELEM_SYS = 'cg_eng_errmsg_c_telem_sys'
    CG_ENG_ERRMSG_C_IRID = 'cg_eng_errmsg_c_irid'
    CG_ENG_ERRMSG_C_IMM = 'cg_eng_errmsg_c_imm'
    CG_ENG_ERRMSG_CPM1 = 'cg_eng_errmsg_cpm1'
    CG_ENG_ERRMSG_D_CTL = 'cg_eng_errmsg_d_ctl'
    CG_ENG_ERRMSG_D_STATUS = 'cg_eng_errmsg_d_status'
    CG_ENG_ERRMSG_DLOG_MGR = 'cg_eng_errmsg_dlog_mgr'
    CG_ENG_ERRMSG_DLOGP1 = 'cg_eng_errmsg_dlogp1'
    CG_ENG_ERRMSG_DLOGP2 = 'cg_eng_errmsg_dlogp2'
    CG_ENG_ERRMSG_DLOGP3 = 'cg_eng_errmsg_dlogp3'
    CG_ENG_ERRMSG_DLOGP4 = 'cg_eng_errmsg_dlogp4'
    CG_ENG_ERRMSG_DLOGP5 = 'cg_eng_errmsg_dlogp5'
    CG_ENG_ERRMSG_DLOGP6 = 'cg_eng_errmsg_dlogp6'
    CG_ENG_ERRMSG_DLOGP7 = 'cg_eng_errmsg_dlogp7'
    CG_ENG_ERRMSG_DLOGP8 = 'cg_eng_errmsg_dlogp8'
    CG_ENG_ERRMSG_RCMD = 'cg_eng_errmsg_rcmd'
    CG_ENG_ERRMSG_BCMD = 'cg_eng_errmsg_bcmd'
    # CPU load and memory
    CG_ENG_CPU_UPTIME = 'cg_eng_cpu_uptime'
    CG_ENG_CPU_LOAD1 = 'cg_eng_cpu_load1'
    CG_ENG_CPU_LOAD5 = 'cg_eng_cpu_load5'
    CG_ENG_CPU_LOAD15 = 'cg_eng_cpu_load15'
    CG_ENG_MEMORY_RAM = 'cg_eng_memory_ram'
    CG_ENG_MEMORY_FREE = 'cg_eng_memory_free'
    CG_ENG_NPROC = 'cg_eng_nproc'
    # MPIC (power/interface controller) status
    CG_ENG_MPIC_EFLAG = 'cg_eng_mpic_eflag'
    CG_ENG_MPIC_MAIN_V = 'cg_eng_mpic_main_v'
    CG_ENG_MPIC_MAIN_C = 'cg_eng_mpic_main_c'
    CG_ENG_MPIC_BAT_V = 'cg_eng_mpic_bat_v'
    CG_ENG_MPIC_BAT_C = 'cg_eng_mpic_bat_c'
    CG_ENG_MPIC_TEMP1 = 'cg_eng_mpic_temp1'
    CG_ENG_MPIC_TEMP2 = 'cg_eng_mpic_temp2'
    CG_ENG_MPIC_HUMID = 'cg_eng_mpic_humid'
    CG_ENG_MPIC_PRESS = 'cg_eng_mpic_press'
    CG_ENG_MPIC_GF_ENA = 'cg_eng_mpic_gf_ena'
    CG_ENG_MPIC_GFLT1 = 'cg_eng_mpic_gflt1'
    CG_ENG_MPIC_GFLT2 = 'cg_eng_mpic_gflt2'
    CG_ENG_MPIC_GFLT3 = 'cg_eng_mpic_gflt3'
    CG_ENG_MPIC_GFLT4 = 'cg_eng_mpic_gflt4'
    CG_ENG_MPIC_LD_ENA = 'cg_eng_mpic_ld_ena'
    CG_ENG_MPIC_LDET1 = 'cg_eng_mpic_ldet1'
    CG_ENG_MPIC_LDET2 = 'cg_eng_mpic_ldet2'
    CG_ENG_MPIC_WSRC = 'cg_eng_mpic_wsrc'
    CG_ENG_MPIC_IRID = 'cg_eng_mpic_irid'
    CG_ENG_MPIC_IRID_V = 'cg_eng_mpic_irid_v'
    CG_ENG_MPIC_IRID_C = 'cg_eng_mpic_irid_c'
    CG_ENG_MPIC_IRID_E = 'cg_eng_mpic_irid_e'
    CG_ENG_MPIC_FW_WIFI = 'cg_eng_mpic_fw_wifi'
    CG_ENG_MPIC_FW_WIFI_V = 'cg_eng_mpic_fw_wifi_v'
    CG_ENG_MPIC_FW_WIFI_C = 'cg_eng_mpic_fw_wifi_c'
    CG_ENG_MPIC_FW_WIFI_E = 'cg_eng_mpic_fw_wifi_e'
    CG_ENG_MPIC_GPS = 'cg_eng_mpic_gps'
    CG_ENG_MPIC_SBD = 'cg_eng_mpic_sbd'
    CG_ENG_MPIC_SBD_CE_MSG = 'cg_eng_mpic_sbd_ce_msg'
    CG_ENG_MPIC_PPS = 'cg_eng_mpic_pps'
    CG_ENG_MPIC_DCL = 'cg_eng_mpic_dcl'
    CG_ENG_MPIC_ESW = 'cg_eng_mpic_esw'
    CG_ENG_MPIC_DSL = 'cg_eng_mpic_dsl'
    CG_ENG_MPIC_HBEAT_ENABLE = 'cg_eng_mpic_hbeat_enable'
    CG_ENG_MPIC_HBEAT_DTIME = 'cg_eng_mpic_hbeat_dtime'
    CG_ENG_MPIC_HBEAT_THRESHOLD = 'cg_eng_mpic_hbeat_threshold'
    CG_ENG_MPIC_WAKE_CPM = 'cg_eng_mpic_wake_cpm'
    CG_ENG_MPIC_WPC = 'cg_eng_mpic_wpc'
    CG_ENG_MPIC_EFLAG2 = 'cg_eng_mpic_eflag2'
    CG_ENG_MPIC_LAST_UPDATE = 'cg_eng_mpic_last_update'
    # GPS fix data
    CG_ENG_GPS_MSG_DATE = 'cg_eng_gps_msg_date'
    CG_ENG_GPS_MSG_TIME = 'cg_eng_gps_msg_time'
    CG_ENG_GPS_DATE = 'cg_eng_gps_date'
    CG_ENG_GPS_TIME = 'cg_eng_gps_time'
    CG_ENG_GPS_LATSTR = 'cg_eng_gps_latstr'
    CG_ENG_GPS_LONSTR = 'cg_eng_gps_lonstr'
    CG_ENG_GPS_LAT = 'cg_eng_gps_lat'
    CG_ENG_GPS_LON = 'cg_eng_gps_lon'
    CG_ENG_GPS_SPD = 'cg_eng_gps_spd'
    CG_ENG_GPS_COG = 'cg_eng_gps_cog'
    CG_ENG_GPS_FIX = 'cg_eng_gps_fix'
    CG_ENG_GPS_NSAT = 'cg_eng_gps_nsat'
    CG_ENG_GPS_HDOP = 'cg_eng_gps_hdop'
    CG_ENG_GPS_ALT = 'cg_eng_gps_alt'
    CG_ENG_GPS_LAST_UPDATE = 'cg_eng_gps_last_update'
    # NTP status
    CG_ENG_NTP_REFID = 'cg_eng_ntp_refid'
    CG_ENG_NTP_OFFSET = 'cg_eng_ntp_offset'
    CG_ENG_NTP_JITTER = 'cg_eng_ntp_jitter'
    # PPS (pulse-per-second) status
    CG_ENG_PPS_LOCK = 'cg_eng_pps_lock'
    CG_ENG_PPS_DELTA = 'cg_eng_pps_delta'
    CG_ENG_PPS_DELTAMIN = 'cg_eng_pps_deltamin'
    CG_ENG_PPS_DELTAMAX = 'cg_eng_pps_deltamax'
    CG_ENG_PPS_BAD_PULSE = 'cg_eng_pps_bad_pulse'
    CG_ENG_PPS_TIMESTAMP = 'cg_eng_pps_timestamp'
    CG_ENG_PPS_LAST_UPDATE = 'cg_eng_pps_last_update'
    # Load shedding status
    CG_ENG_LOADSHED_STATUS = 'cg_eng_loadshed_status'
    CG_ENG_LOADSHED_LAST_UPDATE = 'cg_eng_loadshed_last_update'
    # Single-board computer I/O state
    CG_ENG_SBC_ETH0 = 'cg_eng_sbc_eth0'
    CG_ENG_SBC_ETH1 = 'cg_eng_sbc_eth1'
    CG_ENG_SBC_LED0 = 'cg_eng_sbc_led0'
    CG_ENG_SBC_LED1 = 'cg_eng_sbc_led1'
    CG_ENG_SBC_LED2 = 'cg_eng_sbc_led2'
    CG_ENG_SBC_GPO0 = 'cg_eng_sbc_gpo0'
    CG_ENG_SBC_GPO1 = 'cg_eng_sbc_gpo1'
    CG_ENG_SBC_GPO2 = 'cg_eng_sbc_gpo2'
    CG_ENG_SBC_GPO3 = 'cg_eng_sbc_gpo3'
    CG_ENG_SBC_GPO4 = 'cg_eng_sbc_gpo4'
    CG_ENG_SBC_GPIO0 = 'cg_eng_sbc_gpio0'
    CG_ENG_SBC_GPIO1 = 'cg_eng_sbc_gpio1'
    CG_ENG_SBC_GPIO2 = 'cg_eng_sbc_gpio2'
    CG_ENG_SBC_GPIO3 = 'cg_eng_sbc_gpio3'
    CG_ENG_SBC_GPIO4 = 'cg_eng_sbc_gpio4'
    CG_ENG_SBC_GPIO5 = 'cg_eng_sbc_gpio5'
    CG_ENG_SBC_FB1 = 'cg_eng_sbc_fb1'
    CG_ENG_SBC_FB2 = 'cg_eng_sbc_fb2'
    CG_ENG_SBC_CE_LED = 'cg_eng_sbc_ce_led'
    CG_ENG_SBC_WDT = 'cg_eng_sbc_wdt'
    CG_ENG_SBC_BID = 'cg_eng_sbc_bid'
    CG_ENG_SBC_BSTR = 'cg_eng_sbc_bstr'
    # DCL-side message counters
    CG_ENG_MSG_CNTS_D_GPS = 'cg_eng_msg_cnts_d_gps'
    CG_ENG_MSG_CNTS_D_NTP = 'cg_eng_msg_cnts_d_ntp'
    CG_ENG_MSG_CNTS_D_PPS = 'cg_eng_msg_cnts_d_pps'
    CG_ENG_MSG_CNTS_D_SUPERV = 'cg_eng_msg_cnts_d_superv'
    CG_ENG_MSG_CNTS_D_DLOG_NGR = 'cg_eng_msg_cnts_d_dlog_ngr'
    # DCL power port status (ports 1-8)
    CG_ENG_DCLP1_ENABLE = 'cg_eng_dclp1_enable'
    CG_ENG_DCLP1_VOLT = 'cg_eng_dclp1_volt'
    CG_ENG_DCLP1_CURRENT = 'cg_eng_dclp1_current'
    CG_ENG_DCLP1_EFLAG = 'cg_eng_dclp1_eflag'
    CG_ENG_DCLP1_VSEL = 'cg_eng_dclp1_vsel'
    CG_ENG_DCLP1_CLIM = 'cg_eng_dclp1_clim'
    CG_ENG_DCLP1_PROT = 'cg_eng_dclp1_prot'
    CG_ENG_DCLP2_ENABLE = 'cg_eng_dclp2_enable'
    CG_ENG_DCLP2_VOLT = 'cg_eng_dclp2_volt'
    CG_ENG_DCLP2_CURRENT = 'cg_eng_dclp2_current'
    CG_ENG_DCLP2_EFLAG = 'cg_eng_dclp2_eflag'
    CG_ENG_DCLP2_VSEL = 'cg_eng_dclp2_vsel'
    CG_ENG_DCLP2_CLIM = 'cg_eng_dclp2_clim'
    CG_ENG_DCLP2_PROT = 'cg_eng_dclp2_prot'
    CG_ENG_DCLP3_ENABLE = 'cg_eng_dclp3_enable'
    CG_ENG_DCLP3_VOLT = 'cg_eng_dclp3_volt'
    CG_ENG_DCLP3_CURRENT = 'cg_eng_dclp3_current'
    CG_ENG_DCLP3_EFLAG = 'cg_eng_dclp3_eflag'
    CG_ENG_DCLP3_VSEL = 'cg_eng_dclp3_vsel'
    CG_ENG_DCLP3_CLIM = 'cg_eng_dclp3_clim'
    CG_ENG_DCLP3_PROT = 'cg_eng_dclp3_prot'
    CG_ENG_DCLP4_ENABLE = 'cg_eng_dclp4_enable'
    CG_ENG_DCLP4_VOLT = 'cg_eng_dclp4_volt'
    CG_ENG_DCLP4_CURRENT = 'cg_eng_dclp4_current'
    CG_ENG_DCLP4_EFLAG = 'cg_eng_dclp4_eflag'
    CG_ENG_DCLP4_VSEL = 'cg_eng_dclp4_vsel'
    CG_ENG_DCLP4_CLIM = 'cg_eng_dclp4_clim'
    CG_ENG_DCLP4_PROT = 'cg_eng_dclp4_prot'
    CG_ENG_DCLP5_ENABLE = 'cg_eng_dclp5_enable'
    CG_ENG_DCLP5_VOLT = 'cg_eng_dclp5_volt'
    CG_ENG_DCLP5_CURRENT = 'cg_eng_dclp5_current'
    CG_ENG_DCLP5_EFLAG = 'cg_eng_dclp5_eflag'
    CG_ENG_DCLP5_VSEL = 'cg_eng_dclp5_vsel'
    CG_ENG_DCLP5_CLIM = 'cg_eng_dclp5_clim'
    CG_ENG_DCLP5_PROT = 'cg_eng_dclp5_prot'
    CG_ENG_DCLP6_ENABLE = 'cg_eng_dclp6_enable'
    CG_ENG_DCLP6_VOLT = 'cg_eng_dclp6_volt'
    CG_ENG_DCLP6_CURRENT = 'cg_eng_dclp6_current'
    CG_ENG_DCLP6_EFLAG = 'cg_eng_dclp6_eflag'
    CG_ENG_DCLP6_VSEL = 'cg_eng_dclp6_vsel'
    CG_ENG_DCLP6_CLIM = 'cg_eng_dclp6_clim'
    CG_ENG_DCLP6_PROT = 'cg_eng_dclp6_prot'
    CG_ENG_DCLP7_ENABLE = 'cg_eng_dclp7_enable'
    CG_ENG_DCLP7_VOLT = 'cg_eng_dclp7_volt'
    CG_ENG_DCLP7_CURRENT = 'cg_eng_dclp7_current'
    CG_ENG_DCLP7_EFLAG = 'cg_eng_dclp7_eflag'
    CG_ENG_DCLP7_VSEL = 'cg_eng_dclp7_vsel'
    CG_ENG_DCLP7_CLIM = 'cg_eng_dclp7_clim'
    CG_ENG_DCLP7_PROT = 'cg_eng_dclp7_prot'
    CG_ENG_DCLP8_ENABLE = 'cg_eng_dclp8_enable'
    CG_ENG_DCLP8_VOLT = 'cg_eng_dclp8_volt'
    CG_ENG_DCLP8_CURRENT = 'cg_eng_dclp8_current'
    CG_ENG_DCLP8_EFLAG = 'cg_eng_dclp8_eflag'
    CG_ENG_DCLP8_VSEL = 'cg_eng_dclp8_vsel'
    CG_ENG_DCLP8_CLIM = 'cg_eng_dclp8_clim'
    CG_ENG_DCLP8_PROT = 'cg_eng_dclp8_prot'
    CG_ENG_DCL_PORT_STATUS = 'cg_eng_dcl_port_status'
    # Per-port data logger status (ports 1-8)
    CG_ENG_PORT_DLOG1_NAME = 'cg_eng_port_dlog1_name'
    CG_ENG_PORT_DLOG1_STATE = 'cg_eng_port_dlog1_state'
    CG_ENG_PORT_DLOG1_TX = 'cg_eng_port_dlog1_tx'
    CG_ENG_PORT_DLOG1_RX = 'cg_eng_port_dlog1_rx'
    CG_ENG_PORT_DLOG1_LOG = 'cg_eng_port_dlog1_log'
    CG_ENG_PORT_DLOG1_GOOD = 'cg_eng_port_dlog1_good'
    CG_ENG_PORT_DLOG1_BAD = 'cg_eng_port_dlog1_bad'
    CG_ENG_PORT_DLOG1_BB = 'cg_eng_port_dlog1_bb'
    CG_ENG_PORT_DLOG1_LD = 'cg_eng_port_dlog1_ld'
    CG_ENG_PORT_DLOG1_LC = 'cg_eng_port_dlog1_lc'
    CG_ENG_PORT_DLOG1_LU = 'cg_eng_port_dlog1_lu'
    CG_ENG_PORT_DLOG2_NAME = 'cg_eng_port_dlog2_name'
    CG_ENG_PORT_DLOG2_STATE = 'cg_eng_port_dlog2_state'
    CG_ENG_PORT_DLOG2_TX = 'cg_eng_port_dlog2_tx'
    CG_ENG_PORT_DLOG2_RX = 'cg_eng_port_dlog2_rx'
    CG_ENG_PORT_DLOG2_LOG = 'cg_eng_port_dlog2_log'
    CG_ENG_PORT_DLOG2_GOOD = 'cg_eng_port_dlog2_good'
    CG_ENG_PORT_DLOG2_BAD = 'cg_eng_port_dlog2_bad'
    CG_ENG_PORT_DLOG2_BB = 'cg_eng_port_dlog2_bb'
    CG_ENG_PORT_DLOG2_LD = 'cg_eng_port_dlog2_ld'
    CG_ENG_PORT_DLOG2_LC = 'cg_eng_port_dlog2_lc'
    CG_ENG_PORT_DLOG2_LU = 'cg_eng_port_dlog2_lu'
    CG_ENG_PORT_DLOG3_NAME = 'cg_eng_port_dlog3_name'
    CG_ENG_PORT_DLOG3_STATE = 'cg_eng_port_dlog3_state'
    CG_ENG_PORT_DLOG3_TX = 'cg_eng_port_dlog3_tx'
    CG_ENG_PORT_DLOG3_RX = 'cg_eng_port_dlog3_rx'
    CG_ENG_PORT_DLOG3_LOG = 'cg_eng_port_dlog3_log'
    CG_ENG_PORT_DLOG3_GOOD = 'cg_eng_port_dlog3_good'
    CG_ENG_PORT_DLOG3_BAD = 'cg_eng_port_dlog3_bad'
    CG_ENG_PORT_DLOG3_BB = 'cg_eng_port_dlog3_bb'
    CG_ENG_PORT_DLOG3_LD = 'cg_eng_port_dlog3_ld'
    CG_ENG_PORT_DLOG3_LC = 'cg_eng_port_dlog3_lc'
    CG_ENG_PORT_DLOG3_LU = 'cg_eng_port_dlog3_lu'
    CG_ENG_PORT_DLOG4_NAME = 'cg_eng_port_dlog4_name'
    CG_ENG_PORT_DLOG4_STATE = 'cg_eng_port_dlog4_state'
    CG_ENG_PORT_DLOG4_TX = 'cg_eng_port_dlog4_tx'
    CG_ENG_PORT_DLOG4_RX = 'cg_eng_port_dlog4_rx'
    CG_ENG_PORT_DLOG4_LOG = 'cg_eng_port_dlog4_log'
    CG_ENG_PORT_DLOG4_GOOD = 'cg_eng_port_dlog4_good'
    CG_ENG_PORT_DLOG4_BAD = 'cg_eng_port_dlog4_bad'
    CG_ENG_PORT_DLOG4_BB = 'cg_eng_port_dlog4_bb'
    CG_ENG_PORT_DLOG4_LD = 'cg_eng_port_dlog4_ld'
    CG_ENG_PORT_DLOG4_LC = 'cg_eng_port_dlog4_lc'
    CG_ENG_PORT_DLOG4_LU = 'cg_eng_port_dlog4_lu'
    CG_ENG_PORT_DLOG5_NAME = 'cg_eng_port_dlog5_name'
    CG_ENG_PORT_DLOG5_STATE = 'cg_eng_port_dlog5_state'
    CG_ENG_PORT_DLOG5_TX = 'cg_eng_port_dlog5_tx'
    CG_ENG_PORT_DLOG5_RX = 'cg_eng_port_dlog5_rx'
    CG_ENG_PORT_DLOG5_LOG = 'cg_eng_port_dlog5_log'
    CG_ENG_PORT_DLOG5_GOOD = 'cg_eng_port_dlog5_good'
    CG_ENG_PORT_DLOG5_BAD = 'cg_eng_port_dlog5_bad'
    CG_ENG_PORT_DLOG5_BB = 'cg_eng_port_dlog5_bb'
    CG_ENG_PORT_DLOG5_LD = 'cg_eng_port_dlog5_ld'
    CG_ENG_PORT_DLOG5_LC = 'cg_eng_port_dlog5_lc'
    CG_ENG_PORT_DLOG5_LU = 'cg_eng_port_dlog5_lu'
    CG_ENG_PORT_DLOG6_NAME = 'cg_eng_port_dlog6_name'
    CG_ENG_PORT_DLOG6_STATE = 'cg_eng_port_dlog6_state'
    CG_ENG_PORT_DLOG6_TX = 'cg_eng_port_dlog6_tx'
    CG_ENG_PORT_DLOG6_RX = 'cg_eng_port_dlog6_rx'
    CG_ENG_PORT_DLOG6_LOG = 'cg_eng_port_dlog6_log'
    CG_ENG_PORT_DLOG6_GOOD = 'cg_eng_port_dlog6_good'
    CG_ENG_PORT_DLOG6_BAD = 'cg_eng_port_dlog6_bad'
    CG_ENG_PORT_DLOG6_BB = 'cg_eng_port_dlog6_bb'
    CG_ENG_PORT_DLOG6_LD = 'cg_eng_port_dlog6_ld'
    CG_ENG_PORT_DLOG6_LC = 'cg_eng_port_dlog6_lc'
    CG_ENG_PORT_DLOG6_LU = 'cg_eng_port_dlog6_lu'
    CG_ENG_PORT_DLOG7_NAME = 'cg_eng_port_dlog7_name'
    CG_ENG_PORT_DLOG7_STATE = 'cg_eng_port_dlog7_state'
    CG_ENG_PORT_DLOG7_TX = 'cg_eng_port_dlog7_tx'
    CG_ENG_PORT_DLOG7_RX = 'cg_eng_port_dlog7_rx'
    CG_ENG_PORT_DLOG7_LOG = 'cg_eng_port_dlog7_log'
    CG_ENG_PORT_DLOG7_GOOD = 'cg_eng_port_dlog7_good'
    CG_ENG_PORT_DLOG7_BAD = 'cg_eng_port_dlog7_bad'
    CG_ENG_PORT_DLOG7_BB = 'cg_eng_port_dlog7_bb'
    CG_ENG_PORT_DLOG7_LD = 'cg_eng_port_dlog7_ld'
    CG_ENG_PORT_DLOG7_LC = 'cg_eng_port_dlog7_lc'
    CG_ENG_PORT_DLOG7_LU = 'cg_eng_port_dlog7_lu'
    CG_ENG_PORT_DLOG8_NAME = 'cg_eng_port_dlog8_name'
    CG_ENG_PORT_DLOG8_STATE = 'cg_eng_port_dlog8_state'
    CG_ENG_PORT_DLOG8_TX = 'cg_eng_port_dlog8_tx'
    CG_ENG_PORT_DLOG8_RX = 'cg_eng_port_dlog8_rx'
    CG_ENG_PORT_DLOG8_LOG = 'cg_eng_port_dlog8_log'
    CG_ENG_PORT_DLOG8_GOOD = 'cg_eng_port_dlog8_good'
    CG_ENG_PORT_DLOG8_BAD = 'cg_eng_port_dlog8_bad'
    CG_ENG_PORT_DLOG8_BB = 'cg_eng_port_dlog8_bb'
    # NOTE: LC/LD declaration order differs from ports 1-7; harmless, as
    # declaration order carries no meaning here.
    CG_ENG_PORT_DLOG8_LC = 'cg_eng_port_dlog8_lc'
    CG_ENG_PORT_DLOG8_LD = 'cg_eng_port_dlog8_ld'
    CG_ENG_PORT_DLOG8_LU = 'cg_eng_port_dlog8_lu'
    # Data logger manager status
    CG_ENG_DMGRSTATUS_DATE = 'cg_eng_dmgrstatus_date'
    CG_ENG_DMGRSTATUS_TIME = 'cg_eng_dmgrstatus_time'
    CG_ENG_DMGRSTATUS_ACTIVE = 'cg_eng_dmgrstatus_active'
    CG_ENG_DMGRSTATUS_STARTED = 'cg_eng_dmgrstatus_started'
    CG_ENG_DMGRSTATUS_HALTED = 'cg_eng_dmgrstatus_halted'
    CG_ENG_DMGRSTATUS_FAILED = 'cg_eng_dmgrstatus_failed'
    CG_ENG_DMGRSTATUS_MAP = 'cg_eng_dmgrstatus_map'
    CG_ENG_DMGRSTATUS_UPDATE = 'cg_eng_dmgrstatus_update'
class CgStcEngStcParserDataAbstractParticle(DataParticle):
"""
Abstract Class for parsing data from the cg_stc_eng_stc data set
"""
_data_particle_type = None
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
a particle with the appropriate tag.
@throws SampleException If there is a problem with sample creation
"""
result = []
# Instantiate the param_dict
params = self._build_param_dict()
# Go through the param_dict dictionary for every definition
params.update(self.raw_data)
encoding_errors = params.get_encoding_errors()
self._encoding_errors = encoding_errors
all_params = params.get_all()
for (key, value) in all_params.iteritems():
result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value})
log.debug("CgStcEngStcParserDataParticle %s", result)
return result
def _build_param_dict(self):
"""
Populate the parameter dictionary with cg_stc_eng_stc parameters.
For each parameter key, add match stirng, match lambda function,
and value formatting function.
"""
# Add parameter handlers to parameter dict.
p = DatasetParameterDict()
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_UTIME,
r'Platform.utime=(\d+\.\d+)',
lambda match : float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_TIME,
r'Platform.time=(.+?)(\r\n?|\n)',
lambda match : match.group(1),
str)
# msg
msg_cnts_regex = r'STATUS\.msg_cnts=C_GPS=(\d+),\D+=(\d+),\D+=(\d+),\D+\=(\d+),\D+=(\d+),\D+=(\d+)(\r\n?|\n)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_GPS,
msg_cnts_regex, lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_NTP,
msg_cnts_regex, lambda match : int(match.group(2)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_PPS,
msg_cnts_regex, lambda match : int(match.group(3)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_POWER_SYS,
msg_cnts_regex, lambda match : int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_SUPERV,
msg_cnts_regex, lambda match : int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_TELEM,
msg_cnts_regex, lambda match : int(match.group(6)), int)
# err cnts
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_GPS,
self.gen_err_cnts('C_GPS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_PPS,
self.gen_err_cnts('C_PPS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_CTL,
self.gen_err_cnts('C_CTL'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_STATUS,
self.gen_err_cnts('C_STATUS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_SUPERV,
self.gen_err_cnts('SUPERV'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_POWER_SYS,
self.gen_err_cnts('C_POWER_SYS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_TELEM_SYS,
self.gen_err_cnts('C_TELEM_SYS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_IRID,
self.gen_err_cnts('C_IRID'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_IMM,
self.gen_err_cnts('C_IMM'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_CPM1,
self.gen_err_cnts('CPM1'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_D_CTL,
self.gen_err_cnts('D_CTL'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_D_STATUS,
self.gen_err_cnts('D_STATUS'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOG_MGR,
self.gen_err_cnts('DLOG_MGR'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP1,
self.gen_err_cnts('DLOGP1'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP2,
self.gen_err_cnts('DLOGP2'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP3,
self.gen_err_cnts('DLOGP3'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP4,
self.gen_err_cnts('DLOGP4'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP5,
self.gen_err_cnts('DLOGP5'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP6,
self.gen_err_cnts('DLOGP6'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP7,
self.gen_err_cnts('DLOGP7'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_DLOGP8,
self.gen_err_cnts('DLOGP8'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_RCMD,
self.gen_err_cnts('RCMD'), lambda match : int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERR_BCMD,
self.gen_err_cnts('BCMD'), lambda match : int(match.group(1)), int)
# errmsg
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_GPS,
self.gen_errmsg('C_GPS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_PPS,
self.gen_errmsg('C_PPS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_CTL,
self.gen_errmsg('C_CTL'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_STATUS,
self.gen_errmsg('C_STATUS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_SUPERV,
self.gen_errmsg('SUPERV'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_POWER_SYS,
self.gen_errmsg('C_POWER_SYS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_TELEM_SYS,
self.gen_errmsg('C_TELEM_SYS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_IRID,
self.gen_errmsg('C_IRID'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_IMM,
self.gen_errmsg('C_IMM'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_CPM1,
self.gen_errmsg('CPM1'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_D_CTL,
self.gen_errmsg('D_CTL'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_D_STATUS,
self.gen_errmsg('D_STATUS'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOG_MGR,
self.gen_errmsg('DLOG_MGR'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP1,
self.gen_errmsg('DLOGP1'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP2,
self.gen_errmsg('DLOGP2'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP3,
self.gen_errmsg('DLOGP3'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP4,
self.gen_errmsg('DLOGP4'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP5,
self.gen_errmsg('DLOGP5'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP6,
self.gen_errmsg('DLOGP6'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP7,
self.gen_errmsg('DLOGP7'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_DLOGP8,
self.gen_errmsg('DLOGP8'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_RCMD,
self.gen_errmsg('RCMD'), lambda match : match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_BCMD,
self.gen_errmsg('BCMD'), lambda match : match.group(1), str)
# cpu
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_CPU_UPTIME,
r'CPU\.uptime=(.+?)(\r\n?|\n)',
lambda match : match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD1,
r'CPU\.load=(-?\d+\.\d+) (-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD5,
r'CPU\.load=(-?\d+\.\d+) (-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(2)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD15,
r'CPU\.load=(-?\d+\.\d+) (-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(3)),
float)
# memory
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_RAM,
'CPU\.memory=Ram: (\d+)k Free: (.+)k',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_FREE,
'CPU\.memory=Ram: (\d+)k Free: (.+)k',
lambda match: int(match.group(2)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_NPROC,
'CPU\.nproc=(\d+)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
# mpic
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_EFLAG,
'MPIC\.eflag=(.+?)(\r\n?|\n)',
lambda match: int('0x'+match.group(1),0),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_V,
'MPIC\.main_v=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_C,
'MPIC\.main_c=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_V,
'MPIC\.bbat_v=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_C,
'MPIC\.bbat_c=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP1,
r'MPIC\.temp=(-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP2,
r'MPIC\.temp=(-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(2)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HUMID,
r'MPIC\.humid=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_PRESS,
r'MPIC\.press=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GF_ENA,
r'MPIC\.gf_ena=(.+?)(\r\n?|\n)',
lambda match: int('0x'+match.group(1),0),
int)
#gflt
gflt_regex = r'MPIC\.gflt=(-?\d+\.\d+) (-?\d+\.\d+) (-?\d+\.\d+) (-?\d+\.\d+)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT1,
gflt_regex, lambda match: float(match.group(1)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT2,
gflt_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT3,
gflt_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT4,
gflt_regex, lambda match: float(match.group(4)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LD_ENA,
r'MPIC\.ld_ena=(.+?)(\r\n?|\n)',
lambda match: int('0x'+match.group(1),0),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LDET1,
r'MPIC\.ldet=(-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LDET2,
r'MPIC\.ldet=(-?\d+\.\d+) (-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(2)),
float)
# mpic hotel
mpic_hotel_regex = r'MPIC\.hotel=wake (\d+) ir (\d+) (-?\d+\.\d+) (-?\d+\.\d+) (\d+) ' \
'fwwf (\d+) (-?\d+\.\d+) (-?\d+\.\d+) (\d+) gps (\d+) sbd (\d+) (\d+) pps (\d+) ' \
'dcl (\w\w) esw (\w) dsl (\w)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_WSRC,
mpic_hotel_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_IRID,
mpic_hotel_regex, lambda match: int(match.group(2)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_IRID_V,
mpic_hotel_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_IRID_C,
mpic_hotel_regex, lambda match: float(match.group(4)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_IRID_E,
mpic_hotel_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_FW_WIFI,
mpic_hotel_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_FW_WIFI_V,
mpic_hotel_regex, lambda match: float(match.group(7)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_FW_WIFI_C,
mpic_hotel_regex, lambda match: float(match.group(8)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_FW_WIFI_E,
mpic_hotel_regex, lambda match: int(match.group(9)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GPS,
mpic_hotel_regex, lambda match: int(match.group(10)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_SBD,
mpic_hotel_regex, lambda match: int(match.group(11)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_SBD_CE_MSG,
mpic_hotel_regex, lambda match: int(match.group(12)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_PPS,
mpic_hotel_regex, lambda match: int(match.group(13)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_DCL,
mpic_hotel_regex, lambda match: int(match.group(14)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_ESW,
mpic_hotel_regex, lambda match: int(match.group(15)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_DSL,
mpic_hotel_regex, lambda match: int(match.group(16)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HBEAT_ENABLE,
r'MPIC\.cpm_hb=enable (\d+) dtime (\d+) threshold (\d+)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HBEAT_DTIME,
r'MPIC\.cpm_hb=enable (\d+) dtime (\d+) threshold (\d+)',
lambda match: int(match.group(2)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HBEAT_THRESHOLD,
r'MPIC\.cpm_hb=enable (\d+) dtime (\d+) threshold (\d+)',
lambda match: int(match.group(3)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_WAKE_CPM,
r'MPIC\.wake_cpm=wtc (-?\d+\.\d+) wpc (\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_WPC,
r'MPIC\.wake_cpm=wtc (\d+\.\d+) wpc (\d+)(\r\n?|\n)',
lambda match: int(match.group(2)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_EFLAG2,
r'MPIC\.stc_eflag2=(.+?)(\r\n?|\n)',
lambda match: int('0x'+match.group(1),0),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LAST_UPDATE,
r'MPIC\.last_update=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
# gps
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_MSG_DATE,
r'GPS\.timestamp=(\d{4}/\d{2}/\d{2}) (\d{2}:\d{2}:\d{2}\.\d{3})',
lambda match: match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_MSG_TIME,
r'GPS\.timestamp=(\d{4}/\d{2}/\d{2}) (\d{2}:\d{2}:\d{2}\.\d{3})',
lambda match: match.group(2),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_DATE,
r'GPS.date=(.+?)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_TIME,
r'GPS.time=(.+?)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LATSTR,
r'GPS.lat_str=(.+?)(\r\n?|\n)',
lambda match: match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LONSTR,
r'GPS.lon_str=(.+?)(\r\n?|\n)',
lambda match: match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LAT,
r'GPS.lat=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LON,
r'GPS.lon=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_SPD,
r'GPS.spd=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_COG,
r'GPS\.cog=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_FIX,
r'GPS\.fix_q=(\d+)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_NSAT,
r'GPS\.nsat=(\d+)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_HDOP,
r'GPS\.hdop=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_ALT,
r'GPS\.alt=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LAST_UPDATE,
r'GPS\.last_update=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
# ntp
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_NTP_REFID,
r'NTP\.refid=(.+?)(\r\n?|\n)',
lambda match: match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_NTP_OFFSET,
r'NTP\.offset=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_NTP_JITTER,
r'NTP\.jitter=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
# pps status
pps_regex = r'PPS\.status=C_PPS: NMEA_Lock: (.+) Delta: (.+) DeltaMin: ' \
'(.+) DeltaMax: (.+) BadPulses: (.+) TS: (.+?)(\r\n?|\n)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_LOCK,
pps_regex, lambda match: match.group(1), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_DELTA,
pps_regex, lambda match: int(match.group(2)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_DELTAMIN,
pps_regex, lambda match: int(match.group(3)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_DELTAMAX,
pps_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_BAD_PULSE,
pps_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_TIMESTAMP,
pps_regex, lambda match: match.group(6), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PPS_LAST_UPDATE,
r'PPS\.last_update=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_LOADSHED_STATUS,
r'LoadShed\.status=(.+?)(\r\n?|\n)',
lambda match: match.group(1),
str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_LOADSHED_LAST_UPDATE,
r'LoadShed\.last_update=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_ETH0,
r'sbc\.eth0=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_ETH1,
r'sbc\.eth1=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_LED0,
r'sbc\.led0=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_LED1,
r'sbc\.led1=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_LED2,
r'sbc\.led2=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPO0,
r'sbc\.gpo0=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPO1,
r'sbc\.gpo1=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPO2,
r'sbc\.gpo2=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPO3,
r'sbc\.gpo3=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPO4,
r'sbc\.gpo4=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO0,
r'sbc\.gpi0=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO1,
r'sbc\.gpi1=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO2,
r'sbc\.gpi2=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO3,
r'sbc\.gpi3=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO4,
r'sbc\.gpi4=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_GPIO5,
r'sbc\.gpi5=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_FB1,
r'sbc\.fb1=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_FB2,
r'sbc\.fb2=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_CE_LED,
r'sbc\.ce_led=(\d)(\r\n?|\n)',
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_WDT,
r'sbc\.wdt=(0x.+)(\r\n?|\n)',
lambda match: int(match.group(1),0),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_BID,
r'sbc\.bid=(0x.+)(\r\n?|\n)',
lambda match: int(match.group(1),0),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_SBC_BSTR,
r'sbc\.bstr=(0x.+)(\r\n?|\n)',
lambda match: int(match.group(1),0),
int)
# msg cnts d
msg_cnts_d_regex = r'STATUS.msg_cnts=D_GPS=(\d+), NTP=(\d+), D_PPS=(\d+), ' \
'SUPERV=(\d+), DLOG_MGR=(\d+)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_D_GPS,
msg_cnts_d_regex,
lambda match: int(match.group(1)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_D_NTP,
msg_cnts_d_regex,
lambda match: int(match.group(2)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_D_PPS,
msg_cnts_d_regex,
lambda match: int(match.group(3)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_D_SUPERV,
msg_cnts_d_regex,
lambda match: int(match.group(4)),
int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_D_DLOG_NGR,
msg_cnts_d_regex,
lambda match: int(match.group(5)),
int)
# dclp1
dclp1_regex = self.gen_dclp_regex(1)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_ENABLE,
dclp1_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_VOLT,
dclp1_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_CURRENT,
dclp1_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_EFLAG,
dclp1_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_VSEL,
dclp1_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_CLIM,
dclp1_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP1_PROT,
dclp1_regex, lambda match: int(match.group(7)), int)
# dclp2
dclp2_regex = self.gen_dclp_regex(2)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_ENABLE,
dclp2_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_VOLT,
dclp2_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_CURRENT,
dclp2_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_EFLAG,
dclp2_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_VSEL,
dclp2_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_CLIM,
dclp2_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP2_PROT,
dclp2_regex, lambda match: int(match.group(7)), int)
# dclp3
dclp3_regex = self.gen_dclp_regex(3)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_ENABLE,
dclp3_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_VOLT,
dclp3_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_CURRENT,
dclp3_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_EFLAG,
dclp3_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_VSEL,
dclp3_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_CLIM,
dclp3_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP3_PROT,
dclp3_regex, lambda match: int(match.group(7)), int)
# dclp4
dclp4_regex = self.gen_dclp_regex(4)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_ENABLE,
dclp4_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_VOLT,
dclp4_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_CURRENT,
dclp4_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_EFLAG,
dclp4_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_VSEL,
dclp4_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_CLIM,
dclp4_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP4_PROT,
dclp4_regex, lambda match: int(match.group(7)), int)
# dclp5
dclp5_regex = self.gen_dclp_regex(5)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_ENABLE,
dclp5_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_VOLT,
dclp5_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_CURRENT,
dclp5_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_EFLAG,
dclp5_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_VSEL,
dclp5_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_CLIM,
dclp5_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP5_PROT,
dclp5_regex, lambda match: int(match.group(7)), int)
# dclp6
dclp6_regex = self.gen_dclp_regex(6)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_ENABLE,
dclp6_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_VOLT,
dclp6_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_CURRENT,
dclp6_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_EFLAG,
dclp6_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_VSEL,
dclp6_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_CLIM,
dclp6_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP6_PROT,
dclp6_regex, lambda match: int(match.group(7)), int)
# dclp7
dclp7_regex = self.gen_dclp_regex(7)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_ENABLE,
dclp7_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_VOLT,
dclp7_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_CURRENT,
dclp7_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_EFLAG,
dclp7_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_VSEL,
dclp7_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_CLIM,
dclp7_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP7_PROT,
dclp7_regex, lambda match: int(match.group(7)), int)
# dclp8
dclp8_regex = self.gen_dclp_regex(8)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_ENABLE,
dclp8_regex, lambda match: int(match.group(1)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_VOLT,
dclp8_regex, lambda match: float(match.group(2)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_CURRENT,
dclp8_regex, lambda match: float(match.group(3)), float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_EFLAG,
dclp8_regex, lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_VSEL,
dclp8_regex, lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_CLIM,
dclp8_regex, lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCLP8_PROT,
dclp8_regex, lambda match: int(match.group(7)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DCL_PORT_STATUS,
r'DCL.pstatus.last_update=(-?\d+\.\d+)',
lambda match: float(match.group(1)),
float)
DLOG_REGEX = r'(.+) (.+) tx: (\d+) rx: (\d+) log: (\d+) '\
'good: (\d+) bad: (\d+) bb: (\d+) ld: '\
'([-\d]+)\s+(lc:\s+([-\d]+))?\s+lu:\s?([-\d.]+)(\r\n?|\n)'
DLOG_LC_REGEX = r'(.+) (.+) tx: (\d+) rx: (\d+) log: (\d+) '\
'good: (\d+) bad: (\d+) bb: (\d+) ld: '\
'([-\d]+)\s+(lc:\s+([-\d]+))\s+lu:\s?([-\d.]+)(\r\n?|\n)'
#1
dlogp1_regex = r'DLOGP1=' + DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_NAME,
dlogp1_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_STATE,
dlogp1_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_TX,
dlogp1_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_RX,
dlogp1_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_LOG,
dlogp1_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_GOOD,
dlogp1_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_BAD,
dlogp1_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_BB,
dlogp1_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_LD,
dlogp1_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_LC,
r'DLOGP1='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG1_LU,
dlogp1_regex,lambda match: float(match.group(12)),float)
#2
dlogp2_regex = r'DLOGP2=' + DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_NAME,
dlogp2_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_STATE,
dlogp2_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_TX,
dlogp2_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_RX,
dlogp2_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_LOG,
dlogp2_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_GOOD,
dlogp2_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_BAD,
dlogp2_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_BB,
dlogp2_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_LD,
dlogp2_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_LC,
r'DLOGP2='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG2_LU,
dlogp2_regex,lambda match: float(match.group(12)),float)
#3
dlogp3_regex = r'DLOGP3='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_NAME,
dlogp3_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_STATE,
dlogp3_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_TX,
dlogp3_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_RX,
dlogp3_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_LOG,
dlogp3_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_GOOD,
dlogp3_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_BAD,
dlogp3_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_BB,
dlogp3_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_LD,
dlogp3_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_LC,
r'DLOGP3='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG3_LU,
dlogp3_regex,lambda match: float(match.group(12)),float)
#4
dlogp4_regex = r'DLOGP4='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_NAME,
dlogp4_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_STATE,
dlogp4_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_TX,
dlogp4_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_RX,
dlogp4_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_LOG,
dlogp4_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_GOOD,
dlogp4_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_BAD,
dlogp4_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_BB,
dlogp4_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_LD,
dlogp4_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_LC,
r'DLOGP4='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG4_LU,
dlogp4_regex,lambda match: float(match.group(12)),float)
#5
dlogp5_regex = r'DLOGP5='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_NAME,
dlogp5_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_STATE,
dlogp5_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_TX,
dlogp5_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_RX,
dlogp5_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_LOG,
dlogp5_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_GOOD,
dlogp5_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_BAD,
dlogp5_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_BB,
dlogp5_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_LD,
dlogp5_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_LC,
r'DLOGP5='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG5_LU,
dlogp5_regex,lambda match: float(match.group(12)),float)
#6
dlogp6_regex = r'DLOGP6='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_NAME,
dlogp6_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_STATE,
dlogp6_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_TX,
dlogp6_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_RX,
dlogp6_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_LOG,
dlogp6_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_GOOD,
dlogp6_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_BAD,
dlogp6_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_BB,
dlogp6_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_LD,
dlogp6_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_LC,
r'DLOGP6='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG6_LU,
dlogp6_regex,lambda match: float(match.group(12)),float)
#7
dlogp7_regex = r'DLOGP7='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_NAME,
dlogp7_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_STATE,
dlogp7_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_TX,
dlogp7_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_RX,
dlogp7_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_LOG,
dlogp7_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_GOOD,
dlogp7_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_BAD,
dlogp7_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_BB,
dlogp7_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_LD,
dlogp7_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_LC,
r'DLOGP7='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG7_LU,
dlogp7_regex,lambda match: float(match.group(12)),float)
#8
dlogp8_regex = r'DLOGP8='+DLOG_REGEX
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_NAME,
dlogp8_regex,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_STATE,
dlogp8_regex,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_TX,
dlogp8_regex,lambda match: long(match.group(3)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_RX,
dlogp8_regex,lambda match: long(match.group(4)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_LOG,
dlogp8_regex,lambda match: long(match.group(5)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_GOOD,
dlogp8_regex,lambda match: long(match.group(6)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_BAD,
dlogp8_regex,lambda match: long(match.group(7)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_BB,
dlogp8_regex,lambda match: long(match.group(8)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_LD,
dlogp8_regex,lambda match: long(match.group(9)),long)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_LC,
r'DLOGP8='+DLOG_LC_REGEX,lambda match: float(match.group(11)),float)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_PORT_DLOG8_LU,
dlogp8_regex,lambda match: float(match.group(12)),float)
DMGR_REGEX = r'DMGR.status=dmgrstatus: (\d{4}/\d{2}/\d{2}) (\d{2}:\d{2}:\d{2}\.\d+) '\
'act:(\d+) str:(\d+) hlt:(\d+) fld:(\d+) map:(.+)(\r\n?|\n)'
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_DATE,
DMGR_REGEX,lambda match: match.group(1),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_TIME,
DMGR_REGEX,lambda match: match.group(2),str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_ACTIVE,
DMGR_REGEX,lambda match: int(match.group(3)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_STARTED,
DMGR_REGEX,lambda match: int(match.group(4)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_HALTED,
DMGR_REGEX,lambda match: int(match.group(5)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_FAILED,
DMGR_REGEX,lambda match: int(match.group(6)), int)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_MAP,
DMGR_REGEX,lambda match: match.group(7), str)
p.add(CgStcEngStcParserDataParticleKey.CG_ENG_DMGRSTATUS_UPDATE,
r'DMGR.last_update=(-?\d+\.\d+)(\r\n?|\n)',
lambda match: float(match.group(1)),
float)
return p
def gen_dclp_regex(self, port_number):
    """
    Generate the regex used to locate a DCL power port status line.

    :param port_number: DCL port number (1-8) interpolated into the pattern
    :return: regex string with 7 capture groups: enable flag, voltage,
        current, error flag, vsel, clim and prot values
    """
    # Both literal pieces are raw strings so that \d, \r and \n reach the
    # regex engine as regex escapes rather than Python string escapes; a
    # plain literal here would embed real CR/LF characters and make \d an
    # invalid escape (SyntaxWarning on modern Python).
    return (r'DCL\.port\.%s=(-?\d) +(-?\d+\.\d+) +(-?\d+\.\d+) +(\d) +vsel: '
            r'(-?\d+) clim: (-?\d+) prot: (-?\d+)(\r\n?|\n)' % port_number)
def gen_err_cnts(self, err_id_str):
    """
    Build the regex that pulls the error count for the given id string
    out of the STATUS.err_cnts line.
    """
    template = r'STATUS\.err_cnts=.*%s=(-?\d+).*(\r\n?|\n)'
    return template % err_id_str
def gen_errmsg(self, err_id_str):
    """
    Build the regex that extracts the last error message recorded for the
    given id string.
    """
    prefix = r'STATUS\.last_err\.%s' % err_id_str
    return prefix + r'=(.+?)(\r\n?|\n)'
class CgStcEngStcParserDataParticle(CgStcEngStcParserDataAbstractParticle):
    """
    Telemetered data particle for the cg_stc_eng_stc data set.

    The parsing behavior comes from the abstract base class; this subclass
    only selects the stream name under which the particle is published.
    """
    # Stream name for telemetered samples.
    _data_particle_type = CgDataParticleType.TELEMETERED
class CgStcEngStcParserRecoveredDataParticle(CgStcEngStcParserDataAbstractParticle):
    """
    Recovered data particle for the cg_stc_eng_stc data set.

    Identical to the telemetered variant except for the stream name under
    which the particle is published.
    """
    # Stream name for recovered samples.
    _data_particle_type = CgDataParticleType.RECOVERED
class CgStcEngStcParser(Parser):
    """
    Parser for cg_stc_eng_stc engineering files.

    Each input file yields exactly one data particle, so no chunker or
    sieve function is used and no parser state is kept.
    """

    def __init__(self,
                 config,
                 state,
                 stream_handle,
                 state_callback,
                 publish_callback,
                 exception_callback,
                 *args, **kwargs):
        # Pass None for the sieve function: the chunker is not used here.
        super(CgStcEngStcParser, self).__init__(config,
                                                stream_handle,
                                                state,
                                                None,
                                                state_callback,
                                                publish_callback,
                                                exception_callback)
        # No state to restore: one file maps to exactly one particle.

    def process_file(self):
        """
        Read the whole file and build the single particle it contains.
        @retval list holding the one extracted particle, or [] if the
            stream has already been consumed
        """
        particles = []
        self._eng_str = self._stream_handle.read()
        # get_records may be called more than once for this file; once the
        # stream is exhausted there is nothing left to do.
        if not self._eng_str:
            return particles
        # The particle timestamp comes from the Platform.utime field.
        utime_match = re.search(r'Platform.utime=(.+?)(\r\n?|\n)', self._eng_str)
        if not (utime_match and utime_match.group(1)):
            raise SampleException("STC Engineering input file has no UTIME associated with it")
        self._timestamp = ntplib.system_to_ntp_time(float(utime_match.group(1)))
        log.debug("extracting sample with timestamp %f", self._timestamp)
        sample = self._extract_sample(self._particle_class, None, self._eng_str,
                                      internal_timestamp=self._timestamp)
        if sample:
            particles.append(sample)
        return particles

    def get_records(self, num_records):
        """
        Go ahead and execute the data parsing. This involves
        getting data from the file, then parsing it and publishing.
        @param num_records The number of records to gather
        @retval Return the list of particles requested, [] if none available
        """
        if num_records <= 0:
            return []
        # Only one file producing one sample: process the whole thing.
        particles = self.process_file()
        if particles:
            self._publish_sample(particles)
        # There is no state to save; flag the file as ingested even when
        # nothing was published, since the whole file has been processed.
        self._state_callback(None, True)
        return particles

    def set_state(self, state):
        """
        No-op override: this parser keeps no state.
        """
        pass
| bsd-2-clause |
mortonjt/scipy | tools/refguide_check.py | 10 | 3651 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
"""
import sys
import re
import copy
import inspect
from argparse import ArgumentParser, REMAINDER
import scipy
from scipy import (cluster, constants, fftpack, integrate, interpolate, io,
linalg, misc, ndimage, odr, optimize, signal, sparse,
spatial, special, stats)
# TODO: sparse.csgraph, sparse.linalg, stats.mstats, cluster.vq,
# cluster.hierarchy
def find_funcnames(module):
funcnames = set()
# 3 spaces followed by function name; only function names listed in
# refguide are indented like this (mostly, there may be some false
# positives)
pattern = re.compile("(\s\s\s[a-z_0-9A-Z]+)")
for line in module.__doc__.splitlines():
res = re.search(pattern, line)
if res is not None:
funcname = res.groups()[0].lstrip()
funcnames.add(funcname)
return funcnames
def get_all_dict(module):
    """Return a copy of the __all__ list with irrelevant items removed.

    Removes the ``__future__`` import names and any entries that refer to
    submodules rather than functions/objects.
    """
    all = copy.deepcopy(module.__all__)
    for name in ['absolute_import', 'division', 'print_function']:
        try:
            all.remove(name)
        except ValueError:
            pass
    # Filter out submodules.  The original code removed items from ``all``
    # while iterating over it, which skips the element following each
    # removal (hence its "somehow some modules survive" double loop);
    # building a new list avoids that bug entirely.
    all = [name for name in all if not inspect.ismodule(getattr(module, name))]
    return all
def compare(all, funcnames):
    """Return sets of objects only in one of __all__, refguide."""
    all_names = set(all)
    ref_names = set(funcnames)
    only_all = all_names - ref_names
    only_ref = ref_names - all_names
    return only_all, only_ref
def report(all, funcnames, module_name):
    """Print out a report for the module"""
    print("Number of functions in __all__: %i" % len(all))
    print("Number of functions in refguide: %i" % len(funcnames))
    only_all, only_ref = compare(all, funcnames)
    if not only_all and not only_ref:
        print("\nAll good!")
        return
    separator = "------------------------------------------"
    if only_all:
        print("")
        print("Objects in %s.__all__ but not in refguide:" % module_name)
        print(separator)
        for name in only_all:
            print(name)
    if only_ref:
        print("")
        print("Objects in refguide but not in %s.__all__:" % module_name)
        print(separator)
        for name in only_ref:
            print(name)
def main(argv):
    """Parse the submodule name from *argv* and print the report for it."""
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_name", metavar="ARGS", default=[],
                        nargs=REMAINDER, help="Valid Scipy submodule name")
    args = parser.parse_args(argv)
    # Only the first remainder argument is used as the submodule name.
    module_name = args.module_name[0]
    module = getattr(scipy, module_name)
    funcnames = find_funcnames(module)
    all = get_all_dict(module)
    report(all, funcnames, module_name)
if __name__ == '__main__':
    main(argv=sys.argv[1:])
| bsd-3-clause |
0x0mar/phpsploit | deps/pyparsing-2.0.2/examples/sql2dot.py | 6 | 3033 | #!/usr/bin/python
# sql2dot.py
#
# Creates table graphics by parsing SQL table DML commands and
# generating DOT language output.
#
# Adapted from a post at http://energyblog.blogspot.com/2006/04/blog-post_20.html.
#
# Sample DDL input used by the demo below; upper-cased because the
# pyparsing grammar matches upper-case SQL keywords.
sampleSQL = """
create table student
(
student_id integer primary key,
firstname varchar(20),
lastname varchar(40),
address1 varchar(80),
address2 varchar(80),
city varchar(30),
state varchar(2),
zipcode varchar(10),
dob date
);
create table classes
(
class_id integer primary key,
id varchar(8),
maxsize integer,
instructor varchar(40)
);
create table student_registrations
(
reg_id integer primary key,
student_id integer,
class_id integer
);
alter table only student_registrations
add constraint students_link
foreign key
(student_id) references students(student_id);
alter table only student_registrations
add constraint classes_link
foreign key
(class_id) references classes(class_id);
""".upper()
from pyparsing import Literal, CaselessLiteral, Word, delimitedList \
,Optional, Combine, Group, alphas, nums, alphanums, Forward \
, oneOf, sglQuotedString, OneOrMore, ZeroOrMore, CharsNotIn \
, replaceWith
# A parenthesized group: "(" ... ")" with no nested parentheses.
skobki = "(" + ZeroOrMore(CharsNotIn(")")) + ")"
field_def = OneOrMore(Word(alphas,alphanums+"_\"':-") | skobki)
def field_act(s,loc,tok):
    # Render a column as a DOT record port: "<name> name type ...",
    # escaping double quotes for the DOT label syntax.
    return ("<"+tok[0]+"> " + " ".join(tok)).replace("\"","\\\"")
field_def.setParseAction(field_act)
field_list_def = delimitedList( field_def )
def field_list_act(toks):
    # Join columns with the DOT record-field separator.
    return " | ".join(toks)
field_list_def.setParseAction(field_list_act)
create_table_def = Literal("CREATE") + "TABLE" + Word(alphas,alphanums+"_").setResultsName("tablename") + \
    "("+field_list_def.setResultsName("columns")+")"+ ";"
def create_table_act(toks):
    # Emit a record-shaped DOT node for the table.
    return """"%(tablename)s" [\n\t label="<%(tablename)s> %(tablename)s | %(columns)s"\n\t shape="record"\n];""" % toks
create_table_def.setParseAction(create_table_act)
add_fkey_def=Literal("ALTER")+"TABLE"+"ONLY" + Word(alphanums+"_").setResultsName("fromtable") + "ADD" \
    + "CONSTRAINT" + Word(alphanums+"_") + "FOREIGN"+"KEY"+"("+Word(alphanums+"_").setResultsName("fromcolumn")+")" \
    +"REFERENCES"+Word(alphanums+"_").setResultsName("totable")+"("+Word(alphanums+"_").setResultsName("tocolumn")+")"+";"
def add_fkey_act(toks):
    # Emit a DOT edge from the referencing column port to the referenced one.
    return """ "%(fromtable)s":%(fromcolumn)s -> "%(totable)s":%(tocolumn)s """ % toks
add_fkey_def.setParseAction(add_fkey_act)
# Any other statement (and comments) are parsed and replaced with "".
other_statement_def = ( OneOrMore(CharsNotIn(";") ) + ";")
other_statement_def.setParseAction( replaceWith("") )
comment_def = "--" + ZeroOrMore(CharsNotIn("\n"))
comment_def.setParseAction( replaceWith("") )
statement_def = comment_def | create_table_def | add_fkey_def | other_statement_def
defs = OneOrMore(statement_def)
print("""digraph g { graph [ rankdir = "LR" ]; """)
for i in defs.parseString(sampleSQL):
    if i!="":
        print(i)
print("}")
fengshao0907/vitess | third_party/py/bson-0.3.2/bson/codec.py | 26 | 10257 | #!/usr/bin/python -OOOO
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
"""
Base codec functions for bson.
"""
import struct
import cStringIO
import calendar
from datetime import datetime
import warnings
from abc import ABCMeta, abstractmethod
# serialization optimizations
# Pre-compiled Struct objects avoid re-parsing format strings on every call.
length_struct = struct.Struct('<i')
int64_struct = struct.Struct('<q')
uint64_struct = struct.Struct('<Q')
binary_struct = struct.Struct('<ib')
double_struct = struct.Struct('<d')
boolean_struct = struct.Struct('<b')
# Bound-method aliases: the attribute lookups are hoisted once at import time.
unpack_length = length_struct.unpack_from
unpack_binary_struct = binary_struct.unpack_from
# Error Classes
class MissingClassDefinition(ValueError):
    """Raised when decoding meets a class name that was never registered."""
    def __init__(self, class_name):
        message = "No class definition for class %s" % (class_name,)
        super(MissingClassDefinition, self).__init__(message)
#
# Warning Classes
class MissingTimezoneWarning(RuntimeWarning):
    """Warning emitted when a naive datetime is encoded as if it were UTC."""
    def __init__(self, *args):
        arg_list = list(args)
        if not arg_list:
            # Default message when no explicit text is supplied.
            arg_list = ["Input datetime object has no tzinfo, assuming UTC."]
        super(MissingTimezoneWarning, self).__init__(*arg_list)
#
# Traversal Step
class TraversalStep(object):
    """One step of a document traversal: the parent object and the key in it."""
    def __init__(self, parent, key):
        self.parent = parent
        self.key = key
#
# Custom Object Codec
class BSONCoding(object):
    """Abstract base class for objects that can be round-tripped via BSON.

    NOTE(review): ``__metaclass__`` is the Python 2 spelling; under
    Python 3 this assignment is ignored and the class is not abstract.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def bson_encode(self):
        """Return a dict of values representing this object."""
        pass
    @abstractmethod
    def bson_init(self, raw_values):
        """Initialize this object from a dict produced by bson_encode()."""
        pass
# Registry of classes that may be reconstructed by decode_object().
classes = {}

def import_class(cls):
    """Register *cls* for decoding if it is a BSONCoding subclass."""
    if not issubclass(cls, BSONCoding):
        return
    global classes
    classes[cls.__name__] = cls

def import_classes(*args):
    """Register every class passed as an argument."""
    for cls in args:
        import_class(cls)

def import_classes_from_modules(*args):
    """Register every BSONCoding subclass found in the given modules.

    BUG FIX: the original iterated ``module.__dict__`` directly, which
    yields attribute *names* (strings) rather than the attributes
    themselves, so no class was ever registered.  Iterate the values and
    only consider actual classes so ``issubclass`` cannot raise.
    """
    for module in args:
        for item in module.__dict__.values():
            if isinstance(item, type) and issubclass(item, BSONCoding):
                import_class(item)
def encode_object(obj, traversal_stack, generator_func):
    """Encode a BSONCoding instance as a document, embedding its class name."""
    values = obj.bson_encode()
    class_name = obj.__class__.__name__
    # The marker key lets decode_object() recover the original class.
    values["$$__CLASS_NAME__$$"] = class_name
    return encode_document(values, traversal_stack, obj, generator_func)
def encode_object_element(name, value, traversal_stack, generator_func):
    """Encode a named BSONCoding instance as a document element (type 0x03)."""
    return "\x03" + encode_cstring(name) + \
        encode_object(value, traversal_stack,
            generator_func = generator_func)
class _EmptyClass(object):
    """Shell class used by decode_object() to build instances without __init__."""
    pass
def decode_object(raw_values):
    """Rebuild a registered BSONCoding instance from decoded raw values.

    Raises MissingClassDefinition if the embedded class name was never
    registered via import_class().
    """
    global classes
    class_name = raw_values["$$__CLASS_NAME__$$"]
    try:
        cls = classes[class_name]
    except KeyError:
        # "except KeyError, e" was Python 2-only syntax and the bound
        # exception was unused; this form works on Python 2.6+ and 3.x.
        raise MissingClassDefinition(class_name)
    # Bypass cls.__init__: instances are restored purely from raw_values.
    retval = _EmptyClass()
    retval.__class__ = cls
    retval.bson_init(raw_values)
    return retval
#
# Codec Logic
def encode_string(value):
    """Encode *value* as a BSON string: int32 byte length (including the
    trailing NUL), the UTF-8 bytes, then a NUL byte."""
    utf8 = value.encode("utf8")
    size = len(utf8)
    return struct.pack("<i%dsb" % (size,), size + 1, utf8, 0)
def decode_string(data, base):
    """Decode a BSON string at *base*: returns (new offset, raw utf8 value)."""
    length = unpack_length(data, base)[0]
    # length includes the trailing NUL, which is excluded from the value.
    value = data[base + 4: base + 4 + length - 1]
    return (base + 4 + length, value)
def encode_cstring(value):
    """Encode *value* as a NUL-terminated cstring (Python 2: unicode -> utf8)."""
    if isinstance(value, unicode):
        value = value.encode("utf8")
    return value + "\x00"
def decode_cstring(data, base):
    """Decode a NUL-terminated cstring starting at *base*.

    Returns (offset just past the terminator, raw undecoded name).
    NOTE(msolomon) decoding the name to utf8 here added a depressing
    amount of overhead and seems unnecessary for simple cstrings, so the
    raw slice is returned instead.
    """
    terminator = data.index('\x00', base)
    return (terminator + 1, data[base:terminator])
def encode_binary(value):
    """Encode bytes as BSON generic binary (subtype 0): int32 length + data."""
    return binary_struct.pack(len(value), 0) + value
def decode_binary(data, base):
    """Decode BSON binary at *base*: returns (new offset, raw bytes)."""
    length, binary_type = unpack_binary_struct(data, base)
    # binary_type (the subtype byte) is read but intentionally ignored.
    return (base + 5 + length, data[base + 5:base + 5 + length])
def decode_double(data, base):
    """Decode a little-endian float64 at *base*: returns (new offset, value)."""
    return (base + 8, double_struct.unpack_from(data, base)[0])
def encode_double_element(name, value):
    """Encode a named float64 element (type 0x01)."""
    return "\x01" + encode_cstring(name) + double_struct.pack(value)
def decode_double_element(data, base):
    """Decode a 0x01 element: returns (new offset, name, float value)."""
    base, name = decode_cstring(data, base + 1)
    base, value = decode_double(data, base)
    return (base, name, value)
def encode_string_element(name, value):
    """Encode a named string element (type 0x02)."""
    return "\x02" + encode_cstring(name) + encode_string(value)
def decode_string_element(data, base):
    """Decode a 0x02 element: returns (new offset, name, decoded unicode)."""
    base, name = decode_cstring(data, base + 1)
    base, value = decode_string(data, base)
    value = value.decode("utf8")
    return (base, name, value)
def encode_value(name, value, buf, traversal_stack, generator_func):
    """Append the BSON element for (name, value) to *buf*, dispatching on type.

    NOTE(review): isinstance(value, bool) is tested *after* int, and bool
    is a subclass of int, so booleans take the int32 branch and the bool
    branch is unreachable -- confirm whether this wire format is intended.
    """
    if isinstance(value, str):
        buf.write(encode_binary_element(name, value))
    elif isinstance(value, unicode):
        buf.write(encode_string_element(name, value))
    elif isinstance(value, int):
        # Values outside the signed 32-bit range are promoted to int64.
        if value < -0x80000000 or value > 0x7fffffff:
            buf.write(encode_int64_element(name, value))
        else:
            buf.write(encode_int32_element(name, value))
    elif isinstance(value, long):
        if value <= 0x7fffffffffffffff:
            buf.write(encode_int64_element(name, value))
        else:
            # Larger values use the non-standard unsigned 64-bit element.
            buf.write(encode_uint64_element(name, value))
    elif isinstance(value, bool):
        buf.write(encode_boolean_element(name, value))
    elif value is None:
        buf.write(encode_none_element(name, value))
    elif isinstance(value, dict):
        buf.write(encode_document_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, (list, tuple)):
        buf.write(encode_array_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, float):
        buf.write(encode_double_element(name, value))
    elif isinstance(value, datetime):
        buf.write(encode_UTCdatetime_element(name, value))
    elif isinstance(value, BSONCoding):
        buf.write(encode_object_element(name, value, traversal_stack,
            generator_func))
    else:
        raise ValueError('value has bad type', type(value))
def encode_document(obj, traversal_stack,
        traversal_parent = None,
        generator_func = None):
    """Encode dict *obj* as a BSON document: int32 size, elements, NUL."""
    buf = cStringIO.StringIO()
    key_iter = obj.iterkeys()
    if generator_func is not None:
        # A generator_func may reorder/filter keys based on the traversal.
        key_iter = generator_func(obj, traversal_stack)
    for name in key_iter:
        value = obj[name]
        traversal_stack.append(TraversalStep(traversal_parent or obj, name))
        encode_value(name, value, buf, traversal_stack, generator_func)
        traversal_stack.pop()
    e_list = buf.getvalue()
    e_list_length = len(e_list)
    # Total size = 4 (length int32) + payload + 1 (trailing NUL).
    return struct.pack("<i%dsb" % (e_list_length,), e_list_length + 4 + 1,
        e_list, 0)
def encode_array(array, traversal_stack,
        traversal_parent = None,
        generator_func = None):
    """Encode a list/tuple as a BSON array document keyed by string indices."""
    buf = cStringIO.StringIO()
    for i, value in enumerate(array):
        traversal_stack.append(TraversalStep(traversal_parent or array, i))
        encode_value(str(i), value, buf, traversal_stack, generator_func)
        traversal_stack.pop()
    e_list = buf.getvalue()
    e_list_length = len(e_list)
    # Same framing as encode_document: int32 size, payload, trailing NUL.
    return struct.pack("<i%dsb" % (e_list_length,), e_list_length + 4 + 1,
        e_list, 0)
def decode_document(data, base):
    """Decode the document at *base*: returns (offset past it, dict/object).

    If the decoded dict carries the class-name marker key it is rebuilt
    into the registered class via decode_object().
    """
    length = unpack_length(data, base)[0]
    end_point = base + length
    base += 4
    retval = {}
    while base < end_point - 1:
        # The leading byte of each element selects its decoder.
        base, name, value = ELEMENT_DISPATCH[data[base]](data, base)
        retval[name] = value
    if "$$__CLASS_NAME__$$" in retval:
        retval = decode_object(retval)
    return (end_point, retval)
def encode_document_element(name, value, traversal_stack, generator_func):
    """Encode a named embedded document element (type 0x03)."""
    return "\x03" + encode_cstring(name) + \
        encode_document(value, traversal_stack,
            generator_func = generator_func)
def decode_document_element(data, base):
    """Decode a 0x03 element: returns (new offset, name, document)."""
    base, name = decode_cstring(data, base + 1)
    base, value = decode_document(data, base)
    return (base, name, value)
def encode_array_element(name, value, traversal_stack, generator_func):
    """Encode a named array element (type 0x04)."""
    return "\x04" + encode_cstring(name) + \
        encode_array(value, traversal_stack, generator_func = generator_func)
def _decode_array_document(data, base):
    """Decode an array document body into a list (element names ignored)."""
    length = unpack_length(data, base)[0]
    end_point = base + length
    base += 4
    retval = []
    while base < end_point - 1:
        base, name, value = ELEMENT_DISPATCH[data[base]](data, base)
        retval.append(value)
    return (end_point, retval)
def decode_array_element(data, base):
    """Decode a 0x04 element: returns (new offset, name, list)."""
    base, name = decode_cstring(data, base + 1)
    base, retval = _decode_array_document(data, base)
    return (base, name, retval)
def encode_binary_element(name, value):
    """Encode a named binary element (type 0x05)."""
    return "\x05" + encode_cstring(name) + encode_binary(value)
def decode_binary_element(data, base):
    """Decode a 0x05 element: returns (new offset, name, bytes)."""
    base, name = decode_cstring(data, base + 1)
    base, value = decode_binary(data, base)
    return (base, name, value)
def encode_boolean_element(name, value):
    """Encode a named boolean element (type 0x08) as a single byte."""
    return "\x08" + encode_cstring(name) + boolean_struct.pack(value)
def decode_boolean_element(data, base):
    """Decode a 0x08 element: returns (new offset, name, bool)."""
    base, name = decode_cstring(data, base + 1)
    value = bool(boolean_struct.unpack_from(data, base)[0])
    return (base + 1, name, value)
def encode_UTCdatetime_element(name, value):
    """Encode a named UTC datetime element (type 0x09) as ms since epoch."""
    value = int(round(calendar.timegm(value.utctimetuple()) * 1000 +
        (value.microsecond / 1000.0)))
    return "\x09" + encode_cstring(name) + int64_struct.pack(value)
def decode_UTCdatetime_element(data, base):
    """Decode a 0x09 element: returns (new offset, name, naive UTC datetime)."""
    base, name = decode_cstring(data, base + 1)
    value = datetime.utcfromtimestamp(
        int64_struct.unpack_from(data, base)[0] / 1000.0)
    return (base + 8, name, value)
def encode_none_element(name, value):
    """Encode a named null element (type 0x0A); *value* is ignored."""
    return "\x0a" + encode_cstring(name)
def decode_none_element(data, base):
    """Decode a 0x0A element: returns (new offset, name, None)."""
    base, name = decode_cstring(data, base + 1)
    return (base, name, None)
def encode_int32_element(name, value):
    """Encode a named int32 element (type 0x10)."""
    return "\x10" + encode_cstring(name) + length_struct.pack(value)
def decode_int32_element(data, base):
    """Decode a 0x10 element: returns (new offset, name, int)."""
    base, name = decode_cstring(data, base + 1)
    value = unpack_length(data, base)[0]
    return (base + 4, name, value)
def encode_int64_element(name, value):
    """Encode a named int64 element (type 0x12)."""
    return "\x12" + encode_cstring(name) + int64_struct.pack(value)
def encode_uint64_element(name, value):
    """Encode a named uint64 element (non-standard type 0x3F)."""
    return "\x3F" + encode_cstring(name) + uint64_struct.pack(value)
def decode_int64_element(data, base):
    """Decode a 0x12 element: returns (new offset, name, int)."""
    base, name = decode_cstring(data, base + 1)
    value = int64_struct.unpack_from(data, base)[0]
    return (base + 8, name, value)
def decode_uint64_element(data, base):
    """Decode a 0x3F element: returns (new offset, name, unsigned int)."""
    base, name = decode_cstring(data, base + 1)
    value = uint64_struct.unpack_from(data, base)[0]
    return (base + 8, name, value)
# Maps BSON element type bytes to the suffix of the matching decoder name.
ELEMENT_TYPES = {
    0x01 : "double",
    0x02 : "string",
    0x03 : "document",
    0x04 : "array",
    0x05 : "binary",
    0x08 : "boolean",
    0x09 : "UTCdatetime",
    0x0A : "none",
    0x10 : "int32",
    0x12 : "int64",
    0x3F : "uint64"
    }
# optimize dispatch once all methods are known
# Keys are single-character strings (Python 2 byte strings); values are the
# module-level decode_<name>_element functions.  iteritems() is Python 2-only.
ELEMENT_DISPATCH = dict([(chr(i), globals()["decode_" + name + "_element"])
    for i, name in ELEMENT_TYPES.iteritems()])
| bsd-3-clause |
rhiever/MarkovNetwork | MarkovNetwork/MarkovNetwork.py | 1 | 10471 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
class MarkovNetwork(object):
    """A Markov Network for neural computing."""

    max_markov_gate_inputs = 4
    max_markov_gate_outputs = 4

    def __init__(self, num_input_states, num_memory_states, num_output_states,
                 random_genome_length=10000, seed_num_markov_gates=4,
                 probabilistic=True, genome=None):
        """Sets up a Markov Network

        Parameters
        ----------
        num_input_states: int
            The number of input states in the Markov Network
        num_memory_states: int
            The number of internal memory states in the Markov Network
        num_output_states: int
            The number of output states in the Markov Network
        random_genome_length: int (default: 10000)
            Length of the genome if it is being randomly generated
            This parameter is ignored if "genome" is not None
        seed_num_markov_gates: int (default: 4)
            The number of Markov Gates with which to seed the Markov Network
            It is important to ensure that randomly-generated Markov Networks have at least a few Markov Gates to begin with
            May sometimes result in fewer Markov Gates if the Markov Gates are randomly seeded in the same location
            This parameter is ignored if "genome" is not None
        probabilistic: bool (default: True)
            Flag indicating whether the Markov Gates are probabilistic or deterministic
        genome: array-like (default: None)
            An array representation of the Markov Network to construct
            All values in the array must be integers in the range [0, 255]
            If None, then a random Markov Network will be generated

        Returns
        -------
        None

        """
        self.num_input_states = num_input_states
        self.num_memory_states = num_memory_states
        self.num_output_states = num_output_states
        # BUG FIX: np.bool was a deprecated alias for the builtin bool and was
        # removed in NumPy 1.24; using the builtin keeps the same semantics.
        self.states = np.zeros(num_input_states + num_memory_states + num_output_states, dtype=bool)
        self.markov_gates = []
        self.markov_gate_input_ids = []
        self.markov_gate_output_ids = []

        if genome is None:
            self.genome = np.random.randint(0, 256, random_genome_length).astype(np.uint8)

            # Seed the random genome with seed_num_markov_gates Markov Gates
            for _ in range(seed_num_markov_gates):
                start_index = np.random.randint(0, int(len(self.genome) * 0.8))
                self.genome[start_index] = 42
                self.genome[start_index + 1] = 213
        else:
            self.genome = np.array(genome, dtype=np.uint8)

        self._setup_markov_network(probabilistic)

    def _setup_markov_network(self, probabilistic):
        """Interprets the internal genome into the corresponding Markov Gates

        Parameters
        ----------
        probabilistic: bool
            Flag indicating whether the Markov Gates are probabilistic or deterministic

        Returns
        -------
        None

        """
        for index_counter in range(self.genome.shape[0] - 1):
            # Sequence of 42 then 213 indicates a new Markov Gate
            if self.genome[index_counter] == 42 and self.genome[index_counter + 1] == 213:
                internal_index_counter = index_counter + 2

                # Determine the number of inputs and outputs for the Markov Gate
                num_inputs = (self.genome[internal_index_counter] % MarkovNetwork.max_markov_gate_inputs) + 1
                internal_index_counter += 1
                num_outputs = (self.genome[internal_index_counter] % MarkovNetwork.max_markov_gate_outputs) + 1
                internal_index_counter += 1

                # Make sure that the genome is long enough to encode this Markov Gate
                if (internal_index_counter +
                        (MarkovNetwork.max_markov_gate_inputs + MarkovNetwork.max_markov_gate_outputs) +
                        (2 ** num_inputs) * (2 ** num_outputs)) > self.genome.shape[0]:
                    continue

                # Determine the states that the Markov Gate will connect its inputs and outputs to
                input_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetwork.max_markov_gate_inputs][:num_inputs]
                input_state_ids = np.mod(input_state_ids, self.states.shape[0])
                internal_index_counter += MarkovNetwork.max_markov_gate_inputs

                output_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetwork.max_markov_gate_outputs][:num_outputs]
                output_state_ids = np.mod(output_state_ids, self.states.shape[0])
                internal_index_counter += MarkovNetwork.max_markov_gate_outputs

                self.markov_gate_input_ids.append(input_state_ids)
                self.markov_gate_output_ids.append(output_state_ids)

                # Interpret the probability table for the Markov Gate
                markov_gate = np.copy(self.genome[internal_index_counter:internal_index_counter + (2 ** num_inputs) * (2 ** num_outputs)])
                markov_gate = markov_gate.reshape((2 ** num_inputs, 2 ** num_outputs))

                if probabilistic:  # Probabilistic Markov Gates
                    markov_gate = markov_gate.astype(np.float64) / np.sum(markov_gate, axis=1, dtype=np.float64)[:, None]
                    # Precompute the cumulative sums for the activation function
                    markov_gate = np.cumsum(markov_gate, axis=1, dtype=np.float64)
                else:  # Deterministic Markov Gates
                    row_max_indices = np.argmax(markov_gate, axis=1)
                    markov_gate[:, :] = 0
                    markov_gate[np.arange(len(row_max_indices)), row_max_indices] = 1

                self.markov_gates.append(markov_gate)

    def activate_network(self, num_activations=1):
        """Activates the Markov Network

        Parameters
        ----------
        num_activations: int (default: 1)
            The number of times the Markov Network should be activated

        Returns
        -------
        None

        """
        # Save original input values
        original_input_values = np.copy(self.states[:self.num_input_states])
        for _ in range(num_activations):
            # NOTE: This routine can be refactored to use NumPy if larger MNs are being used
            # See implementation at https://github.com/rhiever/MarkovNetwork/blob/a381aa9919bb6898b56f678e08127ba6e0eef98f/MarkovNetwork/MarkovNetwork.py#L162:L169
            for markov_gate, mg_input_ids, mg_output_ids in zip(self.markov_gates, self.markov_gate_input_ids,
                                                                self.markov_gate_output_ids):

                mg_input_index, marker = 0, 1
                # Create an integer from bytes representation (loop is faster than previous implementation)
                for mg_input_id in reversed(mg_input_ids):
                    if self.states[mg_input_id]:
                        mg_input_index += marker
                    marker *= 2

                # Determine the corresponding output values for this Markov Gate
                roll = np.random.uniform()  # sets a roll value
                markov_gate_subarray = markov_gate[mg_input_index]  # selects a Markov Gate subarray

                # Searches for the first value where markov_gate > roll
                for i, markov_gate_element in enumerate(markov_gate_subarray):
                    if markov_gate_element >= roll:
                        mg_output_index = i
                        break

                # Converts the index into a string of '1's and '0's (binary representation)
                mg_output_values = bin(mg_output_index)  # bin() is much faster than np.binary_repr()
                # diff_len deals with the lack of the width argument there was on np.binary_repr()
                diff_len = mg_output_ids.shape[0] - (len(mg_output_values) - 2)

                # Loops through 'mg_output_values' and alter 'self.states'
                for i, mg_output_value in enumerate(mg_output_values[2:]):
                    if mg_output_value == '1':
                        self.states[mg_output_ids[i + diff_len]] = True

            # Replace original input values
            self.states[:self.num_input_states] = original_input_values

    def update_input_states(self, input_values):
        """Updates the input states with the provided inputs

        Parameters
        ----------
        input_values: array-like
            An array of integers containing the inputs for the Markov Network
            len(input_values) must be equal to num_input_states

        Returns
        -------
        None

        """
        if len(input_values) != self.num_input_states:
            raise ValueError('Invalid number of input values provided')

        self.states[:self.num_input_states] = input_values

    def get_output_states(self):
        """Returns an array of the current output state's values

        Parameters
        ----------
        None

        Returns
        -------
        output_states: array-like
            An array of the current output state's values

        """
        return np.array(self.states[-self.num_output_states:])
| mit |
rpmcpp/Audacity | lib-src/portmidi/pm_python/setup.py | 90 | 5532 | import sys
import os
import logging
from distutils.core import setup, Command
from distutils.extension import Extension
try:
from Cython.Distutils import build_ext
except ImportError:
logging.warn("Cython is preferred over pyrex for python3 compatibility.")
from Pyrex.Distutils import build_ext
# Long description assembled from the bundled docs for PyPI.
DESCRIPTION = open('README_PYTHON.txt').read()
CHANGES = open('CHANGES.txt').read()
TODO = open('TODO.txt').read()
EXTRAS = {}
long_description = DESCRIPTION + CHANGES + TODO
#import sys
#if "checkdocs" in sys.argv:
#    print long_description
METADATA = {
    'name': 'pyportmidi',
    'version': '0.0.7',
    'license': 'MIT License',
    'url': 'http://pypi.python.org/pyportmidi/',
    'author': 'John Harrison, Roger B. Dannenberg, Rene Dudfield, others...',
    'author_email': 'renesd@gmail.com',
    'maintainer': 'Rene Dudfield',
    'maintainer_email': 'renesd@gmail.com',
    'description': 'Python Wrappings for PortMidi #python. CHANGES: new package layout.',
    'long_description': long_description,
    'classifiers': [
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Cython',
        'Programming Language :: C',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Topic :: Multimedia :: Sound/Audio :: MIDI',
        'Topic :: Software Development :: Libraries',
    ],
}
if "bdist_msi" in sys.argv:
    # hack the version name to a format msi doesn't have trouble with
    METADATA["version"] = METADATA["version"].replace("pre", "a0")
    METADATA["version"] = METADATA["version"].replace("rc", "b0")
    METADATA["version"] = METADATA["version"].replace("release", "")
# allow optionally using setuptools for bdist_egg.
using_setuptools = False
if "-setuptools" in sys.argv:
    using_setuptools = True
    from setuptools import setup, Command
    sys.argv.remove ("-setuptools")
    # setuptools-only keyword arguments merged into the setup() call below.
    EXTRAS.update({'include_package_data': True,
        'install_requires': [],
        'zip_safe': False,
        'test_suite' : 'pyportmidi.tests',
        }
    )
# test command. For doing 'python setup.py test'
class TestCommand(Command):
    """Distutils command that runs the bundled pyportmidi test suite."""
    user_options = [ ]
    def initialize_options(self):
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        '''
        runs the tests with default options.
        '''
        import pyportmidi.tests
        pyportmidi.tests.main()
        #import subprocess
        #return subprocess.call([sys.executable, "run_tests.py"])
cmdclass = {'build_ext': build_ext}
# we use our test command.
if not using_setuptools:
    import os
    cmdclass['test'] = TestCommand
scripts = []
# Package layout and commands; merged with METADATA/EXTRAS before setup().
PACKAGEDATA = {
    'cmdclass': cmdclass,
    'package_dir': {'pyportmidi': 'pyportmidi',
        #'pyportmidi.tests': 'test',
        #'pyportmidi.docs': 'docs',
        #'pyportmidi.examples': 'examples',
        },
    'packages': ['pyportmidi',
        'pyportmidi.tests',
        ],
    'scripts': scripts,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
# Platform-specific build configuration for the _pyportmidi extension.
# The original used Python 2 "print" statements; with a single argument
# the parenthesized form prints identically under Python 2 while also
# being valid Python 3 syntax, so tooling can at least parse this file.
if sys.platform == 'win32':
    print("Found Win32 platform")
    EXTENSION = dict(
        ext_modules=[
            Extension("pyportmidi._pyportmidi", [os.path.join("pyportmidi", "_pyportmidi.pyx")],
                library_dirs = ["../Release"],
                libraries = ["portmidi", "winmm"],
                include_dirs = ["../porttime"],
                # define_macros = [("_WIN32_", None)]) # needed by portmidi.h
                extra_compile_args = ["/DWIN32"]) # needed by portmidi.h
        ]
    )
elif sys.platform == 'darwin':
    print("Found darwin (OS X) platform")
    library_dirs = ["/usr/local/lib"]
    include_dirs = ["/usr/local/include"]
    EXTENSION = dict(
        ext_modules=[
            Extension("pyportmidi._pyportmidi", [os.path.join("pyportmidi", "_pyportmidi.pyx")],
                library_dirs = library_dirs,
                include_dirs = include_dirs,
                libraries = ["portmidi"],
                extra_link_args=["-framework", "CoreFoundation",
                    "-framework", "CoreMIDI",
                    "-framework", "CoreAudio"])
        ]
    )
else:
    print("Assuming Linux platform")
    EXTENSION = dict(
        ext_modules=[
            Extension("pyportmidi._pyportmidi", [os.path.join("pyportmidi", "_pyportmidi.pyx")],
                library_dirs=["./linux"],
                libraries = ["portmidi", "asound", "pthread"]
            )
        ]
    )
PACKAGEDATA.update(EXTENSION)
setup(**PACKAGEDATA)
| gpl-2.0 |
Rumata888/binnavi | src/main/java/com/google/security/zynamics/binnavi/scripts/dominator_tree.py | 70 | 3952 | """
Copyright 2014 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This BinNavi plugin creates a view that contains the dominator tree
# of an input view. For this purpose the plugin extends the menu of
# graph windows with an additional "Create Dominator Tree" menu.
import sys
from java.lang import Thread
from javax.swing import JMenuItem as JMenuItem
from javax.swing import AbstractAction as AbstractAction
from com.google.security.zynamics.binnavi.API.disassembly import CouldntSaveDataException as CouldntSaveDataException
from com.google.security.zynamics.binnavi.API.disassembly import EdgeType as EdgeType
from com.google.security.zynamics.binnavi.API.helpers import MessageBox as MessageBox
from com.google.security.zynamics.binnavi.API.helpers import GraphAlgorithms as GraphAlgorithms
from com.google.security.zynamics.binnavi.API.plugins import IGraphMenuPlugin as IGraphMenuPlugin
def findRoot(nodes):
    """Finds the root node of a view. Note that this function is a bit
    imprecise but it should do the trick for most views: the first node
    without parents is chosen, falling back to the first node overall."""
    for candidate in nodes:
        if len(candidate.parents) == 0:
            return candidate
    return nodes[0]
def createView(view, tree_node):
    """Fills a given view with the nodes of a dominator tree.

    Recursively copies *tree_node* and its children into *view*, linking
    each parent to its children with unconditional-jump edges, and
    returns the graph node created for *tree_node*.
    """
    graph_node = view.createNode(tree_node.object)
    for child in tree_node.children:
        child_node = createView(view, child)
        view.createEdge(graph_node, child_node, EdgeType.JumpUnconditional)
    return graph_node
def create_dominator_view(view):
    """Calculate the dominator tree of `view` and materialize it as a new view.

    Returns the freshly created view, or None when the view data could not
    be saved; empty input views are rejected with an error dialog.
    """
    nodes = view.graph.nodes
    if len(nodes) == 0:
        MessageBox.showError("Can not create dominator tree of empty views")
        return

    # Compute the dominator tree rooted at the (heuristically found) entry node.
    dominator_tree = GraphAlgorithms.getDominatorTree(view.graph, findRoot(nodes), None)

    try:
        tree_view = view.container.createView("Dominator Tree: '%s'" % view.name, "")
        # Populate the new view with the nodes/edges of the dominator tree.
        createView(tree_view, dominator_tree.rootNode)
    except CouldntSaveDataException:
        MessageBox.showError("Could not create the dominator tree view")
        return None
    return tree_view
class MessageAction(AbstractAction):
    """Swing action backing the "Create Dominator Tree" graph-menu item."""

    def __init__(self, pi, frame):
        AbstractAction.__init__(self, "Create Dominator Tree")
        self.pi = pi        # plugin interface handed to the plugin on init()
        self.frame = frame  # graph window frame whose menu hosts this action

    def actionPerformed(self, e):
        # Build the dominator tree view of the graph currently shown in the frame.
        view = create_dominator_view(self.frame.view2D.view)
        if view != None:
            # Displaying the new view is delegated to a java.lang.Thread to
            # avoid running it on the calling (event dispatch) thread.
            t = WorkaroundThread(self.pi, self.frame.window, view)
            t.start()
            # new_view2d.save()
class DominatorTreePlugin(IGraphMenuPlugin):
    """BinNavi graph-menu plugin that adds a "Create Dominator Tree" entry."""

    def getName(self):
        # Human-readable plugin name shown by BinNavi.
        return "Dominator Tree Plugin"

    def getGuid(self):
        # Unique numeric identifier of this plugin.
        return 945436890432

    def getDescription(self):
        return "Creates the dominator tree of a view"

    def init(self, pi):
        # Remember the plugin interface; needed later to show views in windows.
        self.pi = pi

    def closed(self, frame):
        # Nothing to clean up when a graph window closes.
        pass

    def unload(self):
        # Nothing to release on plugin unload.
        pass

    def extendPluginMenu(self, frame):
        # One menu item per graph window, wired to MessageAction.
        return [ JMenuItem(MessageAction(self.pi, frame)) ]
class WorkaroundThread(Thread):
    """Shows a view in a window from a background java.lang.Thread.

    NOTE(review): Thread.__init__ is never invoked here; Jython appears to
    tolerate this for java.lang.Thread subclasses, but confirm before reuse.
    """

    def __init__(self, pi, window, view):
        self.pi = pi          # plugin interface used to display the view
        self.window = window  # target window for the new 2D view
        self.view = view      # the dominator tree view to display

    def run(self):
        # Display the view off the invoking thread, then lay it out as a tree.
        new_view2d = self.pi.showInWindow(self.window, self.view)
        new_view2d.doHierarchicalLayout()
# Instantiate the plugin and register it with BinNavi's plugin registry
# ("navi" is the global object injected by BinNavi's script interpreter).
dominatorTree = DominatorTreePlugin()
navi.getPluginRegistry().addPlugin(dominatorTree)
| apache-2.0 |
NINAnor/QGIS | python/plugins/processing/algs/qgis/RandomSelectionWithinSubsets.py | 5 | 4966 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RandomSelectionWithinSubsets.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import random
from qgis.core import QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class RandomSelectionWithinSubsets(GeoAlgorithm):
    """Randomly selects features within each subset (stratum) of a layer,
    where a stratum is the set of features sharing one value of an ID field."""

    # Parameter / output identifiers used with get/setParameterValue.
    INPUT = 'INPUT'
    METHOD = 'METHOD'
    NUMBER = 'NUMBER'
    FIELD = 'FIELD'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        # Selection only makes sense on layers already opened in QGIS.
        self.allowOnlyOpenedLayers = True
        self.name, self.i18n_name = self.trAlgorithm('Random selection within subsets')
        self.group, self.i18n_group = self.trAlgorithm('Vector selection tools')

        # Index 0: absolute count per subset; index 1: percentage per subset.
        self.methods = [self.tr('Number of selected features'),
                        self.tr('Percentage of selected features')]

        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterTableField(self.FIELD,
                                              self.tr('ID Field'), self.INPUT))
        self.addParameter(ParameterSelection(self.METHOD,
                                             self.tr('Method'), self.methods, 0))
        self.addParameter(ParameterNumber(self.NUMBER,
                                          self.tr('Number/percentage of selected features'), 1, None, 10))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Selection stratified'), True))

    def processAlgorithm(self, progress):
        """Perform the stratified random selection on the input layer.

        Raises:
            GeoAlgorithmExecutionException: when the requested count exceeds
                the feature count or the percentage exceeds 100.
        """
        filename = self.getParameterValue(self.INPUT)
        layer = dataobjects.getObjectFromUri(filename)
        field = self.getParameterValue(self.FIELD)
        method = self.getParameterValue(self.METHOD)

        layer.removeSelection()
        index = layer.fieldNameIndex(field)
        # All distinct values of the ID field define the strata.
        unique = vector.getUniqueValues(layer, index)
        featureCount = layer.featureCount()

        value = int(self.getParameterValue(self.NUMBER))
        if method == 0:
            if value > featureCount:
                raise GeoAlgorithmExecutionException(
                    self.tr('Selected number is greater that feature count. '
                            'Choose lesser value and try again.'))
        else:
            if value > 100:
                raise GeoAlgorithmExecutionException(
                    self.tr("Percentage can't be greater than 100. Set a "
                            "different value and try again."))
            # Convert to a fraction in [0, 1] for the per-stratum computation.
            value = value / 100.0

        selran = []
        inFeat = QgsFeature()

        current = 0
        # NOTE(review): raises ZeroDivisionError for an empty layer
        # (featureCount == 0 or no unique values) — confirm upstream guards.
        total = 100.0 / float(featureCount * len(unique))

        if not len(unique) == featureCount:
            for i in unique:
                # Collect the feature ids belonging to this stratum.
                features = vector.features(layer)
                FIDs = []
                for inFeat in features:
                    attrs = inFeat.attributes()
                    if attrs[index] == i:
                        FIDs.append(inFeat.id())
                    current += 1
                    progress.setPercentage(int(current * total))

                if method == 1:
                    # Percentage: round to nearest whole number of features.
                    selValue = int(round(value * len(FIDs), 0))
                else:
                    selValue = value

                if selValue >= len(FIDs):
                    # Requested more than available: take the whole stratum.
                    selFeat = FIDs
                else:
                    selFeat = random.sample(FIDs, selValue)
                selran.extend(selFeat)

            layer.setSelectedFeatures(selran)
        else:
            # Every feature has a unique ID value, i.e. each stratum holds one
            # feature, so all features get selected.
            # NOTE(review): assumes feature ids are 0..featureCount-1 — confirm.
            layer.setSelectedFeatures(range(0, featureCount))
        self.setOutputValue(self.OUTPUT, filename)
| gpl-2.0 |
rosihorrorshow/askbot-devel | askbot/management/commands/initialize_ldap_logins.py | 3 | 2737 | """Management command to create LDAP login method for all users.
Please see description of the command in its ``help_text``.
"""
import datetime
from django.core.management.base import CommandError
from django.utils.translation import ugettext as _
from askbot.management import NoArgsJob
from askbot import models
from askbot.deps.django_authopenid.models import UserAssociation
from askbot.conf import settings as askbot_settings
def create_ldap_login_for_user(user):
    """a unit job that creates LDAP account record for
    the user, assuming that his or her LDAP user name
    is the same as the user name on the forum site.

    If the record already exists, LDAP provider name
    will be updated according to the live setting,
    otherwise a new record will be created.

    Always returns ``True``.
    """
    ldap_url = askbot_settings.LDAP_URL
    ldap_provider_name = askbot_settings.LDAP_PROVIDER_NAME
    # Both settings must be configured; the empty string means "unset".
    if '' in (ldap_url, ldap_provider_name):
        raise CommandError(
            'Please, first set up LDAP settings '
            'at url /settings/EXTERNAL_KEYS,'
            'relative to the base url of your forum site'
        )
    # Get-or-create keyed on (openid_url == username, user).
    # NOTE(review): not atomic — a concurrent run could create a duplicate;
    # acceptable for a one-off management command.
    try:
        assoc = UserAssociation.objects.get(
            openid_url = user.username,
            user = user
        )
    except UserAssociation.DoesNotExist:
        assoc = UserAssociation(
            openid_url = user.username,
            user = user
        )
    # Refresh the provider name from the live setting in both cases.
    assoc.provider_name = ldap_provider_name
    assoc.last_used_timestamp = datetime.datetime.now()
    assoc.save()
    return True
class Command(NoArgsJob):
    """definition of the job that
    runs through all users and creates LDAP login
    methods, assuming that LDAP user ID's are the same
    as values ``~askbot.User.username``
    """
    # User-facing description printed by ``manage.py help``.
    help = _(
        'This command may help you migrate to LDAP '
        'password authentication by creating a record '
        'for LDAP association with each user account. '
        'There is an assumption that ldap user id\'s are '
        'the same as user names registered at the site. '
        'Before running this command it is necessary to '
        'set up LDAP parameters in the "External keys" section '
        'of the site settings.'
    )

    def __init__(self, *args, **kwargs):
        # NoArgsJob consumes ``batches``: each batch pairs a queryset with the
        # per-item worker function and the progress/result messages.
        self.batches = ({
            'title': 'Initializing LDAP logins for all users: ',
            'query_set': models.User.objects.all(),
            'function': create_ldap_login_for_user,
            'changed_count_message': 'Created LDAP logins for %d users',
            'nothing_changed_message': 'All users already have LDAP login methods'
        },)
        super(Command, self).__init__(*args, **kwargs)
| gpl-3.0 |
andrewyoung1991/scons | test/ToolSurrogate.py | 5 | 3380 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that SCons supports use of a home-brew ToolSurrogate class
like we use in our bin/sconsexamples.py script.
"""
import TestSCons

test = TestSCons.TestSCons()

# SConstruct under test: ToolSurrogate wraps a real SCons Tool and replaces
# one construction variable's Action with a Python function (Cat), while the
# printed command line stays the original command string (via Curry + Str).
test.write('SConstruct', """\
class Curry(object):
    def __init__(self, fun, *args, **kwargs):
        self.fun = fun
        self.pending = args[:]
        self.kwargs = kwargs.copy()
    def __call__(self, *args, **kwargs):
        if kwargs and self.kwargs:
            kw = self.kwargs.copy()
            kw.update(kwargs)
        else:
            kw = kwargs or self.kwargs
        return self.fun(*self.pending + args, **kw)
def Str(target, source, env, cmd=""):
    result = []
    for cmd in env.subst_list(cmd, target=target, source=source):
        result.append(" ".join(map(str, cmd)))
    return '\\n'.join(result)
class ToolSurrogate(object):
    def __init__(self, tool, variable, func):
        self.tool = tool
        self.variable = variable
        self.func = func
    def __call__(self, env):
        t = Tool(self.tool)
        t.generate(env)
        orig = env[self.variable]
        env[self.variable] = Action(self.func, strfunction=Curry(Str, cmd=orig))
def Cat(target, source, env):
    target = str(target[0])
    f = open(target, "wb")
    for src in map(str, source):
        f.write(open(src, "rb").read())
    f.close()
ToolList = {
    'posix' : [('cc', 'CCCOM', Cat),
               ('link', 'LINKCOM', Cat)],
    'win32' : [('msvc', 'CCCOM', Cat),
               ('mslink', 'LINKCOM', Cat)]
}
platform = ARGUMENTS['platform']
tools = [ToolSurrogate(*t) for t in ToolList[platform]]
env = Environment(tools=tools, PROGSUFFIX='.exe', OBJSUFFIX='.obj')
env.Program('foo.c')
""")

test.write('foo.c', "foo.c posix\n")

# The surrogate prints the original POSIX command strings but runs Cat.
test.run(arguments = '. platform=posix', stdout = test.wrap_stdout("""\
cc -o foo.obj -c foo.c
cc -o foo.exe foo.obj
"""))

# Changing the source forces a rebuild; this time with the win32 tool chain.
test.write('foo.c', "foo.c win32\n")

test.run(arguments = '. platform=win32', stdout = test.wrap_stdout("""\
cl /Fofoo.obj /c foo.c /nologo
link /nologo /OUT:foo.exe foo.obj
embedManifestExeCheck(target, source, env)
"""))

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
huggingface/transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | 1 | 61698 | # coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DeBERTa-v2 model. """
import math
from collections.abc import Sequence
import numpy as np
import torch
from torch import _softmax_backward_data, nn
from torch.nn import CrossEntropyLoss, LayerNorm
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)

# Names consumed by the auto-generated docstring decorators below.
_CONFIG_FOR_DOC = "DebertaV2Config"
_TOKENIZER_FOR_DOC = "DebertaV2Tokenizer"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"

# Hub checkpoints that ship pretrained DeBERTa-v2 weights.
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/deberta-v2-xlarge",
    "microsoft/deberta-v2-xxlarge",
    "microsoft/deberta-v2-xlarge-mnli",
    "microsoft/deberta-v2-xxlarge-mnli",
]
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
class ContextPooler(nn.Module):
    """Pools a hidden-state sequence to one vector: takes the first token,
    applies dropout, a dense projection and the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        # Size of the pooled vector seen by downstream heads.
        return self.config.hidden_size
# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
class XSoftmax(torch.autograd.Function):
    """
    Masked Softmax which is optimized for saving memory

    Args:
        input (:obj:`torch.tensor`): The input tensor that will apply softmax.
        mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax

    Example::

          >>> import torch
          >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax

          >>> # Make a tensor
          >>> x = torch.randn([4,20,100])

          >>> # Create a mask
          >>> mask = (x>0).int()

          >>> y = XSoftmax.apply(x, mask, dim=-1)
    """

    @staticmethod
    def forward(self, input, mask, dim):
        # NOTE: `self` here is the autograd context object (ctx).
        self.dim = dim
        # Invert the mask: True marks positions excluded from the softmax.
        rmask = ~(mask.bool())

        output = input.masked_fill(rmask, float("-inf"))
        output = torch.softmax(output, self.dim)
        # Zero masked positions so they contribute nothing downstream.
        output.masked_fill_(rmask, 0)
        self.save_for_backward(output)
        return output

    @staticmethod
    def backward(self, grad_output):
        (output,) = self.saved_tensors
        # NOTE(review): this private `_softmax_backward_data` call matches
        # older torch releases; newer torch changed the last argument to a
        # dtype — confirm against the installed torch version.
        inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)
        return inputGrad, None, None
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
class DropoutContext(object):
    """Mutable bag of per-call dropout state shared between StableDropout
    and the XDropout autograd function."""

    def __init__(self):
        self.dropout = 0        # dropout probability for the next application
        self.mask = None        # cached dropout mask (reused when allowed)
        self.scale = 1          # multiplier applied to the dropout probability
        self.reuse_mask = True  # whether a cached mask may be reused
# Copied from transformers.models.deberta.modeling_deberta.get_mask
def get_mask(input, local_context):
    """Resolve the (mask, dropout) pair for one dropout application.

    `local_context` is either a plain float probability or a DropoutContext
    carrying a probability, a scale factor and an optionally reusable mask.
    A freshly drawn mask is cached on the context when its slot is empty.
    """
    if isinstance(local_context, DropoutContext):
        dropout = local_context.dropout * local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    else:
        dropout = local_context
        mask = None

    if dropout > 0 and mask is None:
        # True where an element should be dropped.
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()
        if isinstance(local_context, DropoutContext) and local_context.mask is None:
            local_context.mask = mask

    return mask, dropout
# Copied from transformers.models.deberta.modeling_deberta.XDropout
class XDropout(torch.autograd.Function):
    """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""

    @staticmethod
    def forward(ctx, input, local_ctx):
        mask, dropout = get_mask(input, local_ctx)
        # Inverse keep-probability; rescales survivors like standard dropout.
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        # scale > 1 implies dropout was actually applied in forward.
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return grad_output.masked_fill(mask, 0) * ctx.scale, None
        else:
            return grad_output, None
# Copied from transformers.models.deberta.modeling_deberta.StableDropout
class StableDropout(nn.Module):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0             # index of the next context to hand out
        self.context_stack = None  # lazily created list of DropoutContext

    def forward(self, x):
        """Apply dropout to ``x`` (identity in eval mode or when drop_prob == 0)."""
        if not self.training or self.drop_prob <= 0:
            return x
        return XDropout.apply(x, self.get_context())

    def clear_context(self):
        """Drop all cached contexts and restart the counter."""
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        """Reset the counter and reconfigure every cached context."""
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for ctx in self.context_stack:
            ctx.reuse_mask = reuse_mask
            ctx.scale = scale

    def get_context(self):
        """Return the next DropoutContext, or the bare probability when no
        context stack is active."""
        if self.context_stack is None:
            return self.drop_prob
        while self.count >= len(self.context_stack):
            self.context_stack.append(DropoutContext())
        ctx = self.context_stack[self.count]
        ctx.dropout = self.drop_prob
        self.count += 1
        return ctx
# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
class DebertaV2SelfOutput(nn.Module):
    """Projects attention output back to hidden size, then applies dropout,
    the residual connection and layer normalization."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual add followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
class DebertaV2Attention(nn.Module):
    """Disentangled self-attention followed by the output projection block."""

    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaV2SelfOutput(config)
        self.config = config

    def forward(
        self,
        hidden_states,
        attention_mask,
        return_att=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        """Apply self-attention; optionally also return the attention matrix.

        When `query_states` is given it (rather than `hidden_states`) provides
        the queries and the residual input for the output block.
        """
        self_output = self.self(
            hidden_states,
            attention_mask,
            return_att,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        if return_att:
            self_output, att_matrix = self_output
        if query_states is None:
            query_states = hidden_states
        attention_output = self.output(self_output, query_states)

        if return_att:
            return (attention_output, att_matrix)
        else:
            return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
class DebertaV2Intermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size -> activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act is either the name of an activation in ACT2FN
        # or a callable supplied directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
class DebertaV2Output(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, with
    dropout, residual connection and layer normalization."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual add followed by layer normalization.
        return self.LayerNorm(contracted + input_tensor)
# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
class DebertaV2Layer(nn.Module):
    """One transformer block: attention + intermediate (FFN up) + output (FFN down)."""

    def __init__(self, config):
        super().__init__()
        self.attention = DebertaV2Attention(config)
        self.intermediate = DebertaV2Intermediate(config)
        self.output = DebertaV2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask,
        return_att=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        """Run attention then the feed-forward sublayer; optionally also
        return the attention matrix."""
        attention_output = self.attention(
            hidden_states,
            attention_mask,
            return_att=return_att,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        if return_att:
            attention_output, att_matrix = attention_output
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        if return_att:
            return (layer_output, att_matrix)
        else:
            return layer_output
class ConvLayer(nn.Module):
    """Depthwise-style 1D convolution branch mixed into the first encoder
    layer's output (DeBERTa-v2's "conv" enhancement)."""

    def __init__(self, config):
        super().__init__()
        kernel_size = getattr(config, "conv_kernel_size", 3)
        groups = getattr(config, "conv_groups", 1)
        self.conv_act = getattr(config, "conv_act", "tanh")
        # "same" padding so the sequence length is preserved.
        self.conv = nn.Conv1d(
            config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
        )
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, residual_states, input_mask):
        # Conv1d expects (batch, channels, seq): permute in and back out.
        out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        # NOTE(review): input_mask is dereferenced here, so the
        # `input_mask is None` branch below is unreachable with a None mask —
        # confirm whether None was ever meant to be supported.
        rmask = (1 - input_mask).bool()
        # Zero out padded positions before activation.
        out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
        out = ACT2FN[self.conv_act](self.dropout(out))

        # Residual add with the first layer's output, then layer norm.
        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input).to(layer_norm_input)

        if input_mask is None:
            output_states = output
        else:
            if input_mask.dim() != layer_norm_input.dim():
                if input_mask.dim() == 4:
                    input_mask = input_mask.squeeze(1).squeeze(1)
                input_mask = input_mask.unsqueeze(2)

            # Re-apply the mask so padded positions stay exactly zero.
            input_mask = input_mask.to(output.dtype)
            output_states = output * input_mask

        return output_states
class DebertaV2Encoder(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()

        self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)

        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                # Fall back to the model's maximum sequence length.
                self.max_relative_positions = config.max_position_embeddings

            self.position_buckets = getattr(config, "position_buckets", -1)
            # With bucketing the embedding table only needs to cover the
            # bucketed range instead of every raw distance.
            pos_ebd_size = self.max_relative_positions * 2

            if self.position_buckets > 0:
                pos_ebd_size = self.position_buckets * 2

            self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)

        # Pipe-separated normalizations to apply to the relative embeddings
        # (currently only "layer_norm" is recognized below).
        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]

        if "layer_norm" in self.norm_rel_ebd:
            self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)

        # Optional convolution branch mixed into the first layer's output.
        self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None

    def get_rel_embedding(self):
        """Return the (optionally layer-normalized) relative position
        embedding weights, or None when relative attention is disabled."""
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        """Expand a 2D/3D attention mask into the 4D form used by the layers."""
        if attention_mask.dim() <= 2:
            # (batch, seq) -> (batch, 1, seq, seq) pairwise visibility mask.
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
            attention_mask = attention_mask.byte()
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        """Build relative position ids lazily when relative attention is on
        and the caller did not supply them."""
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(
                q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
            )
        return relative_pos

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_hidden_states=True,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        return_dict=True,
    ):
        """Run the full layer stack.

        Returns a :class:`BaseModelOutput` (or a tuple when ``return_dict`` is
        False) with the last hidden state plus, optionally, all hidden states
        and all attention matrices.
        """
        if attention_mask.dim() <= 2:
            input_mask = attention_mask
        else:
            # Collapse a higher-rank mask to per-token validity (for the conv).
            input_mask = (attention_mask.sum(-2) > 0).byte()
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # hidden_states may be a sequence of per-layer inputs (used together
        # with query_states); otherwise it is a single tensor.
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for i, layer_module in enumerate(self.layer):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)

            output_states = layer_module(
                next_kv,
                attention_mask,
                output_attentions,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
            )
            if output_attentions:
                output_states, att_m = output_states

            # The optional conv branch only augments the first layer's output.
            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)

            if query_states is not None:
                query_states = output_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = output_states

            if output_attentions:
                all_attentions = all_attentions + (att_m,)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)

        if not return_dict:
            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map signed relative positions onto log-spaced buckets.

    Positions with absolute value up to ``bucket_size // 2`` keep their exact
    value; positions further away are compressed logarithmically so that
    ``max_position`` still fits into the bucket range. The input's sign is
    preserved.

    Args:
        relative_pos (np.ndarray): signed relative distances.
        bucket_size (int): number of exact (non-compressed) buckets.
        max_position (int): largest absolute position to represent.

    Returns:
        np.ndarray: integer bucket indices, same shape as ``relative_pos``.
    """
    sign = np.sign(relative_pos)
    mid = bucket_size // 2
    # Clamp near-zero distances to mid - 1 so the log below stays finite;
    # their bucket is taken directly from relative_pos in the final np.where.
    abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos))
    log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid
    # BUGFIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # exact alias it used to resolve to, so behavior is unchanged.
    bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(int)
    return bucket_pos
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
    """
    Build relative position according to the query and key

    We assume the absolute position of query :math:`P_q` is range from (0, query_size) and the absolute position of
    key :math:`P_k` is range from (0, key_size). The relative position from query to key is
    :math:`R_{q \\rightarrow k} = P_q - P_k`.

    Args:
        query_size (int): the length of query
        key_size (int): the length of key
        bucket_size (int): the size of position bucket
        max_position (int): the maximum allowed absolute position

    Return:
        :obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
    """
    q_ids = np.arange(0, query_size)
    k_ids = np.arange(0, key_size)
    # Pairwise signed distance from every query position to every key position.
    rel_pos_ids = q_ids[:, None] - k_ids[None, :]
    if bucket_size > 0 and max_position > 0:
        # Compress long distances into log-spaced buckets.
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)
    return rel_pos_ids[:query_size, :].unsqueeze(0)
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    """Broadcast content->position gather indices to the attention-score shape."""
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    """Broadcast position->content gather indices to a key-by-key shape."""
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    """Broadcast the position index to match the p2c attention score shape."""
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DisentangledSelfAttention(nn.Module):
"""
Disentangled self-attention module
Parameters:
config (:obj:`DebertaV2Config`):
A model config class instance with the configuration to build a new model. The schema is similar to
`BertConfig`, for more details, please refer :class:`~transformers.DebertaV2Config`
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
_attention_head_size = config.hidden_size // config.num_attention_heads
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.share_att_key = getattr(config, "share_att_key", False)
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.position_buckets = getattr(config, "position_buckets", -1)
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_ebd_size = self.max_relative_positions
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets
self.pos_dropout = StableDropout(config.hidden_dropout_prob)
if not self.share_att_key:
if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = StableDropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, attention_heads):
new_x_shape = x.size()[:-1] + (attention_heads, -1)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
"""
Call the module
Args:
hidden_states (:obj:`torch.FloatTensor`):
Input states to the module usually the output from previous layer, it will be the Q,K and V in
`Attention(Q,K,V)`
attention_mask (:obj:`torch.ByteTensor`):
An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum
sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`
th token.
return_att (:obj:`bool`, optional):
Whether return the attention matrix.
query_states (:obj:`torch.FloatTensor`, optional):
The `Q` state in `Attention(Q,K,V)`.
relative_pos (:obj:`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with
values ranging in [`-max_relative_positions`, `max_relative_positions`].
rel_embeddings (:obj:`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [:math:`2 \\times
\\text{max_relative_positions}`, `hidden_size`].
"""
if query_states is None:
query_states = hidden_states
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
rel_att = None
# Take the dot product between "query" and "key" to get the raw attention scores.
scale_factor = 1
if "c2p" in self.pos_att_type:
scale_factor += 1
if "p2c" in self.pos_att_type:
scale_factor += 1
if "p2p" in self.pos_att_type:
scale_factor += 1
scale = math.sqrt(query_layer.size(-1) * scale_factor)
attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale
if self.relative_attention:
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_attention_bias(
query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
)
if rel_att is not None:
attention_scores = attention_scores + rel_att
attention_scores = attention_scores
attention_scores = attention_scores.view(
-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
)
# bsz x height x length x dimension
attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.bmm(
attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
)
context_layer = (
context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
.permute(0, 2, 1, 3)
.contiguous()
)
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
context_layer = context_layer.view(*new_context_layer_shape)
if return_att:
return (context_layer, attention_probs)
else:
return context_layer
    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        """Compute the disentangled relative-position attention bias.

        Accumulates up to three score components, selected by ``self.pos_att_type``:
        content->position ("c2p"), position->content ("p2c") and
        position->position ("p2p"). The caller adds the returned score tensor to
        the content->content attention scores.

        Args:
            query_layer / key_layer: projected query/key tensors. Dim 0 is
                apparently batch * num_attention_heads (see the
                ``// self.num_attention_heads`` repeats below) — TODO confirm.
            relative_pos: relative position ids of dim 2, 3 or 4, or ``None`` to
                build them from the query/key lengths.
            rel_embeddings: relative position embedding table.
            scale_factor: number of enabled score components; used to scale the
                dot products so their magnitudes stay comparable.
        """
        # Build bucketed relative positions when the caller did not supply them.
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(
                q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
            )
        # Normalize relative_pos to 4 dims: bsz x heads x query x key.
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        # bsz x height x query x key
        elif relative_pos.dim() != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")

        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)

        # att_span == self.pos_ebd_size, so this slice keeps the full
        # [0, 2 * pos_ebd_size) range of the table and adds a leading batch dim.
        rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :].unsqueeze(0)
        if self.share_att_key:
            # Reuse the content query/key projections for the position embeddings;
            # repeat along dim 0 so they align with query_layer's leading dim.
            pos_query_layer = self.transpose_for_scores(
                self.query_proj(rel_embeddings), self.num_attention_heads
            ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
                query_layer.size(0) // self.num_attention_heads, 1, 1
            )
        else:
            # Dedicated position projections; only build the ones actually needed.
            if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(
                    self.pos_key_proj(rel_embeddings), self.num_attention_heads
                ).repeat(
                    query_layer.size(0) // self.num_attention_heads, 1, 1
                )  # .split(self.all_head_size, dim=-1)
            if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(
                    self.pos_query_proj(rel_embeddings), self.num_attention_heads
                ).repeat(
                    query_layer.size(0) // self.num_attention_heads, 1, 1
                )  # .split(self.all_head_size, dim=-1)

        score = 0
        # content->position
        if "c2p" in self.pos_att_type:
            scale = math.sqrt(pos_key_layer.size(-1) * scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
            # Clamp shifted relative positions into the valid embedding index range.
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(
                c2p_att,
                dim=-1,
                index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
            )
            score += c2p_att / scale
        # position->content
        if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
            scale = math.sqrt(pos_query_layer.size(-1) * scale_factor)
            if key_layer.size(-2) != query_layer.size(-2):
                # Query and key lengths differ (e.g. when query_states are passed):
                # rebuild key-to-key relative positions.
                r_pos = build_relative_position(
                    key_layer.size(-2),
                    key_layer.size(-2),
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                ).to(query_layer.device)
                r_pos = r_pos.unsqueeze(0)
            else:
                r_pos = relative_pos

            # Negated positions: p2c looks up the opposite-direction offsets.
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            if query_layer.size(-2) != key_layer.size(-2):
                # pos_index is only needed (and only defined) in the
                # unequal-length case; the guarded gathers below match that.
                pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)

            if "p2c" in self.pos_att_type:
                p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
                # Gather along the key axis, then transpose back to
                # (query, key) orientation.
                p2c_att = torch.gather(
                    p2c_att,
                    dim=-1,
                    index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
                ).transpose(-1, -2)
                if query_layer.size(-2) != key_layer.size(-2):
                    p2c_att = torch.gather(
                        p2c_att,
                        dim=-2,
                        index=pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))),
                    )
                score += p2c_att / scale

            # position->position
            if "p2p" in self.pos_att_type:
                # Second half of the position queries (offsets >= 0).
                pos_query = pos_query_layer[:, :, att_span:, :]
                p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))
                p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])
                if query_layer.size(-2) != key_layer.size(-2):
                    p2p_att = torch.gather(
                        p2p_att,
                        dim=-2,
                        index=pos_index.expand(query_layer.size()[:2] + (pos_index.size(-2), p2p_att.size(-1))),
                    )
                p2p_att = torch.gather(
                    p2p_att,
                    dim=-1,
                    index=c2p_pos.expand(
                        [query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]
                    ),
                )
                score += p2p_att

        return score
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
class DebertaV2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        # Embedding width may differ from hidden_size; a projection bridges the gap below.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)

        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            # No absolute position bias: skip the position embedding table entirely.
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)

        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)

        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        """Build the combined embeddings; `mask` zeroes out embeddings at masked positions."""
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)

        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings

        # Project up/down to hidden_size before LayerNorm when the widths differ.
        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)

        embeddings = self.LayerNorm(embeddings)

        if mask is not None:
            # Align mask rank with the embeddings: squeeze broadcast dims of a
            # 4-d attention mask, then add a trailing feature dim for broadcasting.
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)

            embeddings = embeddings * mask

        embeddings = self.dropout(embeddings)
        return embeddings
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaV2Config
    base_model_prefix = "deberta"
    # Checkpoint loading tolerates these keys being absent from / unexpected in the checkpoint.
    _keys_to_ignore_on_load_missing = ["position_ids"]
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]

    def __init__(self, config):
        super().__init__(config)
        # Strip an incompatible classifier head from checkpoints before loading.
        self._register_load_state_dict_pre_hook(self._pre_load_hook)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at zero.
                module.weight.data[module.padding_idx].zero_()

    def _pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """
        Removes the classifier if it doesn't have the correct number of labels.
        """
        self_state = self.state_dict()
        if (
            ("classifier.weight" in self_state)
            and ("classifier.weight" in state_dict)
            and self_state["classifier.weight"].size() != state_dict["classifier.weight"].size()
        ):
            logger.warning(
                f"The checkpoint classifier head has a shape {state_dict['classifier.weight'].size()} and this model "
                f"classifier head has a shape {self_state['classifier.weight'].size()}. Ignoring the checkpoint "
                f"weights. You should train your model on new data."
            )
            # Drop the mismatched head so load_state_dict re-initializes it instead of failing.
            del state_dict["classifier.weight"]
            if "classifier.bias" in state_dict:
                del state_dict["classifier.bias"]
# Docstring prepended (via `add_start_docstrings`) to every DeBERTa model class below.
# Fixes: stray "```" fused after "behavior." and the typos "build"/"out perform".
DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
    <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built on top of
    BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.

    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
    general usage and behavior.

    Parameters:
        config (:class:`~transformers.DebertaV2Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
            weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.DebertaV2Tokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaV2Embeddings(config)
        self.encoder = DebertaV2Encoder(config)
        # Extra passes through the last encoder layer; initialized to 0 here,
        # so the z_steps branch in forward() is inactive unless set externally.
        self.z_steps = 0
        self.config = config
        self.init_weights()

    def get_input_embeddings(self):
        """Return the word-embedding table."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        """Replace the word-embedding table (e.g. after resizing the vocabulary)."""
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Fall back to config values for unspecified output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )

        # Hidden states are always requested from the encoder because the
        # z_steps branch and the tuple slicing below index into them.
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        encoded_layers = encoder_outputs[1]

        if self.z_steps > 1:
            # Re-run the last encoder layer (z_steps - 1) additional times,
            # feeding the previous output back in as the query states.
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    return_att=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)

        sequence_output = encoded_layers[-1]

        if not return_dict:
            # Drop the always-computed hidden states from the tuple unless the
            # caller asked for them.
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top. """, DEBERTA_START_DOCSTRING)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
    # This model has no pooler; checkpoint pooler weights are ignored, and the
    # tied decoder bias may be absent from checkpoints.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.deberta = DebertaV2Model(config)
        self.cls = DebertaV2OnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        """Return the decoder projection (used for weight tying with the input embeddings)."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Tuple output: prepend the loss only when it was computed.
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta
class DebertaV2PredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be given either as a string key into ACT2FN or as a callable.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """Apply dense projection, activation and layer normalization, in that order."""
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta
class DebertaV2LMPredictionHead(nn.Module):
    """MLM head: feature transform followed by a vocab-size projection with a standalone bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaV2PredictionHeadTransform(config)

        # The decoder weights are shared with the input embeddings; the bias is
        # output-only. Linking `decoder.bias` to `self.bias` lets
        # `resize_token_embeddings` resize both together.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Transform the hidden states, then project them to vocabulary logits."""
        return self.decoder(self.transform(hidden_states))
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaV2OnlyMLMHead(nn.Module):
    """Thin wrapper exposing the MLM prediction head as a single module."""

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaV2LMPredictionHead(config)

    def forward(self, sequence_output):
        """Return vocabulary prediction scores for each position of `sequence_output`."""
        return self.predictions(sequence_output)
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Defaults to binary classification when the config carries no num_labels.
        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, num_labels)
        # Classifier dropout falls back to the model-wide hidden dropout when
        # `cls_dropout` is not set on the config.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)

        self.init_weights()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # regression task
                loss_fn = nn.MSELoss()
                logits = logits.view(-1).to(labels.dtype)
                loss = loss_fn(logits, labels.view(-1))
            elif labels.dim() == 1 or labels.size(-1) == 1:
                # Standard classification: rows with negative labels (e.g. -100
                # padding) are filtered out of the loss.
                label_index = (labels >= 0).nonzero()
                labels = labels.long()
                if label_index.size(0) > 0:
                    labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
                    labels = torch.gather(labels, 0, label_index.view(-1))
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                else:
                    # No labeled rows in the batch: contribute a zero loss on
                    # the logits' device/dtype.
                    loss = torch.tensor(0).to(logits)
            else:
                # `labels` holds a per-class distribution: soft cross-entropy
                # (expected negative log-likelihood).
                log_softmax = nn.LogSoftmax(-1)
                loss = -((log_softmax(logits) * labels).sum(-1)).mean()
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        else:
            return SequenceClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
    # This model has no pooler; checkpoint pooler weights are ignored.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Replace labels at padded positions with the loss's ignore_index
                # so they do not contribute to the loss.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
    # This model has no pooler; checkpoint pooler weights are ignored.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        # Single linear head producing (start, end) logits per token.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split the two-channel head output into separate start/end logit tensors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            # Out-of-range positions were clamped to `ignored_index`, which the
            # loss is told to skip.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| apache-2.0 |
andresgz/django | tests/template_tests/syntax_tests/test_numpy.py | 353 | 1429 | import warnings
from unittest import skipIf
from django.test import SimpleTestCase
from ..utils import setup
try:
import numpy
except ImportError:
numpy = False
@skipIf(numpy is False, "Numpy must be installed to run these tests.")
class NumpyTests(SimpleTestCase):
    # Ignore numpy deprecation warnings (#23890)
    # NOTE(review): this call runs once at class-creation time (module import),
    # so the filter is installed process-wide, not per-test.
    warnings.filterwarnings(
        "ignore",
        "Using a non-integer number instead of an "
        "integer will result in an error in the future",
        DeprecationWarning
    )

    @setup({'numpy-array-index01': '{{ var.1 }}'})
    def test_numpy_array_index01(self):
        """
        Numpy's array-index syntax allows a template to access a certain
        item of a subscriptable object.
        """
        # `self.engine` is presumably provided by the @setup decorator from
        # ..utils — verify against that helper.
        output = self.engine.render_to_string(
            'numpy-array-index01',
            {'var': numpy.array(["first item", "second item"])},
        )
        self.assertEqual(output, 'second item')

    @setup({'numpy-array-index02': '{{ var.5 }}'})
    def test_numpy_array_index02(self):
        """
        Fail silently when the array index is out of range.
        """
        output = self.engine.render_to_string(
            'numpy-array-index02',
            {'var': numpy.array(["first item", "second item"])},
        )
        # Engines configured with string_if_invalid substitute that marker for
        # failed lookups; otherwise the lookup renders as an empty string.
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
| bsd-3-clause |
googleapis/python-compute | tests/unit/gapic/compute_v1/test_reservations.py | 1 | 72826 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.reservations import ReservationsClient
from google.cloud.compute_v1.services.reservations import pagers
from google.cloud.compute_v1.services.reservations import transports
from google.cloud.compute_v1.services.reservations.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip marker: test only runs when the installed google-auth predates 1.25.0.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Skip marker: test only runs when the installed google-auth is 1.25.0 or newer.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Stub client-certificate provider.

    Returns a fixed ``(cert_bytes, key_bytes)`` pair so mTLS code paths can
    be exercised without touching any real certificate material.
    """
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost default endpoint for *client*.

    If the client's DEFAULT_ENDPOINT contains "localhost" a fixed substitute
    is returned so mTLS-autoswitch tests can observe a distinct mTLS endpoint;
    otherwise the client's own default endpoint is returned unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" for googleapis hosts only.

    Table-driven: each plain endpoint maps to its expected mTLS counterpart;
    already-mTLS endpoints are left untouched, and non-googleapis hosts pass
    through unchanged. ``None`` maps to ``None``.
    """
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        assert ReservationsClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [ReservationsClient,])
def test_reservations_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    credentials produced by service_account.Credentials (mocked here) and
    points at the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize("client_class", [ReservationsClient,])
def test_reservations_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json alias
    build clients with the (mocked) file-derived credentials and the default
    host; the file itself is never read because the factory is patched."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias for from_service_account_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "compute.googleapis.com:443"
def test_reservations_client_get_transport_class():
    """get_transport_class returns the REST transport both by default and
    when asked for it by its "rest" name."""
    default_choice = ReservationsClient.get_transport_class()
    assert default_choice in [transports.ReservationsRestTransport]
    named_choice = ReservationsClient.get_transport_class("rest")
    assert named_choice is transports.ReservationsRestTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
@mock.patch.object(
    ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)
)
def test_reservations_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise client construction against the client_options/env matrix:
    explicit transport instance, transport name, explicit api_endpoint,
    GOOGLE_API_USE_MTLS_ENDPOINT never/always/unsupported,
    GOOGLE_API_USE_CLIENT_CERTIFICATE unsupported, and quota_project_id.
    The transport __init__ is patched so only the forwarded kwargs are checked.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(ReservationsClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(ReservationsClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (ReservationsClient, transports.ReservationsRestTransport, "rest", "true"),
        (ReservationsClient, transports.ReservationsRestTransport, "rest", "false"),
    ],
)
@mock.patch.object(
    ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_reservations_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client must switch to the
    mTLS endpoint exactly when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    and a client certificate is available (explicitly or via ADC); otherwise
    it must stay on the plain default endpoint with no cert source."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance created
                    # in the previous section (the new client is only built two
                    # lines below), so the DEFAULT_*ENDPOINT lookups read the
                    # leaked variable — generated-code quirk; confirm intent.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
def test_reservations_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded verbatim to the
    transport constructor (patched here so no real transport is built)."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"])
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=scoped_options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(ReservationsClient, transports.ReservationsRestTransport, "rest"),],
)
def test_reservations_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file set via client_options is passed straight through
    to the transport constructor (patched, so the file is never opened)."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=file_options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_aggregated_list_rest(
    transport: str = "rest", request_type=compute.AggregatedListReservationsRequest
):
    """aggregated_list over a faked HTTP session returns an
    AggregatedListPager exposing every field of the canned
    ReservationAggregatedList response."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationAggregatedList(
            id="id_value",
            items={
                "key_value": compute.ReservationsScopedList(
                    reservations=[compute.Reservation(commitment="commitment_value")]
                )
            },
            kind="kind_value",
            next_page_token="next_page_token_value",
            self_link="self_link_value",
            unreachables=["unreachables_value"],
            warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
        )
        # Wrap the value into a proper Response obj
        json_return_value = compute.ReservationAggregatedList.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.aggregated_list(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.AggregatedListPager)
    assert response.id == "id_value"
    assert response.items == {
        "key_value": compute.ReservationsScopedList(
            reservations=[compute.Reservation(commitment="commitment_value")]
        )
    }
    assert response.kind == "kind_value"
    assert response.next_page_token == "next_page_token_value"
    assert response.self_link == "self_link_value"
    assert response.unreachables == ["unreachables_value"]
    assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_aggregated_list_rest_from_dict():
    """Re-run the aggregated_list test with the request given as a plain dict."""
    test_aggregated_list_rest(request_type=dict)
def test_aggregated_list_rest_flattened():
    """Calling aggregated_list with flattened keyword args must place those
    values into the single underlying HTTP request (URL or body)."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationAggregatedList()
        # Wrap the value into a proper Response obj
        json_return_value = compute.ReservationAggregatedList.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.aggregated_list(project="project_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
def test_aggregated_list_rest_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials())
    request = compute.AggregatedListReservationsRequest()
    # Mixing the request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.aggregated_list(request, project="project_value")
def test_aggregated_list_pager():
    """The AggregatedListPager walks four faked pages twice: once flattening
    items into (key, ReservationsScopedList) tuples, once page-by-page
    checking next_page_token propagation. get() only inspects the current
    page, so "a" hits before iteration and "h" only after."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Set the response as a series of pages
        response = (
            compute.ReservationAggregatedList(
                items={
                    "a": compute.ReservationsScopedList(),
                    "b": compute.ReservationsScopedList(),
                    "c": compute.ReservationsScopedList(),
                },
                next_page_token="abc",
            ),
            compute.ReservationAggregatedList(items={}, next_page_token="def",),
            compute.ReservationAggregatedList(
                items={"g": compute.ReservationsScopedList(),}, next_page_token="ghi",
            ),
            compute.ReservationAggregatedList(
                items={
                    "h": compute.ReservationsScopedList(),
                    "i": compute.ReservationsScopedList(),
                },
            ),
        )
        # Two responses for two calls
        response = response + response
        # Wrap the values into proper Response objs
        response = tuple(compute.ReservationAggregatedList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode("UTF-8")
            return_val.status_code = 200
        req.side_effect = return_values
        metadata = ()
        pager = client.aggregated_list(request={})
        assert pager._metadata == metadata
        # Before iterating, the pager sits on the first page: "a" is present,
        # "h" (last page) is not yet reachable via get().
        assert isinstance(pager.get("a"), compute.ReservationsScopedList)
        assert pager.get("h") is None
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, tuple) for i in results)
        for result in results:
            assert isinstance(result, tuple)
            assert tuple(type(t) for t in result) == (
                str,
                compute.ReservationsScopedList,
            )
        # After exhaustion the pager sits on the last page, inverting get().
        assert pager.get("a") is None
        assert isinstance(pager.get("h"), compute.ReservationsScopedList)
        pages = list(client.aggregated_list(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_delete_rest(
    transport: str = "rest", request_type=compute.DeleteReservationRequest
):
    """delete over a faked HTTP session returns a compute.Operation with
    every field of the canned response intact."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.delete(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_delete_rest_from_dict():
    """Re-run the delete test with the request given as a plain dict."""
    test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
    """Flattened project/zone/reservation kwargs to delete must all appear
    in the single underlying HTTP request (URL or body)."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete(
            project="project_value", zone="zone_value", reservation="reservation_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "reservation_value" in http_call[1] + str(body)
def test_delete_rest_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials())
    request = compute.DeleteReservationRequest()
    # Mixing the request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.delete(
            request,
            project="project_value",
            zone="zone_value",
            reservation="reservation_value",
        )
def test_get_rest(transport: str = "rest", request_type=compute.GetReservationRequest):
    """get over a faked HTTP session returns a compute.Reservation carrying
    every field of the canned response."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Reservation(
            commitment="commitment_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            id="id_value",
            kind="kind_value",
            name="name_value",
            satisfies_pzs=True,
            self_link="self_link_value",
            specific_reservation=compute.AllocationSpecificSKUReservation(
                count="count_value"
            ),
            specific_reservation_required=True,
            status=compute.Reservation.Status.CREATING,
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        json_return_value = compute.Reservation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.get(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Reservation)
    assert response.commitment == "commitment_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.id == "id_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.satisfies_pzs is True
    assert response.self_link == "self_link_value"
    assert response.specific_reservation == compute.AllocationSpecificSKUReservation(
        count="count_value"
    )
    assert response.specific_reservation_required is True
    assert response.status == compute.Reservation.Status.CREATING
    assert response.zone == "zone_value"
def test_get_rest_from_dict():
    """Re-run the get test with the request given as a plain dict."""
    test_get_rest(request_type=dict)
def test_get_rest_flattened():
    """Flattened project/zone/reservation kwargs to get must all appear in
    the single underlying HTTP request (URL or body)."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Reservation()
        # Wrap the value into a proper Response obj
        json_return_value = compute.Reservation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get(
            project="project_value", zone="zone_value", reservation="reservation_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "reservation_value" in http_call[1] + str(body)
def test_get_rest_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials())
    request = compute.GetReservationRequest()
    # Mixing the request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.get(
            request,
            project="project_value",
            zone="zone_value",
            reservation="reservation_value",
        )
def test_get_iam_policy_rest(
    transport: str = "rest", request_type=compute.GetIamPolicyReservationRequest
):
    """get_iam_policy over a faked HTTP session returns a compute.Policy with
    every field of the canned response intact."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            audit_configs=[
                compute.AuditConfig(
                    audit_log_configs=[
                        compute.AuditLogConfig(
                            exempted_members=["exempted_members_value"]
                        )
                    ]
                )
            ],
            bindings=[compute.Binding(binding_id="binding_id_value")],
            etag="etag_value",
            iam_owned=True,
            rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
            version=774,
        )
        # Wrap the value into a proper Response obj
        json_return_value = compute.Policy.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.get_iam_policy(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.audit_configs == [
        compute.AuditConfig(
            audit_log_configs=[
                compute.AuditLogConfig(exempted_members=["exempted_members_value"])
            ]
        )
    ]
    assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
    assert response.etag == "etag_value"
    assert response.iam_owned is True
    assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
    assert response.version == 774
def test_get_iam_policy_rest_from_dict():
    """Re-run the get_iam_policy test with the request given as a plain dict."""
    test_get_iam_policy_rest(request_type=dict)
def test_get_iam_policy_rest_flattened():
    """Flattened project/zone/resource kwargs to get_iam_policy must all
    appear in the single underlying HTTP request (URL or body)."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()
        # Wrap the value into a proper Response obj
        json_return_value = compute.Policy.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_iam_policy(
            project="project_value", zone="zone_value", resource="resource_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "resource_value" in http_call[1] + str(body)
def test_get_iam_policy_rest_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials())
    request = compute.GetIamPolicyReservationRequest()
    # Mixing the request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            request,
            project="project_value",
            zone="zone_value",
            resource="resource_value",
        )
def test_insert_rest(
    transport: str = "rest", request_type=compute.InsertReservationRequest
):
    """insert over a faked HTTP session returns a compute.Operation with
    every field of the canned response intact."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.insert(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_insert_rest_from_dict():
    """Re-run the insert test with the request given as a plain dict."""
    test_insert_rest(request_type=dict)
def test_insert_rest_flattened():
    """Flattened kwargs to insert — including the serialized
    reservation_resource message — must all appear in the single underlying
    HTTP request (URL or body)."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        reservation_resource = compute.Reservation(commitment="commitment_value")
        client.insert(
            project="project_value",
            zone="zone_value",
            reservation_resource=reservation_resource,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        # The resource message must be serialized into the request body.
        assert compute.Reservation.to_json(
            reservation_resource,
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ) in http_call[1] + str(body)
def test_insert_rest_flattened_error():
client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert(
compute.InsertReservationRequest(),
project="project_value",
zone="zone_value",
reservation_resource=compute.Reservation(commitment="commitment_value"),
)
def test_list_rest(
    transport: str = "rest", request_type=compute.ListReservationsRequest
):
    """list() must deserialize the REST payload into a ListPager with all fields."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationList(
            id="id_value",
            items=[compute.Reservation(commitment="commitment_value")],
            kind="kind_value",
            next_page_token="next_page_token_value",
            self_link="self_link_value",
            warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.ReservationList.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.list(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPager)
    assert response.id == "id_value"
    assert response.items == [compute.Reservation(commitment="commitment_value")]
    assert response.kind == "kind_value"
    assert response.next_page_token == "next_page_token_value"
    assert response.self_link == "self_link_value"
    assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)


def test_list_rest_from_dict():
    """list() must accept a plain dict in place of the request object."""
    test_list_rest(request_type=dict)


def test_list_rest_flattened():
    """Flattened keyword arguments to list() must reach the HTTP request."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationList()

        # Wrap the value into a proper Response obj
        json_return_value = compute.ReservationList.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list(
            project="project_value", zone="zone_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)


def test_list_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list(
            compute.ListReservationsRequest(),
            project="project_value",
            zone="zone_value",
        )
def test_list_pager():
    """The pager must flatten multiple pages and expose per-page tokens."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Set the response as a series of pages: 3 + 0 + 1 + 2 items.
        response = (
            compute.ReservationList(
                items=[
                    compute.Reservation(),
                    compute.Reservation(),
                    compute.Reservation(),
                ],
                next_page_token="abc",
            ),
            compute.ReservationList(items=[], next_page_token="def",),
            compute.ReservationList(
                items=[compute.Reservation(),], next_page_token="ghi",
            ),
            compute.ReservationList(
                items=[compute.Reservation(), compute.Reservation(),],
            ),
        )
        # Two responses for two calls (one for the pager, one for .pages).
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.ReservationList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode("UTF-8")
            return_val.status_code = 200
        req.side_effect = return_values

        metadata = ()
        pager = client.list(request={})

        assert pager._metadata == metadata

        # Iterating the pager yields every item across all four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.Reservation) for i in results)

        # The .pages view exposes the raw page tokens, empty on the last page.
        pages = list(client.list(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_resize_rest(
    transport: str = "rest", request_type=compute.ResizeReservationRequest
):
    """resize() must deserialize the REST payload into an Operation with all fields."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.resize(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"


def test_resize_rest_from_dict():
    """resize() must accept a plain dict in place of the request object."""
    test_resize_rest(request_type=dict)


def test_resize_rest_flattened():
    """Flattened keyword arguments to resize() must reach the HTTP request."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        reservations_resize_request_resource = compute.ReservationsResizeRequest(
            specific_sku_count="specific_sku_count_value"
        )
        client.resize(
            project="project_value",
            zone="zone_value",
            reservation="reservation_value",
            reservations_resize_request_resource=reservations_resize_request_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "reservation_value" in http_call[1] + str(body)
        assert compute.ReservationsResizeRequest.to_json(
            reservations_resize_request_resource,
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ) in http_call[1] + str(body)


def test_resize_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.resize(
            compute.ResizeReservationRequest(),
            project="project_value",
            zone="zone_value",
            reservation="reservation_value",
            reservations_resize_request_resource=compute.ReservationsResizeRequest(
                specific_sku_count="specific_sku_count_value"
            ),
        )
def test_set_iam_policy_rest(
    transport: str = "rest", request_type=compute.SetIamPolicyReservationRequest
):
    """set_iam_policy() must deserialize the REST payload into a Policy with all fields."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            audit_configs=[
                compute.AuditConfig(
                    audit_log_configs=[
                        compute.AuditLogConfig(
                            exempted_members=["exempted_members_value"]
                        )
                    ]
                )
            ],
            bindings=[compute.Binding(binding_id="binding_id_value")],
            etag="etag_value",
            iam_owned=True,
            rules=[compute.Rule(action=compute.Rule.Action.ALLOW)],
            version=774,
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Policy.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.set_iam_policy(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.audit_configs == [
        compute.AuditConfig(
            audit_log_configs=[
                compute.AuditLogConfig(exempted_members=["exempted_members_value"])
            ]
        )
    ]
    assert response.bindings == [compute.Binding(binding_id="binding_id_value")]
    assert response.etag == "etag_value"
    assert response.iam_owned is True
    assert response.rules == [compute.Rule(action=compute.Rule.Action.ALLOW)]
    assert response.version == 774


def test_set_iam_policy_rest_from_dict():
    """set_iam_policy() must accept a plain dict in place of the request object."""
    test_set_iam_policy_rest(request_type=dict)


def test_set_iam_policy_rest_flattened():
    """Flattened keyword arguments to set_iam_policy() must reach the HTTP request."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Policy.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        zone_set_policy_request_resource = compute.ZoneSetPolicyRequest(
            bindings=[compute.Binding(binding_id="binding_id_value")]
        )
        client.set_iam_policy(
            project="project_value",
            zone="zone_value",
            resource="resource_value",
            zone_set_policy_request_resource=zone_set_policy_request_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "resource_value" in http_call[1] + str(body)
        assert compute.ZoneSetPolicyRequest.to_json(
            zone_set_policy_request_resource,
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ) in http_call[1] + str(body)


def test_set_iam_policy_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            compute.SetIamPolicyReservationRequest(),
            project="project_value",
            zone="zone_value",
            resource="resource_value",
            zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(
                bindings=[compute.Binding(binding_id="binding_id_value")]
            ),
        )
def test_test_iam_permissions_rest(
    transport: str = "rest", request_type=compute.TestIamPermissionsReservationRequest
):
    """test_iam_permissions() must deserialize the REST payload into a TestPermissionsResponse."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.TestPermissionsResponse(
            permissions=["permissions_value"],
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.TestPermissionsResponse.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.test_iam_permissions(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.TestPermissionsResponse)
    assert response.permissions == ["permissions_value"]


def test_test_iam_permissions_rest_from_dict():
    """test_iam_permissions() must accept a plain dict in place of the request object."""
    test_test_iam_permissions_rest(request_type=dict)


def test_test_iam_permissions_rest_flattened():
    """Flattened keyword arguments to test_iam_permissions() must reach the HTTP request."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.TestPermissionsResponse()

        # Wrap the value into a proper Response obj
        json_return_value = compute.TestPermissionsResponse.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        test_permissions_request_resource = compute.TestPermissionsRequest(
            permissions=["permissions_value"]
        )
        client.test_iam_permissions(
            project="project_value",
            zone="zone_value",
            resource="resource_value",
            test_permissions_request_resource=test_permissions_request_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("data")
        assert "project_value" in http_call[1] + str(body)
        assert "zone_value" in http_call[1] + str(body)
        assert "resource_value" in http_call[1] + str(body)
        assert compute.TestPermissionsRequest.to_json(
            test_permissions_request_resource,
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ) in http_call[1] + str(body)


def test_test_iam_permissions_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ReservationsClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            compute.TestIamPermissionsReservationRequest(),
            project="project_value",
            zone="zone_value",
            resource="resource_value",
            test_permissions_request_resource=compute.TestPermissionsRequest(
                permissions=["permissions_value"]
            ),
        )
def test_credentials_transport_error():
    """Supplying a transport instance together with credentials/options must fail."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.ReservationsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ReservationsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.ReservationsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ReservationsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.ReservationsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ReservationsClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )


def test_transport_instance():
    """A client instantiated with a custom transport must use that instance."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.ReservationsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = ReservationsClient(transport=transport)
    assert client.transport is transport


@pytest.mark.parametrize("transport_class", [transports.ReservationsRestTransport,])
def test_transport_adc(transport_class):
    """Transports built without credentials must fall back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_reservations_base_transport_error():
    """Base transport must reject credentials and credentials_file together."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.ReservationsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )


def test_reservations_base_transport():
    """Every RPC method on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.compute_v1.services.reservations.transports.ReservationsTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.ReservationsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "aggregated_list",
        "delete",
        "get",
        "get_iam_policy",
        "insert",
        "list",
        "resize",
        "set_iam_policy",
        "test_iam_permissions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())


@requires_google_auth_gte_1_25_0
def test_reservations_base_transport_with_credentials_file():
    """credentials_file must be loaded with the expected default scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.ReservationsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id="octopus",
        )


@requires_google_auth_lt_1_25_0
def test_reservations_base_transport_with_credentials_file_old_google_auth():
    """credentials_file must be loaded with scopes= (pre-1.25 google-auth signature)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.ReservationsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id="octopus",
        )


def test_reservations_base_transport_with_adc():
    """With no explicit credentials, the base transport must consult ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.ReservationsTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_reservations_auth_adc():
    """Client construction without credentials must call ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        ReservationsClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id=None,
        )


@requires_google_auth_lt_1_25_0
def test_reservations_auth_adc_old_google_auth():
    """Client construction without credentials must call ADC with scopes= (pre-1.25 google-auth)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        ReservationsClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id=None,
        )


def test_reservations_http_transport_client_cert_source_for_mtls():
    """A client cert source must be forwarded to the session's mTLS configuration."""
    cred = ga_credentials.AnonymousCredentials()
    with mock.patch(
        "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
    ) as mock_configure_mtls_channel:
        transports.ReservationsRestTransport(
            credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
        )
        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)


def test_reservations_host_no_port():
    """An endpoint without a port must default to :443."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com"
        ),
    )
    assert client.transport._host == "compute.googleapis.com:443"


def test_reservations_host_with_port():
    """An endpoint with an explicit port must be preserved verbatim."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
    """common_billing_account_path must render the expected resource path."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = ReservationsClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path must invert common_billing_account_path."""
    expected = {
        "billing_account": "clam",
    }
    path = ReservationsClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = ReservationsClient.parse_common_billing_account_path(path)
    assert expected == actual


def test_common_folder_path():
    """common_folder_path must render the expected resource path."""
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder,)
    actual = ReservationsClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    """parse_common_folder_path must invert common_folder_path."""
    expected = {
        "folder": "octopus",
    }
    path = ReservationsClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = ReservationsClient.parse_common_folder_path(path)
    assert expected == actual


def test_common_organization_path():
    """common_organization_path must render the expected resource path."""
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = ReservationsClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    """parse_common_organization_path must invert common_organization_path."""
    expected = {
        "organization": "nudibranch",
    }
    path = ReservationsClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = ReservationsClient.parse_common_organization_path(path)
    assert expected == actual


def test_common_project_path():
    """common_project_path must render the expected resource path."""
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project,)
    actual = ReservationsClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    """parse_common_project_path must invert common_project_path."""
    expected = {
        "project": "mussel",
    }
    path = ReservationsClient.common_project_path(**expected)

    # Check that the path construction is reversible.
    actual = ReservationsClient.parse_common_project_path(path)
    assert expected == actual


def test_common_location_path():
    """common_location_path must render the expected resource path."""
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    actual = ReservationsClient.common_location_path(project, location)
    assert expected == actual


def test_parse_common_location_path():
    """parse_common_location_path must invert common_location_path."""
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = ReservationsClient.common_location_path(**expected)

    # Check that the path construction is reversible.
    actual = ReservationsClient.parse_common_location_path(path)
    assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom client_info must be passed to _prep_wrapped_messages on both paths."""
    client_info = gapic_v1.client_info.ClientInfo()

    # Construction via the client class.
    with mock.patch.object(
        transports.ReservationsTransport, "_prep_wrapped_messages"
    ) as prep:
        client = ReservationsClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    # Construction via the transport class directly.
    with mock.patch.object(
        transports.ReservationsTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = ReservationsClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| apache-2.0 |
vipulroxx/sympy | sympy/integrals/heurisch.py | 57 | 22774 | from __future__ import print_function, division
from itertools import permutations
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.mul import Mul
from sympy.core.symbol import Wild, Dummy
from sympy.core.basic import sympify
from sympy.core.numbers import Rational, pi
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.functions import exp, sin, cos, tan, cot, asin, atan
from sympy.functions import log, sinh, cosh, tanh, coth, asinh, acosh
from sympy.functions import sqrt, erf, erfi, li, Ei
from sympy.functions import besselj, bessely, besseli, besselk
from sympy.functions import hankel1, hankel2, jn, yn
from sympy.functions.elementary.exponential import LambertW
from sympy.functions.elementary.piecewise import Piecewise
from sympy.logic.boolalg import And
from sympy.utilities.iterables import uniq
from sympy.polys import quo, gcd, lcm, factor, cancel, PolynomialError
from sympy.polys.monomials import itermonomials
from sympy.polys.polyroots import root_factors
from sympy.polys.rings import PolyRing
from sympy.polys.solvers import solve_lin_sys
from sympy.polys.constructor import construct_domain
from sympy.core.compatibility import reduce, ordered
def components(f, x):
    """
    Returns a set of all functional components of the given expression
    which includes symbols, function applications and compositions and
    non-integer powers. Fractional powers are collected with
    minimal, positive exponents.

    >>> from sympy import cos, sin
    >>> from sympy.abc import x, y
    >>> from sympy.integrals.heurisch import components
    >>> components(sin(x)*cos(x)**2, x)
    set([x, sin(x), cos(x)])

    See Also
    ========

    heurisch
    """
    result = set()

    # Only subexpressions that actually depend on x contribute components.
    if x in f.free_symbols:
        if f.is_Symbol:
            result.add(f)
        elif f.is_Function or f.is_Derivative:
            # Recurse into the arguments, then record the application itself.
            for g in f.args:
                result |= components(g, x)

            result.add(f)
        elif f.is_Pow:
            result |= components(f.base, x)

            if not f.exp.is_Integer:
                if f.exp.is_Rational:
                    # Collect fractional powers via the minimal positive
                    # exponent: base**(p/q) is represented by base**(1/q).
                    result.add(f.base**Rational(1, f.exp.q))
                else:
                    # Symbolic exponent: both the exponent's own components
                    # and the whole power are independent components.
                    result |= components(f.exp, x) | set([f])
        else:
            # Add, Mul and other compound nodes: just recurse into the args.
            for g in f.args:
                result |= components(g, x)

    return result
# name -> [] of symbols
_symbols_cache = {}


# NB @cacheit is not convenient here
def _symbols(name, n):
    """get vector of symbols local to this module"""
    # Reuse (and grow) the per-name list so repeated calls hand back
    # the very same Dummy objects.
    lsyms = _symbols_cache.setdefault(name, [])
    while len(lsyms) < n:
        lsyms.append(Dummy('%s%i' % (name, len(lsyms))))
    return lsyms[:n]
def heurisch_wrapper(f, x, rewrite=False, hints=None, mappings=None, retries=3,
                     degree_offset=0, unnecessary_permutations=None):
    """
    A wrapper around the heurisch integration algorithm.

    This method takes the result from heurisch and checks for poles in the
    denominator. For each of these poles, the integral is reevaluated, and
    the final integration result is given in terms of a Piecewise.

    Examples
    ========

    >>> from sympy.core import symbols
    >>> from sympy.functions import cos
    >>> from sympy.integrals.heurisch import heurisch, heurisch_wrapper
    >>> n, x = symbols('n x')
    >>> heurisch(cos(n*x), x)
    sin(n*x)/n
    >>> heurisch_wrapper(cos(n*x), x)
    Piecewise((x, Eq(n, 0)), (sin(n*x)/n, True))

    See Also
    ========

    heurisch
    """
    from sympy.solvers.solvers import solve, denoms
    f = sympify(f)
    # Constant with respect to x: the antiderivative is simply f*x.
    if x not in f.free_symbols:
        return f*x
    res = heurisch(f, x, rewrite, hints, mappings, retries, degree_offset,
                   unnecessary_permutations)
    # heurisch may return None (failure) — pass that through unchanged.
    if not isinstance(res, Basic):
        return res
    # We consider each denominator in the expression, and try to find
    # cases where one or more symbolic denominator might be zero. The
    # conditions for these cases are stored in the list slns.
    slns = []
    for d in denoms(res):
        try:
            slns += solve(d, dict=True, exclude=(x,))
        except NotImplementedError:
            pass
    if not slns:
        return res
    slns = list(uniq(slns))
    # Remove the solutions corresponding to poles in the original expression.
    slns0 = []
    for d in denoms(f):
        try:
            slns0 += solve(d, dict=True, exclude=(x,))
        except NotImplementedError:
            pass
    slns = [s for s in slns if s not in slns0]
    if not slns:
        return res
    if len(slns) > 1:
        # Also cover the case where several degenerate conditions hold
        # simultaneously, by solving all the pole equations together.
        eqs = []
        for sub_dict in slns:
            eqs.extend([Eq(key, value) for key, value in sub_dict.items()])
        slns = solve(eqs, dict=True, exclude=(x,)) + slns
    # For each case listed in the list slns, we reevaluate the integral.
    pairs = []
    for sub_dict in slns:
        expr = heurisch(f.subs(sub_dict), x, rewrite, hints, mappings, retries,
                        degree_offset, unnecessary_permutations)
        cond = And(*[Eq(key, value) for key, value in sub_dict.items()])
        pairs.append((expr, cond))
    # The generic (non-degenerate) antiderivative is the catch-all branch.
    pairs.append((heurisch(f, x, rewrite, hints, mappings, retries,
                           degree_offset, unnecessary_permutations), True))
    return Piecewise(*pairs)
class BesselTable(object):
    """
    Derivatives of Bessel functions of orders n and n-1
    in terms of each other.

    See the docstring of DiffCache.
    """

    def __init__(self):
        # Maps a Bessel-type function class to a pair of expressions
        # (d/dz f(n, z), d/dz f(n-1, z)) written only in terms of
        # f(n, z) and f(n-1, z), with symbolic order self.n and
        # argument self.z (substituted later in diffs()).
        self.table = {}
        self.n = Dummy('n')
        self.z = Dummy('z')
        self._create_table()

    def _create_table(t):
        # NOTE: each pair is (derivative of f(n, z), derivative of f(n-1, z));
        # DiffCache.get_diff relies on exactly this ordering.
        table, n, z = t.table, t.n, t.z
        for f in (besselj, bessely, hankel1, hankel2):
            table[f] = (f(n-1, z) - n*f(n, z)/z,
                        (n-1)*f(n-1, z)/z - f(n, z))

        # Modified Bessel functions use sign-flipped recurrences.
        f = besseli
        table[f] = (f(n-1, z) - n*f(n, z)/z,
                    (n-1)*f(n-1, z)/z + f(n, z))
        f = besselk
        table[f] = (-f(n-1, z) - n*f(n, z)/z,
                    (n-1)*f(n-1, z)/z - f(n, z))

        # Spherical Bessel functions jn, yn.
        for f in (jn, yn):
            table[f] = (f(n-1, z) - (n+1)*f(n, z)/z,
                        (n-1)*f(n-1, z)/z - f(n, z))

    def diffs(t, f, n, z):
        # Substitute the concrete order and argument into the tabulated
        # pair; returns None implicitly when f is not tabulated.
        if f in t.table:
            diff0, diff1 = t.table[f]
            repl = [(t.n, n), (t.z, z)]
            return (diff0.subs(repl), diff1.subs(repl))

    def has(t, f):
        # True if derivatives of f are available in this table.
        return f in t.table
_bessel_table = None
class DiffCache(object):
    """
    Store for derivatives of expressions.

    The standard form of the derivative of a Bessel function of order n
    contains two Bessel functions of orders n-1 and n+1, respectively.
    Such forms cannot be used in parallel Risch algorithm, because
    there is a linear recurrence relation between the three functions
    while the algorithm expects that functions and derivatives are
    represented in terms of algebraically independent transcendentals.

    The solution is to take two of the functions, e.g., those of orders
    n and n-1, and to express the derivatives in terms of the pair.
    To guarantee that the proper form is used the two derivatives are
    cached as soon as one is encountered.

    Derivatives of other functions are also cached at no extra cost.
    All derivatives are with respect to the same variable `x`.
    """

    def __init__(self, x):
        # Maps an expression to its (cancelled) derivative w.r.t. x.
        self.cache = {}
        self.x = x

        # The Bessel derivative table is built lazily, once per process,
        # and shared by every DiffCache instance.
        global _bessel_table
        if not _bessel_table:
            _bessel_table = BesselTable()

    def get_diff(self, f):
        """Return d(f)/dx, computing and caching it on first request."""
        cache = self.cache
        if f in cache:
            pass
        elif (not hasattr(f, 'func') or
                not _bessel_table.has(f.func)):
            # Ordinary expression: plain differentiation, cancelled.
            cache[f] = cancel(f.diff(self.x))
        else:
            # Bessel-type function f(n, z): cache the derivatives of both
            # orders n and n-1 at once so they remain expressed in terms
            # of each other (see class docstring).
            n, z = f.args
            d0, d1 = _bessel_table.diffs(f.func, n, z)
            dz = self.get_diff(z)
            cache[f] = d0*dz
            cache[f.func(n-1, z)] = d1*dz
        return cache[f]
def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3,
             degree_offset=0, unnecessary_permutations=None):
    """
    Compute indefinite integral using heuristic Risch algorithm.

    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator".

    The algorithm supports various classes of functions including
    transcendental elementary or special functions like Airy,
    Bessel, Whittaker and Lambert.

    Note that this algorithm is not a decision procedure. If it isn't
    able to compute the antiderivative for a given function, then this is
    not a proof that such a function does not exist.  One should use
    recursive Risch algorithm in such case.  It's an open question if
    this algorithm can be made a full decision procedure.

    This is an internal integrator procedure. You should use toplevel
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and otherwise may fail.

    Specification
    =============

     heurisch(f, x, rewrite=False, hints=None)

       where
         f : expression
         x : symbol

         rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
         hints -> a list of functions that may appear in anti-derivate

          - hints = None          --> no suggestions at all
          - hints = [ ]           --> try to figure out
          - hints = [f1, ..., fn] --> we know better

    Examples
    ========

    >>> from sympy import tan
    >>> from sympy.integrals.heurisch import heurisch
    >>> from sympy.abc import x, y

    >>> heurisch(y*tan(x), x)
    y*log(tan(x)**2 + 1)/2

    See Manuel Bronstein's "Poor Man's Integrator":

    [1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html

    For more information on the implemented algorithm refer to:

    [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration
        Method and its Implementation in Maple, Proceedings of
        ISSAC'89, ACM Press, 212-217.

    [3] J. H. Davenport, On the Parallel Risch Algorithm (I),
        Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.

    [4] J. H. Davenport, On the Parallel Risch Algorithm (III):
        Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.

    [5] J. H. Davenport, B. M. Trager, On the Parallel Risch
        Algorithm (II), ACM Transactions on Mathematical
        Software 11 (1985), 356-362.

    See Also
    ========

    sympy.integrals.integrals.Integral.doit
    sympy.integrals.integrals.Integral
    components
    """
    f = sympify(f)
    # Constant w.r.t. x: the antiderivative is just f*x.
    if x not in f.free_symbols:
        return f*x

    if not f.is_Add:
        indep, f = f.as_independent(x)
    else:
        indep = S.One

    rewritables = {
        (sin, cos, cot): tan,
        (sinh, cosh, coth): tanh,
    }

    # Optionally rewrite trig/hyperbolic functions in terms of tan/tanh.
    # If no rewritable function is present, mark rewrite as already done
    # so the fallback retry below does not loop.
    if rewrite:
        for candidates, rule in rewritables.items():
            f = f.rewrite(candidates, rule)
    else:
        for candidates in rewritables.keys():
            if f.has(*candidates):
                break
        else:
            rewrite = True

    terms = components(f, x)

    # Seed `terms` with functions likely to appear in the antiderivative
    # (erf/erfi for Gaussians, asinh/acosh for quadratic surds, ...).
    if hints is not None:
        if not hints:
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])
            c = Wild('c', exclude=[x])

            for g in set(terms): # using copy of terms
                if g.is_Function:
                    if g.func is li:
                        M = g.args[0].match(a*x**b)
                        if M is not None:
                            terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) )
                    elif g.func is exp:
                        M = g.args[0].match(a*x**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*x))
                            else: # M[a].is_negative or unknown
                                terms.add(erf(sqrt(-M[a])*x))
                        M = g.args[0].match(a*x**2 + b*x + c)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
                                          erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a]))))
                            elif M[a].is_negative:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
                                          erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a]))))
                        M = g.args[0].match(a*log(x)**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a]))))
                            if M[a].is_negative:
                                terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a]))))
                elif g.is_Pow:
                    if g.exp.is_Rational and g.exp.q == 2:
                        M = g.base.match(a*x**2 + b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(asinh(sqrt(M[a]/M[b])*x))
                            elif M[a].is_negative:
                                terms.add(asin(sqrt(-M[a]/M[b])*x))
                        M = g.base.match(a*x**2 - b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(acosh(sqrt(M[a]/M[b])*x))
                            elif M[a].is_negative:
                                terms.add((-M[b]/2*sqrt(-M[a])*
                                           atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b]))))
        else:
            terms |= set(hints)

    dcache = DiffCache(x)

    # Close `terms` under differentiation so the generated differential
    # field contains every transcendental that can appear.
    for g in set(terms): # using copy of terms
        terms |= components(dcache.get_diff(g), x)

    # TODO: caching is significant factor for why permutations work at all. Change this.
    V = _symbols('x', len(terms))

    # sort mapping expressions from largest to smallest (last is always x).
    mapping = list(reversed(list(zip(*ordered( #
        [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) #
    rev_mapping = dict([(v, k) for k, v in mapping]) #
    if mappings is None: #
        # optimizing the number of permutations of mapping #
        assert mapping[-1][0] == x # if not, find it and correct this comment
        unnecessary_permutations = [mapping.pop(-1)]
        mappings = permutations(mapping)
    else:
        unnecessary_permutations = unnecessary_permutations or []

    def _substitute(expr):
        # Rewrite expr in terms of the fresh symbols V (current mapping).
        return expr.subs(mapping)

    # Try permutations of the substitution until f and all derivatives
    # become rational functions of V.
    for mapping in mappings:
        mapping = list(mapping)
        mapping = mapping + unnecessary_permutations
        diffs = [ _substitute(dcache.get_diff(g)) for g in terms ]
        denoms = [ g.as_numer_denom()[1] for g in diffs ]
        if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V):
            denom = reduce(lambda p, q: lcm(p, q, *V), denoms)
            break
    else:
        # No usable mapping: retry once with the tan/tanh rewrite forced.
        if not rewrite:
            result = heurisch(f, x, rewrite=True, hints=hints,
                unnecessary_permutations=unnecessary_permutations)

            if result is not None:
                return indep*result
        return None

    numers = [ cancel(denom*g) for g in diffs ]

    def _derivation(h):
        # Total derivative in the substituted variables V.
        return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])

    def _deflation(p):
        # Strip repeated factors that would produce logarithmic parts.
        for y in V:
            if not p.has(y):
                continue

            if _derivation(p) is not S.Zero:
                c, q = p.as_poly(y).primitive()
                return _deflation(c)*gcd(q, q.diff(y)).as_expr()
            else:
                return p

    def _splitter(p):
        # Split p into a "special" part (dividing its own derivative)
        # and a normal part; see Bronstein's pmint.
        for y in V:
            if not p.has(y):
                continue

            if _derivation(y) is not S.Zero:
                c, q = p.as_poly(y).primitive()

                q = q.as_expr()

                h = gcd(q, _derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)

                c_split = _splitter(c)

                if s.as_poly(y).degree() == 0:
                    return (c_split[0], q * c_split[1])

                q_split = _splitter(cancel(q / s))

                return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
            else:
                return (S.One, p)

    # Denominator factors contributed by specific transcendentals; the
    # boolean marks whether the factor must multiply the candidate's
    # denominator (True for LambertW).
    special = {}

    for term in terms:
        if term.is_Function:
            if term.func is tan:
                special[1 + _substitute(term)**2] = False
            elif term.func is tanh:
                special[1 + _substitute(term)] = False
                special[1 - _substitute(term)] = False
            elif term.func is LambertW:
                special[_substitute(term)] = True

    F = _substitute(f)

    P, Q = F.as_numer_denom()

    u_split = _splitter(denom)
    v_split = _splitter(Q)

    polys = set(list(v_split) + [ u_split[0] ] + list(special.keys()))

    s = u_split[0] * Mul(*[ k for k, v in special.items() if v ])
    polified = [ p.as_poly(*V) for p in [s, P, Q] ]

    if None in polified:
        return None

    #--- definitions for _integrate
    a, b, c = [ p.total_degree() for p in polified ]

    poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr()

    def _exponent(g):
        # Degree bound contributed by fractional powers in g.
        if g.is_Pow:
            if g.exp.is_Rational and g.exp.q != 1:
                if g.exp.p > 0:
                    return g.exp.p + g.exp.q - 1
                else:
                    return abs(g.exp.p + g.exp.q)
            else:
                return 1
        elif not g.is_Atom and g.args:
            return max([ _exponent(h) for h in g.args ])
        else:
            return 1

    A, B = _exponent(f), a + max(b, c)

    # Build the ansatz polynomial with undetermined coefficients up to a
    # heuristic degree bound (tunable via degree_offset).
    if A > 1 and B > 1:
        monoms = itermonomials(V, A + B - 1 + degree_offset)
    else:
        monoms = itermonomials(V, A + B + degree_offset)

    poly_coeffs = _symbols('A', len(monoms))

    poly_part = Add(*[ poly_coeffs[i]*monomial
        for i, monomial in enumerate(monoms) ])

    # Collect polynomial factors whose roots give candidate log arguments.
    reducibles = set()

    for poly in polys:
        if poly.has(*V):
            try:
                factorization = factor(poly, greedy=True)
            except PolynomialError:
                factorization = poly

            if factorization.is_Mul:
                factors = factorization.args
            else:
                factors = (factorization, )

            for fact in factors:
                if fact.is_Pow:
                    reducibles.add(fact.base)
                else:
                    reducibles.add(fact)

    def _integrate(field=None):
        """Solve for the undetermined coefficients over the given
        coefficient field; return the antiderivative or None."""
        irreducibles = set()

        for poly in reducibles:
            for z in poly.free_symbols:
                if z in V:
                    break # should this be: `irreducibles |= \
            else:         #     set(root_factors(poly, z, filter=field))`
                continue  # and the line below deleted?
                          #                    |
                          #                    V
            irreducibles |= set(root_factors(poly, z, filter=field))

        # NOTE: log_coeffs is never used below; the log coefficients are
        # appended to poly_coeffs so they are solved for together with
        # the polynomial-part coefficients.
        log_coeffs, log_part = [], []
        B = _symbols('B', len(irreducibles))

        # Note: the ordering matters here
        for poly, b in reversed(list(ordered(zip(irreducibles, B)))):
            if poly.has(*V):
                poly_coeffs.append(b)
                log_part.append(b * log(poly))

        # TODO: Currently it's better to use symbolic expressions here instead
        # of rational functions, because it's simpler and FracElement doesn't
        # give big speed improvement yet. This is because cancelation is slow
        # due to slow polynomial GCD algorithms. If this gets improved then
        # revise this code.
        candidate = poly_part/poly_denom + Add(*log_part)
        h = F - _derivation(candidate) / denom
        raw_numer = h.as_numer_denom()[0]

        # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field
        # that we have to determine. We can't use simply atoms() because log(3),
        # sqrt(y) and similar expressions can appear, leading to non-trivial
        # domains.
        syms = set(poly_coeffs) | set(V)
        non_syms = set([])

        def find_non_syms(expr):
            if expr.is_Integer or expr.is_Rational:
                pass # ignore trivial numbers
            elif expr in syms:
                pass # ignore variables
            elif not expr.has(*syms):
                non_syms.add(expr)
            elif expr.is_Add or expr.is_Mul or expr.is_Pow:
                list(map(find_non_syms, expr.args))
            else:
                # TODO: Non-polynomial expression. This should have been
                # filtered out at an earlier stage.
                raise PolynomialError

        try:
            find_non_syms(raw_numer)
        except PolynomialError:
            return None
        else:
            ground, _ = construct_domain(non_syms, field=True)

        coeff_ring = PolyRing(poly_coeffs, ground)
        ring = PolyRing(V, coeff_ring)

        numer = ring.from_expr(raw_numer)

        # All coefficients of numer must vanish: a linear system in the
        # undetermined coefficients.
        solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False)

        if solution is None:
            return None
        else:
            # Coefficients left unconstrained by the system are set to 0.
            return candidate.subs(solution).subs(
                list(zip(poly_coeffs, [S.Zero]*len(poly_coeffs))))

    # Prefer the rationals as coefficient field when possible; fall back
    # to an automatically constructed field otherwise.
    if not (F.free_symbols - set(V)):
        solution = _integrate('Q')

        if solution is None:
            solution = _integrate()
    else:
        solution = _integrate()

    if solution is not None:
        antideriv = solution.subs(rev_mapping)
        antideriv = cancel(antideriv).expand(force=True)

        if antideriv.is_Add:
            antideriv = antideriv.as_independent(x)[1]

        return indep*antideriv
    else:
        # Retry with a different permutation of the mapping.
        if retries >= 0:
            result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations)

            if result is not None:
                return indep*result

        return None
| bsd-3-clause |
GodBlessPP/W17test_2nd_2 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary.

    Each key in *d* is replaced by its value, in the dictionary's
    iteration order.
    """
    result = s
    for old, new in d.items():
        result = result.replace(old, new)
    return result
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # The replacement targets had been corrupted to identity operations
    # ('&' -> '&'); restore the XML character references.
    # must do ampersand first, or the '&' of the other entities would be
    # double-escaped
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # The replacement sources had been corrupted to identity operations
    # ('<' -> '<'); restore the XML character references.
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so that entity references produced above are
    # not re-interpreted
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    entities = entities.copy()
    # Whitespace inside attribute values must be preserved as character
    # references (the stored replacements had been corrupted to literal
    # whitespace; restore &#10;/&#13;/&#9;).
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Both quote kinds present: double-quote and escape embedded
            # double quotes (restored from corrupted '"' -> '"').
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _gettextwriter(out, encoding):
    """Return a text-mode writer for *out*.

    *out* may be None (defaults to sys.stdout), an existing text stream
    (returned unchanged), a raw/buffered binary stream, or any object
    with a ``write`` method; binary writers are wrapped in a
    TextIOWrapper using *encoding*.
    """
    if out is None:
        import sys
        return sys.stdout

    if isinstance(out, io.TextIOBase):
        # use a text writer as is
        return out

    # wrap a binary writer with TextIOWrapper
    if isinstance(out, io.RawIOBase):
        # Keep the original file open when the TextIOWrapper is
        # destroyed
        class _wrapper:
            __class__ = out.__class__
            def __getattr__(self, name):
                return getattr(out, name)
        buffer = _wrapper()
        # Neutralize close() so that garbage-collecting the wrapper does
        # not close the caller's stream.
        buffer.close = lambda: None
    else:
        # This is to handle passed objects that aren't in the
        # IOBase hierarchy, but just have a write method
        buffer = io.BufferedIOBase()
        buffer.writable = lambda: True
        buffer.write = out.write
        try:
            # TextIOWrapper uses this methods to determine
            # if BOM (for UTF-16, etc) should be added
            buffer.seekable = out.seekable
            buffer.tell = out.tell
        except AttributeError:
            pass
    return io.TextIOWrapper(buffer, encoding=encoding,
                            errors='xmlcharrefreplace',
                            newline='\n',
                            write_through=True)
class XMLGenerator(handler.ContentHandler):
    """SAX ContentHandler that writes the received events back out as an
    XML document on *out* (default: sys.stdout via _gettextwriter).

    With short_empty_elements=True, elements without content are emitted
    as ``<tag/>`` instead of ``<tag></tag>``.
    """

    def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
        handler.ContentHandler.__init__(self)
        out = _gettextwriter(out, encoding)
        self._write = out.write
        self._flush = out.flush
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []
        self._encoding = encoding
        self._short_empty_elements = short_empty_elements
        # True while a start tag is open and still missing its closing '>'
        # (used only for the short-empty-elements optimization).
        self._pending_start_element = False

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
            # bound by definition to http://www.w3.org/XML/1998/namespace.  It
            # does not need to be declared and will not usually be found in
            # self._current_context.
            if 'http://www.w3.org/XML/1998/namespace' == name[0]:
                return 'xml:' + name[1]
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]

    def _finish_pending_start_element(self,endElement=False):
        # Close a start tag left open by the short-empty-elements mode.
        if self._pending_start_element:
            self._write('>')
            self._pending_start_element = False

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def endDocument(self):
        self._flush()

    def startPrefixMapping(self, prefix, uri):
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._finish_pending_start_element()
        self._write('<' + name)
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElement(self, name):
        if self._pending_start_element:
            # No content was written since the start tag: emit <tag/>.
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        self._finish_pending_start_element()
        self._write('<' + self._qname(name))

        # Declare any namespace mappings opened since the last element.
        for prefix, uri in self._undeclared_ns_maps:
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElementNS(self, name, qname):
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % self._qname(name))

    def characters(self, content):
        if content:
            self._finish_pending_start_element()
            self._write(escape(content))

    def ignorableWhitespace(self, content):
        if content:
            self._finish_pending_start_element()
            self._write(content)

    def processingInstruction(self, target, data):
        self._finish_pending_start_element()
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        xmlreader.XMLReader.__init__(self)
        # The upstream reader; may also be set later via setParent().
        self._parent = parent

    # ErrorHandler methods

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # Install ourselves as the parent's handlers so every event is
        # routed through this filter before reaching the client handlers.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent

    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
| gpl-3.0 |
nesterione/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data: a noisy 2-D spiral of n_samples points.
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)

X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T  # shape (n_samples, 2)

# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)

# Compare each linkage with and without the connectivity constraint,
# for a fine (30) and a coarse (3) number of clusters.
for connectivity in (None, knn_graph):
    for n_clusters in (30, 3):
        plt.figure(figsize=(10, 4))
        for index, linkage in enumerate(('average', 'complete', 'ward')):
            plt.subplot(1, 3, index + 1)
            model = AgglomerativeClustering(linkage=linkage,
                                            connectivity=connectivity,
                                            n_clusters=n_clusters)
            t0 = time.time()
            model.fit(X)
            elapsed_time = time.time() - t0
            # NOTE(review): plt.cm.spectral was removed in newer matplotlib
            # releases; use 'nipy_spectral' there — confirm target version.
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.spectral)
            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
                      fontdict=dict(verticalalignment='top'))
            plt.axis('equal')
            plt.axis('off')

            plt.subplots_adjust(bottom=0, top=.89, wspace=0,
                                left=0, right=1)
            plt.suptitle('n_cluster=%i, connectivity=%r' %
                         (n_clusters, connectivity is not None), size=17)

plt.show()
| bsd-3-clause |
wangxiaomo/rivercrab | setup.py | 1 | 1426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from distutils.core import setup
VERSION = '0.8'

# Long description is taken verbatim from the README.
LONG_DESCRIPTION = open('README.rst').read()

INSTALL_REQUIRES = [
    'beautifulsoup4',
]

# argparse only became part of the standard library in Python 2.7, so
# pull in the backport when installing under 2.6.
PY_MAJOR, PY_MINOR = sys.version_info[:2]
if (PY_MAJOR, PY_MINOR) == (2, 6):
    INSTALL_REQUIRES.append('argparse')

setup(
    name='rivercrab',
    version=VERSION,
    description='River Crab',
    long_description=LONG_DESCRIPTION,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    author='Philip Xu',
    author_email='pyx@xrefactor.com',
    url='https://bitbucket.org/pyx/rivercrab',
    download_url=(
        'https://bitbucket.org/pyx/rivercrab/get/v%s.tar.bz2' % VERSION),
    scripts=['rivercrab'],
    license='BSD-New',
    install_requires=INSTALL_REQUIRES,
)
| bsd-3-clause |
Godiyos/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_tuple_params.py | 203 | 5565 | """Fixer for function definitions with tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error in Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
    """Return True if *stmt* is a statement node whose first child is a
    string literal (i.e. a docstring statement)."""
    if not isinstance(stmt, pytree.Node):
        return False
    return stmt.children[0].type == token.STRING
class FixTupleParams(fixer_base.BaseFix):
    run_order = 4 #use a lower order since lambda is part of other
                  #patterns
    BM_compatible = True

    # Matches either a def with tuple parameters or a parenthesized
    # lambda parameter list (both Python 2 only syntax).
    PATTERN = """
              funcdef< 'def' any parameters< '(' args=any ')' >
                       ['->' any] ':' suite=any+ >
              |
              lambda=
              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
                       ':' body=any
              >
              """

    def transform(self, node, results):
        """Replace each tuple parameter with a fresh name and prepend an
        unpacking assignment to the function body."""
        if "lambda" in results:
            return self.transform_lambda(node, results)

        new_lines = []
        suite = results["suite"]
        args = results["args"]
        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
        # TODO(cwinter): suite-cleanup
        if suite[0].children[1].type == token.INDENT:
            start = 2
            indent = suite[0].children[1].value
            end = Newline()
        else:
            # One-line suite: join the unpacking statements with '; '.
            start = 0
            indent = "; "
            end = pytree.Leaf(token.INDENT, "")

        # We need access to self for new_name(), and making this a method
        # doesn't feel right. Closing over self and new_lines makes the
        # code below cleaner.
        def handle_tuple(tuple_arg, add_prefix=False):
            n = Name(self.new_name())
            arg = tuple_arg.clone()
            arg.prefix = ""
            stmt = Assign(arg, n.clone())
            if add_prefix:
                n.prefix = " "
            tuple_arg.replace(n)
            new_lines.append(pytree.Node(syms.simple_stmt,
                                         [stmt, end.clone()]))

        if args.type == syms.tfpdef:
            handle_tuple(args)
        elif args.type == syms.typedargslist:
            for i, arg in enumerate(args.children):
                if arg.type == syms.tfpdef:
                    # Without add_prefix, the emitted code is correct,
                    # just ugly.
                    handle_tuple(arg, add_prefix=(i > 0))

        if not new_lines:
            return

        # This isn't strictly necessary, but it plays nicely with other fixers.
        # TODO(cwinter) get rid of this when children becomes a smart list
        for line in new_lines:
            line.parent = suite[0]

        # TODO(cwinter) suite-cleanup
        after = start
        if start == 0:
            new_lines[0].prefix = " "
        elif is_docstring(suite[0].children[start]):
            # Keep the docstring first; insert the unpacking after it.
            new_lines[0].prefix = indent
            after = start + 1

        for line in new_lines:
            line.parent = suite[0]
        suite[0].children[after:after] = new_lines
        for i in range(after+1, after+len(new_lines)+1):
            suite[0].children[i].prefix = indent
        suite[0].changed()

    def transform_lambda(self, node, results):
        """Rewrite lambda (a, b): ... as lambda t: ... with each tuple
        parameter reference replaced by a subscript of t."""
        args = results["args"]
        body = results["body"]
        inner = simplify_args(results["inner"])

        # Replace lambda ((((x)))): x  with lambda x: x
        if inner.type == token.NAME:
            inner = inner.clone()
            inner.prefix = " "
            args.replace(inner)
            return

        params = find_params(args)
        to_index = map_to_index(params)
        tup_name = self.new_name(tuple_name(params))

        new_param = Name(tup_name, prefix=" ")
        args.replace(new_param.clone())
        for n in body.post_order():
            if n.type == token.NAME and n.value in to_index:
                subscripts = [c.clone() for c in to_index[n.value]]
                new = pytree.Node(syms.power,
                                  [new_param.clone()] + subscripts)
                new.prefix = n.prefix
                n.replace(new)
### Helper functions for transform_lambda()
def simplify_args(node):
    """Strip redundant parentheses from a lambda parameter node.

    vfpdef< '(' x ')' > is unwrapped (repeatedly) down to the inner
    NAME or vfplist node; plain NAME/vfplist nodes pass through.
    """
    node_type = node.type
    if node_type in (syms.vfplist, token.NAME):
        return node
    if node_type == syms.vfpdef:
        # These look like vfpdef< '(' x ')' > where x is NAME
        # or another vfpdef instance (leading to recursion).
        inner = node
        while inner.type == syms.vfpdef:
            inner = inner.children[1]
        return inner
    raise RuntimeError("Received unexpected node %s" % node)
def find_params(node):
    """Return the parameter structure of *node* as a name string or a
    (possibly nested) list of name strings, mirroring the tuple shape."""
    if node.type == syms.vfpdef:
        return find_params(node.children[1])
    if node.type == token.NAME:
        return node.value
    return [find_params(child) for child in node.children
            if child.type != token.COMMA]
def map_to_index(param_list, prefix=None, d=None):
    """Map each parameter name in *param_list* (as produced by
    find_params) to the list of Subscript nodes selecting it from the
    replacement tuple parameter.

    *prefix* is the subscript path accumulated for enclosing tuples;
    *d* is the dictionary being filled (shared across recursion).
    """
    # Use None sentinels instead of a mutable default argument.
    if prefix is None:
        prefix = []
    if d is None:
        d = {}
    for i, obj in enumerate(param_list):
        trailer = [Subscript(Number(str(i)))]
        if isinstance(obj, list):
            # Propagate the full accumulated path: previously only
            # `trailer` was passed down, so parameters nested three or
            # more levels deep lost their outer subscripts.
            map_to_index(obj, prefix + trailer, d=d)
        else:
            d[obj] = prefix + trailer
    return d
def tuple_name(param_list):
    """Flatten a (possibly nested) parameter-name structure into a single
    underscore-joined identifier, e.g. ['a', ['b', 'c']] -> 'a_b_c'."""
    parts = []
    for item in param_list:
        parts.append(tuple_name(item) if isinstance(item, list) else item)
    return "_".join(parts)
| apache-2.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Django-1.2.3/django/contrib/gis/db/backends/oracle/compiler.py | 360 | 1756 | from django.contrib.gis.db.models.sql.compiler import GeoSQLCompiler as BaseGeoSQLCompiler
from django.db.backends.oracle import compiler
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(BaseGeoSQLCompiler, SQLCompiler):
    # Combines the GIS SQL compiler mixin with Oracle's standard SQL
    # compiler; all behavior comes from the bases.
    pass
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
    def placeholder(self, field, val):
        """Return the SQL placeholder to use for *val* on *field* in an
        INSERT statement, working around cx_Oracle's handling of NULL
        geometry values."""
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            ph = field.get_placeholder(val, self.connection)
            if ph == 'NULL':
                # If the placeholder returned is 'NULL', then we need to
                # to remove None from the Query parameters. Specifically,
                # cx_Oracle will assume a CHAR type when a placeholder ('%s')
                # is used for columns of MDSYS.SDO_GEOMETRY. Thus, we use
                # 'NULL' for the value, and remove None from the query params.
                # See also #10888.
                param_idx = self.query.columns.index(field.column)
                params = list(self.query.params)
                params.pop(param_idx)
                self.query.params = tuple(params)
            return ph
        else:
            # Return the common case for the placeholder
            return '%s'
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
    # Behavior inherited unchanged from the bases.
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
    # Behavior inherited unchanged from the bases.
    pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
    # Behavior inherited unchanged from the bases.
    pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
    # Plain combination of the Oracle date compiler with the GIS mixin;
    # no overrides.
    pass
| apache-2.0 |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/__init__.py | 3 | 1520 | """Subpackage where each product is defined. Each product is created by adding a
.py file containing a __wptrunner__ variable in the global scope. This must be
a dictionary with the fields
"product": Name of the product, assumed to be unique.
"browser": String indicating the Browser implementation used to launch that
product.
"executor": Dictionary with keys as supported test types and values as the name
of the Executor implementation that will be used to run that test
type.
"browser_kwargs": String naming function that takes product, binary,
prefs_root and the wptrunner.run_tests kwargs dict as arguments
and returns a dictionary of kwargs to use when creating the
Browser class.
"executor_kwargs": String naming a function that takes http server url and
timeout multiplier and returns kwargs to use when creating
the executor class.
"env_options": String naming a function of no arguments that returns the
arguments passed to the TestEnvironment.
All classes and functions named in the above dict must be imported into the
module global scope.
"""
# Known product identifiers; each name corresponds to a module in this
# package that defines a __wptrunner__ dict (see the module docstring).
product_list = ["chrome",
                "chrome_android",
                "edge",
                "fennec",
                "firefox",
                "ie",
                "safari",
                "sauce",
                "servo",
                "servodriver",
                "opera",
                "webkit"]
| mpl-2.0 |
crawfordsm/pyspectrograph | PySpectrograph/Utilities/makeplots.py | 1 | 1688 | #
# MAKEPLOTS--A library for making plots for demaniacs
#
#
#
from pylab import *
import numpy
def plotframe(data):
    """Plot the entire 2-D data array as 10 horizontally-consecutive strips
    stacked vertically on the current pylab figure.

    Returns the axes object of the last strip drawn (NOTE(review): despite
    the original wording, this is an axes, not a figure).
    """
    nimg = 10                      # number of strips the frame is cut into
    ywidth = 0.08                  # figure-fraction height of each strip
    # NOTE(review): integer division under Python 2; on Python 3 this would
    # produce a float and break the slice below -- confirm intended runtime.
    xlen = len(data[0]) / nimg
    for i in range(nimg):
        # Stack the strips top-to-bottom with a 10% vertical gap.
        yax = 0.90 - ywidth * 1.1 * i
        x1 = xlen * i
        x2 = xlen * (1 + i)
        f = axes([0.1, yax, 0.8, ywidth])
        # Fixed display range of [-5, 50] counts, grayscale.
        f.imshow(data[:, x1:x2], cmap=cm.gray, aspect='auto', vmin=-5, vmax=50)
        f.axis('off')
    return f
def plotfeature(f, wave, data, w1, w2, z):
    """Plot a section of the data array
    as indicated by w1 and w2 (rest wavelengths, redshifted by z) onto the
    axes f.  Returns f unchanged if the redshifted window starts beyond the
    wavelength coverage.
    """
    # Shift the rest-frame window into the observed frame.
    w1 = w1 * (1 + z)
    w2 = w2 * (1 + z)
    if w1 > wave.max():
        return f
    mask = (w1 < wave) * (wave < w2)
    mdata = data[:, mask]
    f.imshow(mdata, cmap=cm.gray, aspect='auto', vmin=-5, vmax=50)
    # set up the axis labels
    x1 = wave[mask][0]
    x2 = wave[mask][-1]
    dw = (x2 - x1) / 5
    xtarr = arange(x1, x2 + dw, dw)      # NOTE(review): computed but unused
    xtlab = []
    # NOTE(review): xticks()[0] yields tick *positions* (floats); using them
    # directly as indices into wave[mask] relies on old matplotlib/numpy
    # accepting float indices -- confirm before reuse.
    for x in xticks()[0]:
        if x >= 0 and x < len(wave[mask]):
            x = wave[mask][x]
            xtlab.append('%4.2f' % x)
        else:
            xtlab.append('0')
    # Labels are blanked; xtlab is built but never applied to the axes.
    f.set_yticklabels([])
    f.set_xticklabels([])
    return f
def plotlinefeature(f, wave, flux, w1, w2, z):
    """Draw the spectrum restricted to the rest-frame window [w1, w2],
    redshifted by z, onto the axes f via plotline."""
    lo = w1 * (1 + z)
    hi = w2 * (1 + z)
    window = (lo < wave) * (wave < hi)
    return plotline(f, wave[window], flux[window])
def plotline(f, wave, flux, color=None):
    """Draw flux versus wave as a solid line on the axes f, clamp the
    x-range to the data extent, and return f for chaining.  An explicit
    color is honoured when given; otherwise the axes' default cycle is used.
    """
    style = {'ls': '-', 'lw': 1.55}
    if color:
        style['color'] = color
    f.plot(wave, flux, **style)
    f.set_xlim((wave[0], wave[-1]))
    return f
| bsd-3-clause |
azurestandard/django | tests/regressiontests/utils/text.py | 9 | 6021 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.test import SimpleTestCase
from django.utils import text
class TestUtilsText(SimpleTestCase):
    """Tests for django.utils.text truncation and wrapping helpers."""

    # In Django 1.6 truncate_words() and truncate_html_words() will be removed
    # so these tests will need to be adapted accordingly
    def test_truncate_chars(self):
        # Character-count truncation, including the '...' default suffix.
        truncator = text.Truncator(
            'The quick brown fox jumped over the lazy dog.'
        )
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
            truncator.chars(100)),
        self.assertEqual('The quick brown fox ...',
            truncator.chars(23)),
        self.assertEqual('The quick brown fo.....',
            truncator.chars(23, '.....')),

        # Ensure that we normalize our unicode data first
        nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
        nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
        self.assertEqual('oüoüoüoü', nfc.chars(8))
        self.assertEqual('oüoüoüoü', nfd.chars(8))
        self.assertEqual('oü...', nfc.chars(5))
        self.assertEqual('oü...', nfd.chars(5))

        # Ensure the final length is calculated correctly when there are
        # combining characters with no precomposed form, and that combining
        # characters are not split up.
        truncator = text.Truncator('-B\u030AB\u030A----8')
        self.assertEqual('-B\u030A...', truncator.chars(5))
        self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
        self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))

        # Ensure the length of the end text is correctly calculated when it
        # contains combining characters with no precomposed form.
        truncator = text.Truncator('-----')
        self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
        self.assertEqual('-----', truncator.chars(5, 'B\u030A'))

        # Make a best effort to shorten to the desired length, but requesting
        # a length shorter than the ellipsis shouldn't break
        self.assertEqual('...', text.Truncator('asdf').chars(1))

    def test_truncate_words(self):
        # Word-count truncation with default and custom suffixes.
        truncator = text.Truncator('The quick brown fox jumped over the lazy '
            'dog.')
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
            truncator.words(10))
        self.assertEqual('The quick brown fox...', truncator.words(4))
        self.assertEqual('The quick brown fox[snip]',
            truncator.words(4, '[snip]'))

    def test_truncate_html_words(self):
        # html=True must keep the markup balanced while truncating text.
        truncator = text.Truncator('<p><strong><em>The quick brown fox jumped '
            'over the lazy dog.</em></strong></p>')
        self.assertEqual('<p><strong><em>The quick brown fox jumped over the '
            'lazy dog.</em></strong></p>', truncator.words(10, html=True))
        self.assertEqual('<p><strong><em>The quick brown fox...</em>'
            '</strong></p>', truncator.words(4, html=True))
        self.assertEqual('<p><strong><em>The quick brown fox....</em>'
            '</strong></p>', truncator.words(4, '....', html=True))
        self.assertEqual('<p><strong><em>The quick brown fox</em></strong>'
            '</p>', truncator.words(4, '', html=True))

        # Test with new line inside tag
        truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
            'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
        self.assertEqual('<p>The quick <a href="xyz.html"\n'
            'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))

    def test_old_truncate_words(self):
        # Deprecated API: must still work but emit a DeprecationWarning.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.assertEqual('The quick brown fox jumped over the lazy dog.',
                text.truncate_words('The quick brown fox jumped over the lazy dog.', 10))
            self.assertEqual('The quick brown fox ...',
                text.truncate_words('The quick brown fox jumped over the lazy dog.', 4))
            self.assertEqual('The quick brown fox ....',
                text.truncate_words('The quick brown fox jumped over the lazy dog.', 4, '....'))
        self.assertGreater(len(w), 0)

    def test_old_truncate_html_words(self):
        # Deprecated API: must still work but emit a DeprecationWarning.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.assertEqual('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
                text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 10))
            self.assertEqual('<p><strong><em>The quick brown fox ...</em></strong></p>',
                text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4))
            self.assertEqual('<p><strong><em>The quick brown fox ....</em></strong></p>',
                text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4, '....'))
            self.assertEqual('<p><strong><em>The quick brown fox</em></strong></p>',
                text.truncate_html_words('<p><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>', 4, None))
        self.assertGreater(len(w), 0)

    def test_wrap(self):
        # text.wrap breaks on whitespace only; long words are never split.
        digits = '1234 67 9'
        self.assertEqual(text.wrap(digits, 100), '1234 67 9')
        self.assertEqual(text.wrap(digits, 9), '1234 67 9')
        self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
        self.assertEqual(text.wrap('short\na long line', 7),
            'short\na long\nline')
        self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
            'do-not-break-long-words\nplease?\nok')
        long_word = 'l%sng' % ('o' * 20)
        self.assertEqual(text.wrap(long_word, 20), long_word)
        self.assertEqual(text.wrap('a %s word' % long_word, 10),
            'a\n%s\nword' % long_word)
| bsd-3-clause |
shuangshuangwang/spark | python/pyspark/sql/tests/test_pandas_udf.py | 1 | 10216 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StructType, StructField, LongType
from pyspark.sql.utils import ParseException, PythonException
from pyspark.rdd import PythonEvalType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
class PandasUDFTests(ReusedSQLTestCase):
    """Tests for pandas_udf declaration: return/eval type resolution,
    argument validation, StopIteration propagation, and Arrow conversion
    safety flags."""

    def test_pandas_udf_basic(self):
        # A pandas_udf with no functionType defaults to SCALAR; the return
        # type may be given as a DataType instance or a DDL string.
        udf = pandas_udf(lambda x: x, DoubleType())
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
                         PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, 'v double',
                         functionType=PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, returnType='v double',
                         functionType=PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

    def test_pandas_udf_decorator(self):
        # Same matrix as test_pandas_udf_basic, but via decorator syntax.
        @pandas_udf(DoubleType())
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        @pandas_udf(returnType=DoubleType())
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        schema = StructType([StructField("v", DoubleType())])

        @pandas_udf(schema, PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf('v double', PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        @pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

    def test_udf_wrong_arg(self):
        # Every invalid declaration below must fail at definition time.
        with QuietTest(self.sc):
            with self.assertRaises(ParseException):
                @pandas_udf('blah')
                def foo(x):
                    return x
            with self.assertRaisesRegexp(ValueError, 'Invalid return type.*None'):
                @pandas_udf(functionType=PandasUDFType.SCALAR)
                def foo(x):
                    return x
            with self.assertRaisesRegexp(ValueError, 'Invalid function'):
                @pandas_udf('double', 100)
                def foo(x):
                    return x

            with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
                pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
            with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
                @pandas_udf(LongType(), PandasUDFType.SCALAR)
                def zero_with_type():
                    return 1

            with self.assertRaisesRegexp(TypeError, 'Invalid return type'):
                @pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
                def foo(df):
                    return df
            with self.assertRaisesRegexp(TypeError, 'Invalid return type'):
                @pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
                def foo(df):
                    return df
            with self.assertRaisesRegexp(ValueError, 'Invalid function'):
                @pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
                def foo(k, v, w):
                    return k

    def test_stopiteration_in_udf(self):
        # A StopIteration raised inside user code must surface as a
        # PythonException with an explanatory message, not hang/succeed.
        def foo(x):
            raise StopIteration()

        def foofoo(x, y):
            raise StopIteration()

        exc_message = "Caught StopIteration thrown from user's code; failing the task"
        df = self.spark.range(0, 100)

        # plain udf (test for SPARK-23754)
        self.assertRaisesRegexp(
            PythonException,
            exc_message,
            df.withColumn('v', udf(foo)('id')).collect
        )

        # pandas scalar udf
        self.assertRaisesRegexp(
            PythonException,
            exc_message,
            df.withColumn(
                'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
            ).collect
        )

        # pandas grouped map
        self.assertRaisesRegexp(
            PythonException,
            exc_message,
            df.groupBy('id').apply(
                pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
            ).collect
        )

        self.assertRaisesRegexp(
            PythonException,
            exc_message,
            df.groupBy('id').apply(
                pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
            ).collect
        )

        # pandas grouped agg
        self.assertRaisesRegexp(
            PythonException,
            exc_message,
            df.groupBy('id').agg(
                pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
            ).collect
        )

    def test_pandas_udf_detect_unsafe_type_conversion(self):
        # float -> int conversion is unsafe and must raise only when the
        # convertToArrowArraySafely flag is on.
        import pandas as pd
        import numpy as np

        values = [1.0] * 3
        pdf = pd.DataFrame({'A': values})
        df = self.spark.createDataFrame(pdf).repartition(1)

        @pandas_udf(returnType="int")
        def udf(column):
            return pd.Series(np.linspace(0, 1, len(column)))

        # Since 0.11.0, PyArrow supports the feature to raise an error for unsafe cast.
        with self.sql_conf({
                "spark.sql.execution.pandas.convertToArrowArraySafely": True}):
            with self.assertRaisesRegexp(Exception,
                                         "Exception thrown when converting pandas.Series"):
                df.select(['A']).withColumn('udf', udf('A')).collect()

        # Disabling Arrow safe type check.
        with self.sql_conf({
                "spark.sql.execution.pandas.convertToArrowArraySafely": False}):
            df.select(['A']).withColumn('udf', udf('A')).collect()

    def test_pandas_udf_arrow_overflow(self):
        # 128 does not fit a signed byte; overflow must raise only when the
        # safe-conversion flag is on.
        import pandas as pd

        df = self.spark.range(0, 1)

        @pandas_udf(returnType="byte")
        def udf(column):
            return pd.Series([128] * len(column))

        # When enabling safe type check, Arrow 0.11.0+ disallows overflow cast.
        with self.sql_conf({
                "spark.sql.execution.pandas.convertToArrowArraySafely": True}):
            with self.assertRaisesRegexp(Exception,
                                         "Exception thrown when converting pandas.Series"):
                df.withColumn('udf', udf('id')).collect()

        # Disabling safe type check, let Arrow do the cast anyway.
        with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
            df.withColumn('udf', udf('id')).collect()
if __name__ == "__main__":
    # Re-export the test classes so unittest discovery-by-name works when
    # this module is executed directly.
    from pyspark.sql.tests.test_pandas_udf import *  # noqa: F401

    try:
        # Use the XML-report runner when available; otherwise fall back to
        # the default text runner.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
moylop260/odoo-dev | addons/crm/crm_phonecall.py | 32 | 14776 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
    """ Model for CRM phonecalls """
    _name = "crm.phonecall"
    _description = "Phonecall"
    _order = "id desc"
    _inherit = ['mail.thread']
    _columns = {
        # dates
        'date_action_last': fields.datetime('Last Action', readonly=1),
        'date_action_next': fields.datetime('Next Action', readonly=1),
        'create_date': fields.datetime('Creation Date' , readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', \
                        select=True, help='Sales team to which Case belongs to.'),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'partner_id': fields.many2one('res.partner', 'Contact'),
        'company_id': fields.many2one('res.company', 'Company'),
        'description': fields.text('Description'),
        'state': fields.selection(
            [('open', 'Confirmed'),
             ('cancel', 'Cancelled'),
             ('pending', 'Pending'),
             ('done', 'Held')
             ], string='Status', readonly=True, track_visibility='onchange',
            help='The status is set to Confirmed, when a case is created.\n'
                 'When the call is over, the status is set to Held.\n'
                 'If the callis not applicable anymore, the status can be set to Cancelled.'),
        'email_from': fields.char('Email', size=128, help="These people will receive email."),
        'date_open': fields.datetime('Opened', readonly=True),
        # phonecall fields
        'name': fields.char('Call Summary', size=64, required=True),
        'active': fields.boolean('Active', required=False),
        'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
        'categ_id': fields.many2one('crm.case.categ', 'Category', \
                        domain="['|',('section_id','=',section_id),('section_id','=',False),\
                        ('object_id.model', '=', 'crm.phonecall')]"),
        'partner_phone': fields.char('Phone', size=32),
        'partner_mobile': fields.char('Mobile', size=32),
        'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
        'date_closed': fields.datetime('Closed', readonly=True),
        'date': fields.datetime('Date'),
        'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
    }

    def _get_default_state(self, cr, uid, context=None):
        # Let callers preset the state through the 'default_state' context key.
        if context and context.get('default_state'):
            return context.get('default_state')
        return 'open'

    _defaults = {
        'date': fields.datetime.now,
        'priority': '1',
        'state': _get_default_state,
        'user_id': lambda self, cr, uid, ctx: uid,
        'active': 1
    }

    def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
        # On-change: copy the partner's phone numbers into the call form.
        values = {}
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            values = {
                'partner_phone': partner.phone,
                'partner_mobile': partner.mobile,
            }
        return {'value': values}

    def write(self, cr, uid, ids, values, context=None):
        """ Override to add case management: open/close dates """
        if values.get('state'):
            if values.get('state') == 'done':
                # Closing the call stamps the close date and fixes duration.
                values['date_closed'] = fields.datetime.now()
                self.compute_duration(cr, uid, ids, context=context)
            elif values.get('state') == 'open':
                # Re-opening resets the open date and the duration counter.
                values['date_open'] = fields.datetime.now()
                values['duration'] = 0.0
        return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)

    def compute_duration(self, cr, uid, ids, context=None):
        # Backfill the duration (in minutes) for calls that have none,
        # measured from the call date up to now.
        for phonecall in self.browse(cr, uid, ids, context=context):
            if phonecall.duration <= 0:
                duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
                values = {'duration': duration.seconds/float(60)}
                self.write(cr, uid, [phonecall.id], values, context=context)
        return True

    def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
                    user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
        """
        action :('schedule','Schedule a call'), ('log','Log a call')

        Create a follow-up call for each call in `ids`, copying partner and
        contact data; with action='log' the new call is immediately marked
        as held. Returns {original_call_id: new_call_id}.
        """
        model_data = self.pool.get('ir.model.data')
        phonecall_dict = {}
        if not categ_id:
            # Default category: the standard "outbound" phonecall category.
            try:
                res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass
        for call in self.browse(cr, uid, ids, context=context):
            if not section_id:
                section_id = call.section_id and call.section_id.id or False
            if not user_id:
                user_id = call.user_id and call.user_id.id or False
            if not schedule_time:
                schedule_time = call.date
            vals = {
                'name' : call_summary,
                'user_id' : user_id or False,
                'categ_id' : categ_id or False,
                'description' : call.description or False,
                'date' : schedule_time,
                'section_id' : section_id or False,
                'partner_id': call.partner_id and call.partner_id.id or False,
                'partner_phone' : call.partner_phone,
                'partner_mobile' : call.partner_mobile,
                'priority': call.priority,
            }
            new_id = self.create(cr, uid, vals, context=context)
            if action == 'log':
                self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
            phonecall_dict[call.id] = new_id
        return phonecall_dict

    def _call_create_partner(self, cr, uid, phonecall, context=None):
        # Create a res.partner from a phonecall's basic data; returns its id.
        partner = self.pool.get('res.partner')
        partner_id = partner.create(cr, uid, {
                    'name': phonecall.name,
                    'user_id': phonecall.user_id.id,
                    'comment': phonecall.description,
                    'address': []
        })
        return partner_id

    def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
        # On-change: copy team/partner/phone data from the linked opportunity.
        values = {}
        if opportunity_id:
            opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
            values = {
                'section_id' : opportunity.section_id and opportunity.section_id.id or False,
                'partner_phone' : opportunity.phone,
                'partner_mobile' : opportunity.mobile,
                'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
            }
        return {'value' : values}

    def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
        # Link the calls to a partner and post a chatter note about it.
        write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
        self._call_set_partner_send_note(cr, uid, ids, context)
        return write_res

    def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
        # Create a child contact under `partner_id` holding the call's phone.
        address = self.pool.get('res.partner')
        return address.create(cr, uid, {
                    'parent_id': partner_id,
                    'name': phonecall.name,
                    'phone': phonecall.partner_phone,
        })

    def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
        """
        Handle partner assignation during a lead conversion.
        if action is 'create', create new partner with contact and assign lead to new partner_id.
        otherwise assign lead to specified partner_id

        :param list ids: phonecalls ids to process
        :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
        :param int partner_id: partner to assign if any
        :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        #TODO this is a duplication of the handle_partner_assignation method of crm_lead
        partner_ids = {}
        # If a partner_id is given, force this partner for all elements
        force_partner_id = partner_id
        for call in self.browse(cr, uid, ids, context=context):
            # If the action is set to 'create' and no partner_id is set, create a new one
            if action == 'create':
                partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
                self._call_create_partner_address(cr, uid, call, partner_id, context=context)
            self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
            partner_ids[call.id] = partner_id
        return partner_ids

    def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
        # Build the act_window dict that opens a single phonecall in
        # form view (with tree/calendar available).
        model_data = self.pool.get('ir.model.data')
        # Select the view
        tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
        form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
        search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
        value = {
                'name': _('Phone Call'),
                'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'crm.phonecall',
                'res_id' : int(phonecall_id),
                'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
                'type': 'ir.actions.act_window',
                'search_view_id': search_view and search_view[1] or False,
        }
        return value

    def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
        # Turn each call into a crm.lead of type 'opportunity', copying
        # contact data, then mark the call as held and link it to the new
        # opportunity. Returns {call_id: opportunity_id}.
        partner = self.pool.get('res.partner')
        opportunity = self.pool.get('crm.lead')
        opportunity_dict = {}
        default_contact = False
        for call in self.browse(cr, uid, ids, context=context):
            if not partner_id:
                partner_id = call.partner_id and call.partner_id.id or False
            if partner_id:
                address_id = partner.address_get(cr, uid, [partner_id])['default']
                if address_id:
                    default_contact = partner.browse(cr, uid, address_id, context=context)
            opportunity_id = opportunity.create(cr, uid, {
                            'name': opportunity_summary or call.name,
                            'planned_revenue': planned_revenue,
                            'probability': probability,
                            'partner_id': partner_id or False,
                            'mobile': default_contact and default_contact.mobile,
                            'section_id': call.section_id and call.section_id.id or False,
                            'description': call.description or False,
                            'priority': call.priority,
                            'type': 'opportunity',
                            'phone': call.partner_phone or False,
                            'email_from': default_contact and default_contact.email,
                        })
            vals = {
                'partner_id': partner_id,
                'opportunity_id': opportunity_id,
                'state': 'done',
            }
            self.write(cr, uid, [call.id], vals, context=context)
            opportunity_dict[call.id] = opportunity_id
        return opportunity_dict

    def action_make_meeting(self, cr, uid, ids, context=None):
        """
        Open meeting's calendar view to schedule a meeting on current phonecall.
        :return dict: dictionary value for created meeting view
        """
        partner_ids = []
        phonecall = self.browse(cr, uid, ids[0], context)
        if phonecall.partner_id and phonecall.partner_id.email:
            partner_ids.append(phonecall.partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        # Prefill the meeting form from the phonecall's data.
        res['context'] = {
            'default_phonecall_id': phonecall.id,
            'default_partner_ids': partner_ids,
            'default_user_id': uid,
            'default_email_from': phonecall.email_from,
            'default_name': phonecall.name,
        }
        return res

    def action_button_convert2opportunity(self, cr, uid, ids, context=None):
        """
        Convert a phonecall into an opp and then redirect to the opp view.

        :param list ids: list of calls ids to convert (typically contains a single id)
        :return dict: containing view information
        """
        if len(ids) != 1:
            raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))

        opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
        return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)

    # ----------------------------------------
    # OpenChatter
    # ----------------------------------------

    def _call_set_partner_send_note(self, cr, uid, ids, context=None):
        # Chatter notification posted when a partner gets assigned.
        return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shenlong3030/asv-django-guestbook | django/db/backends/oracle/creation.py | 9 | 11427 | import sys, time
from django.db.backends.creation import BaseDatabaseCreation
# Prefix prepended to the real database name to derive test DB names.
TEST_DATABASE_PREFIX = 'test_'
# Fixed password assigned to the throwaway Oracle test user.
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).

    data_types = {
        'AutoField':                    'NUMBER(11)',
        'BooleanField':                 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField':                    'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField':   'VARCHAR2(%(max_length)s)',
        'DateField':                    'DATE',
        'DateTimeField':                'TIMESTAMP',
        'DecimalField':                 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField':                    'NVARCHAR2(%(max_length)s)',
        'FilePathField':                'NVARCHAR2(%(max_length)s)',
        'FloatField':                   'DOUBLE PRECISION',
        'IntegerField':                 'NUMBER(11)',
        'BigIntegerField':              'NUMBER(19)',
        'IPAddressField':               'VARCHAR2(15)',
        'NullBooleanField':             'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField':                'NUMBER(11)',
        'PositiveIntegerField':         'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField':    'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField':                    'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField':            'NUMBER(11)',
        'TextField':                    'NCLOB',
        'TimeField':                    'TIMESTAMP',
        'URLField':                     'VARCHAR2(%(max_length)s)',
    }
    def __init__(self, connection):
        # 'remember' stashes the production USER/PASSWORD so they can be
        # restored when the test database is destroyed.
        self.remember = {}
        super(DatabaseCreation, self).__init__(connection)
    def _create_test_db(self, verbosity=1, autoclobber=False):
        # Create the test tablespaces and test user on Oracle, prompting
        # (unless autoclobber) before destroying pre-existing ones, then
        # switch the connection credentials over to the test user.
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        # Keep the real credentials so _destroy_test_db can restore them.
        self.remember['user'] = self.connection.settings_dict['USER']
        self.remember['passwd'] = self.connection.settings_dict['PASSWORD']

        cursor = self.connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database '%s'..." % self.connection.alias
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        if self._test_user_create():
            if verbosity >= 1:
                print "Creating test user..."
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test user..."
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print "Creating test user..."
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        # From here on, all connections are made as the test user.
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict["USER"] = TEST_USER
        self.connection.settings_dict["PASSWORD"] = TEST_PASSWD

        return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
    """
    Destroy the test user and test tablespaces created by the test-db
    creation step, then close the connection. Returns None.

    NOTE(review): the previous docstring was copied from the creation
    method and wrongly claimed this returns the created database name.
    """
    TEST_NAME = self._test_database_name()
    TEST_USER = self._test_database_user()
    TEST_PASSWD = self._test_database_passwd()
    TEST_TBLSPACE = self._test_database_tblspace()
    TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
    # Restore the credentials remembered during creation so teardown
    # runs as the original (privileged) Oracle user, not the test user.
    self.connection.settings_dict["USER"] = self.remember['user']
    self.connection.settings_dict["PASSWORD"] = self.remember['passwd']
    parameters = {
        'dbname': TEST_NAME,
        'user': TEST_USER,
        'password': TEST_PASSWD,
        'tblspace': TEST_TBLSPACE,
        'tblspace_temp': TEST_TBLSPACE_TMP,
    }
    cursor = self.connection.cursor()
    time.sleep(1)  # To avoid "database is being accessed by other users" errors.
    if self._test_user_create():
        if verbosity >= 1:
            print 'Destroying test user...'
        self._destroy_test_user(cursor, parameters, verbosity)
    if self._test_database_create():
        if verbosity >= 1:
            print 'Destroying test database tables...'
        self._execute_test_db_destruction(cursor, parameters, verbosity)
    self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_create_test_db(): dbname = %s" % parameters['dbname']
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
    # Create the Oracle user the test suite will connect as, defaulting
    # it to the test tablespaces, and grant the privileges it needs to
    # create its schema objects.
    if verbosity >= 2:
        print "_create_test_user(): username = %s" % parameters['user']
    statements = [
        """CREATE USER %(user)s
           IDENTIFIED BY %(password)s
           DEFAULT TABLESPACE %(tblspace)s
           TEMPORARY TABLESPACE %(tblspace_temp)s
        """,
        """GRANT CONNECT, RESOURCE TO %(user)s""",
    ]
    self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
    # Drop both test tablespaces together with their datafiles; this
    # removes every object the test run created in them.
    if verbosity >= 2:
        print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
    statements = [
        'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
    ]
    self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
    # Drop the test user and, via CASCADE, everything it owns. Oracle
    # can take a while to do this, hence the warning at -v2.
    if verbosity >= 2:
        print "_destroy_test_user(): user=%s" % parameters['user']
        print "Be patient. This can take some time..."
    statements = [
        'DROP USER %(user)s CASCADE',
    ]
    self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
    # Interpolate `parameters` into each SQL template and execute it.
    # Identifiers (user/tablespace names) cannot be bound as query
    # parameters, hence the %-string formatting. Errors are echoed to
    # stderr and then re-raised for the caller to handle.
    for template in statements:
        stmt = template % parameters
        if verbosity >= 2:
            print stmt
        try:
            cursor.execute(stmt)
        except Exception, err:
            sys.stderr.write("Failed (%s)\n" % (err))
            raise
def _test_database_name(self):
    """
    Return the name used for the test database/tablespace, honouring an
    explicit TEST_NAME setting when one is configured.
    """
    name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    try:
        if self.connection.settings_dict['TEST_NAME']:
            name = self.connection.settings_dict['TEST_NAME']
    except KeyError:
        # BUG FIX: settings_dict is a dict, so a missing 'TEST_NAME' key
        # raises KeyError; the old AttributeError handler never fired
        # (every sibling _test_* helper already catches KeyError).
        pass
    return name
def _test_database_create(self):
    """Whether the test tablespaces should be created (defaults to True)."""
    settings = self.connection.settings_dict
    if 'TEST_CREATE' in settings:
        return settings['TEST_CREATE']
    return True
def _test_user_create(self):
    """Whether a dedicated test user should be created (defaults to True)."""
    settings = self.connection.settings_dict
    if 'TEST_USER_CREATE' in settings:
        return settings['TEST_USER_CREATE']
    return True
def _test_database_user(self):
    """Name of the Oracle user created for running the test suite."""
    # Default: prefix the configured USER (raises KeyError if unset,
    # matching the original behaviour).
    user = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
    override = self.connection.settings_dict.get('TEST_USER')
    if override:
        user = override
    return user
def _test_database_passwd(self):
    """Password for the test user, honouring TEST_PASSWD when set."""
    passwd = PASSWORD
    override = self.connection.settings_dict.get('TEST_PASSWD')
    if override:
        passwd = override
    return passwd
def _test_database_tblspace(self):
    """Name of the default test tablespace, honouring TEST_TBLSPACE."""
    tblspace = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    override = self.connection.settings_dict.get('TEST_TBLSPACE')
    if override:
        tblspace = override
    return tblspace
def _test_database_tblspace_tmp(self):
    """Name of the temporary test tablespace, honouring TEST_TBLSPACE_TMP."""
    tblspace = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
    override = self.connection.settings_dict.get('TEST_TBLSPACE_TMP')
    if override:
        tblspace = override
    return tblspace
| bsd-3-clause |
robert-giaquinto/text-analysis | src/topic_model/build_cdtm_data.py | 1 | 2688 | from __future__ import division, print_function, absolute_import
import os
import argparse
def day_counts(keys_file, rounding_days=None):
    """
    Count journal entries per (optionally rounded) day.

    keys_file: tab-separated file whose 4th column holds each entry's
        day offset as a float (normalized so the first journals begin
        at zero).
    rounding_days: if given, days are rounded to the nearest multiple
        of this many days (e.g. 7 rounds to the nearest week).

    Returns a dict mapping day -> number of entries on that day.
    """
    counts = {}
    with open(keys_file, "r") as handle:
        for line in handle:
            columns = line.split("\t")
            day = int(round(float(columns[3])))
            if rounding_days is not None:
                day = round(day / rounding_days) * rounding_days
            counts[day] = counts.get(day, 0) + 1
    return counts
def build_data(keys_file, out_file, ldac_file, rounding_days=None):
    """
    Convert an LDA-C bag-of-words file into the cDTM input format.

    Writes the number of timestamps, then for each timestamp (ascending)
    the day, the document count, and that many bag-of-words lines copied
    from `ldac_file` (the keys file and ldac file are row-aligned).
    """
    days = day_counts(keys_file, rounding_days)
    n_days = len(days)
    print("Found", n_days, "unique timestamps")
    print("Writing day ", end='')
    # BUG FIX for Python 3 (the file already opts in via __future__
    # imports): dict.iteritems() does not exist, and writing str to a
    # file opened in binary ('wb') mode raises TypeError. Use .items()
    # and text mode; both also work unchanged on Python 2.
    with open(out_file, 'w') as out, open(ldac_file, "r") as ldac:
        out.write(str(n_days) + '\n')
        for day, n_docs in sorted(days.items()):
            print(day, end=', ')
            out.write(str(day) + '\n')
            out.write(str(n_docs) + '\n')
            for i in range(n_docs):
                bow = ldac.readline()
                out.write(bow)
    print('\nDone!')
def main():
    """Parse command-line arguments and convert the train and test
    LDA-C datasets into the cDTM input format."""
    parser = argparse.ArgumentParser(description='build cdtm data')
    parser.add_argument('--data_dir', type=str, help='Data directory where input and output files should be/go.')
    parser.add_argument('--train_keys', type=str, help='train keys file.')
    parser.add_argument('--test_keys', type=str, help='test keys file.')
    parser.add_argument('--train_out', type=str, help='train out file.')
    parser.add_argument('--test_out', type=str, help='test out file.')
    parser.add_argument('--rounding_days', type=int, default=1, help='number of days to round relative date to (to reduce number of time points)')
    args = parser.parse_args()
    print('build_cdtm_data.py')
    print(args)
    # All paths are taken relative to --data_dir.
    train_keys = os.path.join(args.data_dir, args.train_keys)
    test_keys = os.path.join(args.data_dir, args.test_keys)
    train_out = os.path.join(args.data_dir, args.train_out)
    test_out = os.path.join(args.data_dir, args.test_out)
    # The LDA-C bag-of-words inputs live under fixed names in data_dir.
    train_ldac = os.path.join(args.data_dir, 'train-mult.dat')
    test_ldac = os.path.join(args.data_dir, 'test-mult.dat')
    build_data(train_keys, train_out, train_ldac, args.rounding_days)
    build_data(test_keys, test_out, test_ldac, args.rounding_days)


if __name__ == "__main__":
    main()
| mit |
pybee-attic/toga-cocoa | toga_cocoa/command.py | 2 | 1189 | from .libs import *
from .widgets.icon import Icon
class Command(object):
    """
    A toolbar/menu command: wraps a callable `action` plus its label,
    tooltip and icon, and mirrors its enabled state onto every native
    widget that was created for it.
    """

    def __init__(self, action, label=None, tooltip=None, icon=None):
        self.action = action
        self.label = label
        self.tooltip = tooltip
        self.icon = Icon.load(icon)
        self._enabled = True
        self._widgets = []

    @property
    def toolbar_identifier(self):
        # Cocoa toolbars key their items by string; use the instance id
        # so every command gets a unique identifier.
        return 'toolbarItem-%s' % id(self)

    @property
    def enabled(self):
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = value
        # Push the new state to every native widget backing this command.
        for native in self._widgets:
            native.setEnabled_(value)
class SpecialCommand(object):
    """
    A placeholder command mapped directly onto one of Cocoa's built-in
    toolbar items (separator/spacer). It carries no action, label,
    tooltip or icon, and is always enabled.
    """

    def __init__(self, toolbar_identifier):
        self._toolbar_identifier = toolbar_identifier
        self.label = None
        self.tooltip = None
        self.icon = None
        self._widgets = []

    @property
    def toolbar_identifier(self):
        return self._toolbar_identifier

    @property
    def enabled(self):
        # Built-in toolbar items can never be disabled.
        return True
# Singleton placeholder commands for the three built-in Cocoa toolbar items.
SEPARATOR = SpecialCommand('NSToolbarSeparatorItem')
SPACER = SpecialCommand('NSToolbarSpaceItem')
EXPANDING_SPACER = SpecialCommand('NSToolbarFlexibleSpaceItem')
| bsd-3-clause |
PhloxAR/phloxar | PhloxAR/dc1394/mode.py | 1 | 12891 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from .core import *
from ctypes import c_int32, c_uint32, c_uint64, c_float, byref, pointer
__all__ = [
'Mode', 'Format7', 'mode_map'
]
class Mode(object):
    """
    Video mode for a DC1394 camera.

    Do not instantiate this class directly. Instead use one of the modes
    in 'Camera.modes' or 'Camera.modes_dict' and assign it to 'Camera.mode'.
    """
    _mode_id = None
    _cam = None
    _color_coding = None
    _dtype = None

    def __init__(self, cam, mode_id):
        self._mode_id = mode_id
        self._cam = cam
        self._dtype_shape()

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        return self._mode_id == other.mode_id

    def _dtype_shape(self):
        """
        Query the camera for this mode's image size and color coding and
        cache the derived numpy dtype and frame shape.
        """
        # (removed an unused local `import numpy` here)
        w = c_int32()
        h = c_int32()
        dll.dc1394_get_image_size_from_video_mode(self._cam, self._mode_id,
                                                  byref(w), byref(h))
        # Build as a list so a color channel can be appended below.
        shape = [h.value, w.value]
        cc = color_coding_t()
        dll.dc1394_get_color_coding_from_video_mode(
            self._cam, self._mode_id, byref(cc))
        self._color_coding = color_codings[cc.value]
        if '8' in self._color_coding:
            self._dtype = '>u1'
        elif '16' in self._color_coding:
            self._dtype = '>u2'
        elif 'YUV' in self._color_coding:
            print("Warning: YUV image format!")
            # the data depth is 8 bit in the buffer,
            # but 12 or 16 bit in a _color pixel.
            self._dtype = ">u1"
        else:
            # BUG FIX: this branch referenced an undefined name `mode`
            # and raised NameError; report the actual color coding.
            print("Nonstandard image format: %s" % self._color_coding)
            self._dtype = ">u1"
        if "RGB" in self._color_coding:
            # BUG FIX: `self._shape` used to be a tuple, so .append(3)
            # raised AttributeError for every RGB mode.
            shape.append(3)
        self._shape = tuple(shape)

    @property
    def mode_id(self):
        """The raw dc1394 video mode id."""
        return self._mode_id

    @property
    def name(self):
        """
        A descriptive name for this mode.
        """
        return video_modes[self._mode_id]

    @property
    def framerate(self):
        """
        Allowed framerate if the camera is in this mode.

        NOTE(review): this currently performs the same lookup as `name`
        (the video_modes table, not a framerate table) -- confirm
        against the dc1394 bindings.
        """
        return video_modes[self._mode_id]

    @property
    def shape(self):
        """
        The size in pixels of frames acquired in this mode.
        """
        return self._shape

    @property
    def color_coding(self):
        """
        The type of _color coding of pixels.
        """
        return self._color_coding

    @property
    def scalable(self):
        """
        Is this video mode scalable?
        """
        return bool(dll.dc1394_is_video_mode_scalable(self._mode_id))

    @property
    def dtype(self):
        """
        The numpy data type of an image of this mode.
        """
        return self._dtype
class Exif(Mode):
    """EXIF data mode (mode id 87); no extra behaviour beyond Mode."""
    pass
class Format7(Mode):
    """
    Format7 modes are flexible modes that support:

    * acquiring and transferring only a subsection of the frame for
      faster acquisition: region-of-interest (ROI)
    * binning the pixels of the sensor for faster acquisition and
      reduced readout noise. The binning strategy in the different
      Format7 modes is defined by the vendor.

    Many aspects of Format7 modes can be altered while an acquisition is
    in progress. A notable exception from this is the size of the packet.

    Use 'max_image_size', 'unit_size', 'unit_position', 'color_codings',
    and 'data_depth' to obtain information about the mode and then set
    its parameters via the attributes 'image_size', 'image_position',
    'color_coding', and 'packet_size' or all of them via the 'roi'
    attribute or with a call to 'setup'.

    All settings are sent to the hardware right away.
    """

    @property
    def frame_interval(self):
        """
        The current frame interval in this format7 mode in seconds.
        Read-only.

        Use the 'Camera.framerate' and 'Camera.shutter'
        features (if present) to influence the framerate.
        """
        fi = c_float()
        # BUG FIX: libdc1394 v2 exports dc1394_format7_get_frame_interval;
        # there is no dc1394_get_frame_interval symbol.
        dll.dc1394_format7_get_frame_interval(self._cam, self._mode_id,
                                              byref(fi))
        return fi.value

    @property
    def max_image_size(self):
        """
        The maximum size (horizontal and vertical) of the ROI in pixels.
        Read-only.
        """
        hsize = c_uint32()
        vsize = c_uint32()
        dll.dc1394_format7_get_max_image_size(self._cam, self._mode_id,
                                              byref(hsize), byref(vsize))
        return hsize.value, vsize.value

    @property
    def image_size(self):
        """
        The current size (horizontal and vertical) of the ROI in pixels.

        The image size can only be a multiple of the :attr:`unit_size`, and
        cannot be smaller than it.
        """
        hsize = c_uint32()
        vsize = c_uint32()
        dll.dc1394_format7_get_image_size(self._cam, self._mode_id,
                                          byref(hsize), byref(vsize))
        return hsize.value, vsize.value

    @image_size.setter
    def image_size(self, size):
        # BUG FIX: the old setter (a) was declared as (self, width, height),
        # a signature a property setter can never be invoked with, and
        # (b) its body queried the image *position* and returned it instead
        # of setting the size. Accept a (width, height) pair, mirroring the
        # image_position setter.
        width, height = size
        dll.dc1394_format7_set_image_size(self._cam, self._mode_id,
                                          width, height)

    @property
    def image_position(self):
        """
        The start position of the upper left corner of the ROI in
        pixels (horizontal and vertical).

        The image position can only be a multiple of the unit position
        (zero is acceptable).
        """
        x = c_uint32()
        y = c_uint32()
        dll.dc1394_format7_get_image_position(self._cam, self._mode_id,
                                              byref(x), byref(y))
        return x.value, y.value

    @image_position.setter
    def image_position(self, pos):
        x, y = pos
        dll.dc1394_format7_set_image_position(self._cam, self._mode_id, x, y)

    @property
    def color_codings(self):
        """
        Allowed _color codings in this mode. Read-only.
        """
        pos_codings = color_codings_t()
        dll.dc1394_format7_get_color_codings(self._cam, self._mode_id,
                                             byref(pos_codings))
        return [color_codings[i] for i in pos_codings.codings[:pos_codings.num]]

    @property
    def color_coding(self):
        """
        The current _color coding.
        """
        cc = color_coding_t()
        dll.dc1394_format7_get_color_coding(self._cam, self._mode_id, byref(cc))
        return color_codings[cc.value]

    @color_coding.setter
    def color_coding(self, color):
        code = color_codings[color]
        dll.dc1394_format7_set_color_coding(self._cam, self._mode_id, code)

    @property
    def unit_position(self):
        """
        Horizontal and vertical 'image_position' multiples.
        Read-only.
        """
        h_unit = c_uint32()
        v_unit = c_uint32()
        dll.dc1394_format7_get_unit_position(self._cam, self._mode_id,
                                             byref(h_unit), byref(v_unit))
        return h_unit.value, v_unit.value

    @property
    def unit_size(self):
        """
        Horizontal and vertical :attr:`image_size` multiples. Read-only.
        """
        h_unit = c_uint32()
        v_unit = c_uint32()
        dll.dc1394_format7_get_unit_size(self._cam, self._mode_id,
                                         byref(h_unit), byref(v_unit))
        return h_unit.value, v_unit.value

    @property
    def roi(self):
        """
        Get and set all Format7 parameters at once.

        The following definitions can be used to set the ROI of Format7 in
        a simpler fashion:

        * QUERY_FROM_CAMERA (-1) will use the current value used by the
          camera,
        * USE_MAX_AVAIL (-2) will set the value to its maximum and
        * USE_RECOMMENDED (-3) can be used for the bytes-per-packet
          setting.
        """
        w, h, x, y = c_int32(), c_int32(), c_int32(), c_int32()
        cco, packet_size = color_coding_t(), c_int32()
        dll.dc1394_format7_get_roi(self._cam, self._mode_id, pointer(cco),
                                   byref(packet_size),
                                   byref(x), byref(y), byref(w), byref(h))
        return ((w.value, h.value), (x.value, y.value),
                color_codings[cco.value], packet_size.value)

    @roi.setter
    def roi(self, args):
        size, position, color, packet_size = args
        dll.dc1394_format7_set_roi(self._cam, self._mode_id,
                                   color_codings[color], packet_size,
                                   position[0], position[1], size[0], size[1])

    @property
    def dtype(self):
        # Re-query the camera: the color coding (and hence the dtype)
        # can change at runtime in Format7.
        self._dtype_shape()
        return self._dtype

    @property
    def shape(self):
        # Re-query the camera: the ROI (and hence the shape) can change
        # at runtime in Format7.
        self._dtype_shape()
        return self._shape

    @property
    def recommended_packet_size(self):
        """
        Recommended number of bytes per packet. Read-only.
        """
        packet_size = c_uint32()
        dll.dc1394_format7_get_recommended_packet_size(self._cam, self._mode_id,
                                                       byref(packet_size))
        return packet_size.value

    @property
    def packet_parameters(self):
        """
        Maximum number and unit size of bytes per packet. Read-only.

        Get the parameters of the packet size: its maximal size and its
        unit size. The packet size is always a multiple of the unit
        bytes and cannot be zero.
        """
        packet_size_max = c_uint32()
        packet_size_unit = c_uint32()
        dll.dc1394_format7_get_packet_parameters(self._cam, self._mode_id,
                                                 byref(packet_size_unit),
                                                 byref(packet_size_max))
        return packet_size_unit.value, packet_size_max.value

    @property
    def packet_size(self):
        """
        Current number of bytes per packet.
        """
        packet_size = c_uint32()
        dll.dc1394_format7_get_packet_size(self._cam, self._mode_id,
                                           byref(packet_size))
        return packet_size.value

    @packet_size.setter
    def packet_size(self, packet_size):
        dll.dc1394_format7_set_packet_size(self._cam, self._mode_id,
                                           int(packet_size))

    @property
    def total_bytes(self):
        """
        Current total number of bytes per frame. Read-only.

        This includes padding (to reach an entire number of packets).
        Use :attr:`packet_size` to influence its value.
        """
        ppf = c_uint64()
        dll.dc1394_format7_get_total_bytes(self._cam, self._mode_id, byref(ppf))
        return ppf.value

    @property
    def pixel_number(self):
        """
        The number of pixels per frame. Read-only.
        """
        px = c_uint32()
        dll.dc1394_format7_get_pixel_number(self._cam, self._mode_id, byref(px))
        return px.value

    @property
    def data_depth(self):
        """
        The number of bits per pixel. Read-only.
        Need not be a multiple of 8.
        """
        dd = c_uint32()
        dll.dc1394_format7_get_data_depth(self._cam, self._mode_id, byref(dd))
        return dd.value

    def setup(self, image_size=(QUERY_FROM_CAMERA, QUERY_FROM_CAMERA),
              image_position=(QUERY_FROM_CAMERA, QUERY_FROM_CAMERA),
              color_coding=QUERY_FROM_CAMERA, packet_size=USE_RECOMMANDED):
        """
        Setup this Format7 mode.

        Similar to setting :attr:`roi` but size and position are made
        multiples of :attr:`unit_size` and :attr:`unit_position`. All
        arguments are optional and default to not changing the current
        value. :attr:`packet_size` defaults to the recommended value.
        """
        wu, hu = self.unit_size
        xu, yu = self.unit_position
        position = xu*int(image_position[0]/xu), yu*int(image_position[1]/yu)
        size = wu*int(image_size[0]/wu), hu*int(image_size[1]/hu)
        self.roi = size, position, color_coding, packet_size
        return self.roi
# Map dc1394 video-mode ids to the wrapper class that handles them:
# ids 64-86 are the fixed-size legacy modes, 87 is the EXIF mode and
# 88-95 are the scalable Format7 modes.
mode_map = {
    64: Mode,
    65: Mode,
    66: Mode,
    67: Mode,
    68: Mode,
    69: Mode,
    70: Mode,
    71: Mode,
    72: Mode,
    73: Mode,
    74: Mode,
    75: Mode,
    76: Mode,
    77: Mode,
    78: Mode,
    79: Mode,
    80: Mode,
    81: Mode,
    82: Mode,
    83: Mode,
    84: Mode,
    85: Mode,
    86: Mode,
    87: Exif,
    88: Format7,
    89: Format7,
    90: Format7,
    91: Format7,
    92: Format7,
    93: Format7,
    94: Format7,
    95: Format7,
}
def create_mode(cam, m):
    """
    Build the appropriate Mode wrapper for camera `cam` and mode `m`.

    `m` may be a (width, height, color_coding) tuple or a mode-name
    string; it is resolved to a dc1394 mode id via `video_modes`.
    """
    if isinstance(m, tuple):
        m = "%sx%s_%s" % m
    mode_id = video_modes[m]
    # Dispatch through mode_map so Format7/Exif ids get their
    # specialised wrapper; previously a plain Mode was always built,
    # leaving mode_map unused.
    return mode_map.get(mode_id, Mode)(cam, mode_id)
| apache-2.0 |
gmcastil/numpy | numpy/ma/extras.py | 10 | 55238 | """
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
    """
    Is seq a sequence (ndarray, list or tuple)?
    """
    sequence_types = (ndarray, tuple, list)
    return isinstance(seq, sequence_types)
def count_masked(arr, axis=None):
    """
    Count the number of masked elements along the given axis.

    Parameters
    ----------
    arr : array_like
        An array that may contain masked elements.
    axis : int, optional
        Axis along which to count; when None (default), the flattened
        array is used.

    Returns
    -------
    count : int, ndarray
        The total number of masked elements (axis=None) or the number
        of masked elements along each slice of the given axis.

    See Also
    --------
    MaskedArray.count : Count non-masked elements.
    """
    # The mask array holds True for masked entries, so summing it gives
    # the masked-element count.
    return getmaskarray(arr).sum(axis)
def masked_all(shape, dtype=float):
    """
    Return an empty masked array of the given shape and dtype in which
    every element is masked.

    Parameters
    ----------
    shape : tuple
        Shape of the required MaskedArray.
    dtype : dtype, optional
        Data type of the output.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    See Also
    --------
    masked_all_like : Empty masked array modelled on an existing array.
    """
    # Data is uninitialized (np.empty); the all-True mask hides it.
    data = np.empty(shape, dtype)
    mask = np.ones(shape, make_mask_descr(dtype))
    return masked_array(data, mask=mask)
def masked_all_like(arr):
    """
    Return an empty masked array with the same shape and dtype as `arr`,
    in which every element is masked.

    Parameters
    ----------
    arr : ndarray
        Array describing the shape and dtype of the required MaskedArray.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    Raises
    ------
    AttributeError
        If `arr` doesn't have a shape attribute (i.e. not an ndarray).

    See Also
    --------
    masked_all : Empty masked array with all elements masked.
    """
    result = np.empty_like(arr).view(MaskedArray)
    # Attach an all-True mask matching the (possibly structured) dtype.
    result._mask = np.ones(result.shape, dtype=make_mask_descr(result.dtype))
    return result
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
    """
    Defines a wrapper to adapt NumPy functions to masked arrays.

    An instance of `_fromnxfunction` can be called with the same parameters
    as the wrapped NumPy function. The docstring of `newfunc` is adapted from
    the wrapped function as well, see `getdoc`.

    This class should not be used directly. Instead, one of its extensions
    that provides support for a specific type of input should be used.

    Parameters
    ----------
    funcname : str
        The name of the function to be adapted. The function should be
        in the NumPy namespace (i.e. ``np.funcname``).
    """

    def __init__(self, funcname):
        self.__name__ = funcname
        self.__doc__ = self.getdoc()

    def getdoc(self):
        """
        Retrieve the docstring and signature from the wrapped function
        and append a note about the mask handling. Returns None when the
        NumPy function has no docstring.
        """
        npfunc = getattr(np, self.__name__, None)
        doc = getattr(npfunc, '__doc__', None)
        if not doc:
            return None
        signature = self.__name__ + ma.get_object_signature(npfunc)
        note = ("Notes\n-----\nThe function is applied to both the _data"
                " and the _mask, if any.")
        return '\n'.join((signature, doc, note))

    def __call__(self, *args, **params):
        # Subclasses implement the actual dispatch.
        pass
class _fromnxfunction_single(_fromnxfunction):
    """
    A version of `_fromnxfunction` that is called with a single array
    argument followed by auxiliary args that are passed verbatim for
    both the data and mask calls.
    """

    def __call__(self, x, *args, **params):
        func = getattr(np, self.__name__)
        # ndarrays expose their data via __array__; everything else is
        # coerced first.
        if isinstance(x, ndarray):
            data = x.__array__()
        else:
            data = np.asarray(x)
        return masked_array(func(data, *args, **params),
                            mask=func(getmaskarray(x), *args, **params))
class _fromnxfunction_seq(_fromnxfunction):
    """
    A version of `_fromnxfunction` that is called with a single sequence
    of arrays followed by auxiliary args that are passed verbatim for
    both the data and mask calls.
    """

    def __call__(self, x, *args, **params):
        func = getattr(np, self.__name__)
        datas = tuple(np.asarray(a) for a in x)
        masks = tuple(getmaskarray(a) for a in x)
        return masked_array(func(datas, *args, **params),
                            mask=func(masks, *args, **params))
class _fromnxfunction_args(_fromnxfunction):
    """
    A version of `_fromnxfunction` that is called with multiple array
    arguments. The first non-array-like input marks the beginning of the
    arguments that are passed verbatim for both the data and mask calls.

    Array arguments are processed independently and the results are
    returned in a list. If only one array is found, the return value is
    just the processed array instead of a list.
    """

    def __call__(self, *args, **params):
        func = getattr(np, self.__name__)
        arrays = []
        rest = list(args)
        # Peel leading sequence arguments off; whatever remains is
        # forwarded verbatim.
        while rest and issequence(rest[0]):
            arrays.append(rest.pop(0))
        results = [masked_array(func(np.asarray(x), *rest, **params),
                                mask=func(getmaskarray(x), *rest, **params))
                   for x in arrays]
        if len(arrays) == 1:
            return results[0]
        return results
class _fromnxfunction_allargs(_fromnxfunction):
    """
    A version of `_fromnxfunction` that is called with multiple array
    arguments. Similar to `_fromnxfunction_args` except that all args
    are converted to arrays even if they are not so already. This makes
    it possible to process scalars as 1-D arrays. Only keyword arguments
    are passed through verbatim for the data and mask calls. Array
    arguments are processed independently and the results are returned
    in a list. If only one arg is present, the return value is just the
    processed array instead of a list.
    """

    def __call__(self, *args, **params):
        func = getattr(np, self.__name__)
        results = [masked_array(func(np.asarray(x), **params),
                                mask=func(getmaskarray(x), **params))
                   for x in args]
        if len(args) == 1:
            return results[0]
        return results
# Masked-array versions of the corresponding NumPy functions, built from
# the _fromnxfunction adapters above (each applies the NumPy function to
# both the data and the mask).
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
    """Flatten a (possibly nested) sequence in place and return it."""
    pos = 0
    while pos != len(seq):
        # Splice nested iterables open until the element at `pos`
        # is a scalar, then move on.
        while hasattr(seq[pos], '__iter__'):
            seq[pos:(pos + 1)] = seq[pos]
        pos += 1
    return seq
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    (This docstring should be overwritten)
    """
    arr = array(arr, copy=False, subok=True)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
                         % (axis, nd))
    # `ind` enumerates the output positions; `i` is an object-dtype index
    # into `arr` holding a full slice at `axis` and integers elsewhere.
    ind = [0] * (nd - 1)
    i = np.zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    j = i.copy()
    # Evaluate once to learn whether func1d returns a scalar or an array.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result
    # so we force the type to object, and build a list of dtypes. We'll
    # just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        # Scalar results: the output simply drops `axis`.
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the (odometer-style) multi-index
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        # Array results: `axis` is replaced by the result's shape.
        res = array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = ([slice(None, None)] * res.ndim)
        j.put(indlist, ind)
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the (odometer-style) multi-index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    # Promote every collected dtype to the widest one to avoid downcasting.
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, '_mask'):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = asarray(outarr, dtype=max_dtypes)
        result.fill_value = ma.default_fill_value(result)
    return result


apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
    """
    (This docstring will be overwritten)
    """
    val = asarray(a)
    N = a.ndim
    # Allow a bare integer axis as well as a sequence of axes.
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        res = func(val, axis)
        if res.ndim != val.ndim:
            # The reduction dropped a dimension: restore it so the rank
            # stays constant across iterations.
            res = ma.expand_dims(res, axis)
            if res.ndim != val.ndim:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")
        val = res
    return val
# Reuse numpy's apply_over_axes docstring up to (but not including) its
# Notes section, and append masked-array-specific examples.
if apply_over_axes.__doc__ is not None:
    apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
        :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
    """

    Examples
    --------
    >>> a = ma.arange(24).reshape(2,3,4)
    >>> a[:,0,1] = ma.masked
    >>> a[:,1,:] = ma.masked
    >>> print(a)
    [[[0 -- 2 3]
      [-- -- -- --]
      [8 9 10 11]]

     [[12 -- 14 15]
      [-- -- -- --]
      [20 21 22 23]]]
    >>> print(ma.apply_over_axes(ma.sum, a, [0,2]))
    [[[46]
      [--]
      [124]]]

    Tuple axis arguments to ufuncs are equivalent:

    >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1)))
    [[[46]
      [--]
      [124]]]
    """
def average(a, axis=None, weights=None, returned=False):
    """
    Return the weighted average of array over the given axis.

    Masked entries are excluded from both the average and the weight sum.

    Parameters
    ----------
    a : array_like
        Data to be averaged.
    axis : int, optional
        Axis along which to average `a`.  If None, averaging is done over
        the flattened array.
    weights : array_like, optional
        The importance that each element has in the computation of the
        average.  The weights array can either be 1-D (in which case its
        length must be the size of `a` along the given axis) or of the same
        shape as `a`.  If ``weights=None``, then all data in `a` are assumed
        to have a weight equal to one.  If `weights` is complex, the
        imaginary parts are ignored.
    returned : bool, optional
        If True, return a tuple ``(average, sum_of_weights)``; otherwise
        return only the average.  Default is False.

    Returns
    -------
    average, [sum_of_weights] : (tuple of) scalar or MaskedArray
        The average along the specified axis.  The return type is
        `np.float64` if `a` is of integer type and floats smaller than
        `float64`, or the input data-type, otherwise.  If returned,
        `sum_of_weights` is always `float64`.

    Examples
    --------
    >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
    >>> np.ma.average(a, weights=[3, 1, 0, 0])
    1.25
    """
    a = asarray(a)
    mask = getmask(a)

    # Modelled on 'average' in numpy/lib/function_base.py, with masked
    # entries dropped from both the numerator and the weight total.
    if weights is None:
        avg = a.mean(axis)
        # count() already ignores masked entries.
        scl = avg.dtype.type(a.count(axis))
    else:
        wgt = np.asanyarray(weights)

        # Integer/bool data is promoted to at least float64.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = np.result_type(a.dtype, wgt.dtype)

        if a.shape != wgt.shape:
            # 1-D weights along a named axis: validate, then broadcast.
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")
            wgt = np.broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)

        if mask is not nomask:
            # Masked positions contribute zero weight.
            wgt = wgt * (~a.mask)

        scl = wgt.sum(axis=axis, dtype=result_dtype)
        avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis) / scl

    if not returned:
        return avg
    if scl.shape != avg.shape:
        scl = np.broadcast_to(scl, avg.shape).copy()
    return avg, scl
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which the medians are computed. The default (None) is
        to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        median. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
        False. Note that, if `overwrite_input` is True, and the input
        is not already an `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.

        .. versionadded:: 1.10.0

    Returns
    -------
    median : ndarray
        A new array holding the result is returned unless out is
        specified, in which case a reference to out is returned.
        Return data-type is `float64` for integers and floats smaller than
        `float64`, or the input data-type, otherwise.

    See Also
    --------
    mean

    Notes
    -----
    Given a vector ``V`` with ``N`` non masked values, the median of ``V``
    is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
    ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
    when ``N`` is even.

    Examples
    --------
    >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
    >>> np.ma.median(x)
    1.5
    >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
    >>> np.ma.median(x)
    2.5
    >>> np.ma.median(x, axis=-1, overwrite_input=True)
    masked_array(data = [ 2.  5.],
                 mask = False,
           fill_value = 1e+20)

    """
    if not hasattr(a, 'mask'):
        # Unmasked input: delegate entirely to np.median on the raw data,
        # which is faster and already implements all axis/out/keepdims logic.
        m = np.median(getdata(a, subok=True), axis=axis,
                      out=out, overwrite_input=overwrite_input,
                      keepdims=keepdims)
        # Re-wrap array results so callers uniformly get a masked array
        # back; 0-d / scalar results are returned unchanged.
        if isinstance(m, np.ndarray) and 1 <= m.ndim:
            return masked_array(m, copy=False)
        else:
            return m
    # Masked input: _ureduce normalizes `axis` and returns the shape `k`
    # needed to reinstate the reduced dimensions when keepdims is requested.
    r, k = _ureduce(a, func=_median, axis=axis, out=out,
                    overwrite_input=overwrite_input)
    if keepdims:
        return r.reshape(k)
    else:
        return r
def _median(a, axis=None, out=None, overwrite_input=False):
    """
    Compute the median of a masked array along a single axis.

    Private workhorse for `median` (invoked through `_ureduce`), so `axis`
    is either None or a single int.  Relies on masked-array sorting placing
    the masked values at the end of each lane, so that per-lane medians can
    be read off by index using the unmasked counts.
    """
    if overwrite_input:
        # Sort in place when the caller allows clobbering the input.
        if axis is None:
            asorted = a.ravel()
            asorted.sort()
        else:
            a.sort(axis=axis)
            asorted = a
    else:
        asorted = sort(a, axis=axis)
    # Normalize axis: flattened input reduces along axis 0 of the ravel.
    if axis is None:
        axis = 0
    elif axis < 0:
        axis += asorted.ndim

    if asorted.ndim == 1:
        # 1-D fast path: count() gives the number of unmasked values, so the
        # median straddles indices (idx+odd-1, idx) of the sorted data.
        idx, odd = divmod(count(asorted), 2)
        return asorted[idx + odd - 1 : idx + 1].mean(out=out)

    counts = count(asorted, axis=axis)
    h = counts // 2
    # create indexing mesh grid for all but reduced axis
    axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
                 if i != axis]
    ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
    # insert indices of low and high median
    ind.insert(axis, np.maximum(0, h - 1))
    low = asorted[tuple(ind)]
    ind[axis] = h
    high = asorted[tuple(ind)]
    # duplicate high if odd number of elements so mean does nothing
    odd = counts % 2 == 1
    if asorted.ndim > 1:
        np.copyto(low, high, where=odd)
    elif odd:
        low = high
    if np.issubdtype(asorted.dtype, np.inexact):
        # avoid inf / x = masked
        s = np.ma.sum([low, high], axis=0, out=out)
        np.true_divide(s.data, 2., casting='unsafe', out=s.data)
    else:
        s = np.ma.mean([low, high], axis=0, out=out)
    return s
def compress_nd(x, axis=None):
    """Suppress slices from multiple dimensions which contain masked values.

    Parameters
    ----------
    x : array_like, MaskedArray
        The array to operate on. If not a MaskedArray instance (or if no array
        elements are masked, `x` is interpreted as a MaskedArray with `mask`
        set to `nomask`.
    axis : tuple of ints or int, optional
        Which dimensions to suppress slices from can be configured with this
        parameter.
        - If axis is a tuple of ints, those are the axes to suppress slices
          from.
        - If axis is an int, then that is the only axis to suppress slices
          from.
        - If axis is None, all axis are selected.

    Returns
    -------
    compress_array : ndarray
        The compressed array.
    """
    x = asarray(x)
    m = getmask(x)
    # Set axis to tuple of ints
    if isinstance(axis, int):
        axis = (axis,)
    elif axis is None:
        axis = tuple(range(x.ndim))
    elif not isinstance(axis, tuple):
        raise ValueError('Invalid type for axis argument')
    # Check axis input: normalize negatives, reject out-of-range and dupes.
    axis = [ax + x.ndim if ax < 0 else ax for ax in axis]
    if not all(0 <= ax < x.ndim for ax in axis):
        raise ValueError("'axis' entry is out of bounds")
    if len(axis) != len(set(axis)):
        raise ValueError("duplicate value in 'axis'")
    # Nothing is masked: return x
    if m is nomask or not m.any():
        return x._data
    # All is masked: return empty
    if m.all():
        return nxarray([])
    # Filter elements through boolean indexing
    data = x._data
    for ax in axis:
        # A slice along `ax` survives only if it has no masked value in any
        # combination of the remaining dimensions.
        axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
        data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
    return data
def compress_rowcols(x, axis=None):
    """
    Suppress the rows and/or columns of a 2-D array that contain
    masked values.

    The suppression behavior is selected with the `axis` parameter:

    - If axis is None, both rows and columns are suppressed.
    - If axis is 0, only rows are suppressed.
    - If axis is 1 or -1, only columns are suppressed.

    Parameters
    ----------
    x : array_like, MaskedArray
        The array to operate on.  If not a MaskedArray instance (or if no
        array elements are masked), `x` is interpreted as a MaskedArray
        with `mask` set to `nomask`.  Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. Default is None.

    Returns
    -------
    compressed_array : ndarray
        The compressed array.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_rowcols(x)
    array([[7, 8]])
    >>> np.ma.compress_rowcols(x, 0)
    array([[6, 7, 8]])
    >>> np.ma.compress_rowcols(x, 1)
    array([[1, 2],
           [4, 5],
           [7, 8]])
    """
    # Only the dimensionality check happens here; the generic n-D helper
    # does the actual filtering.
    if asarray(x).ndim != 2:
        raise NotImplementedError("compress_rowcols works for 2D arrays only.")
    return compress_nd(x, axis=axis)
def compress_rows(a):
    """
    Suppress whole rows of a 2-D array that contain masked values.

    Shortcut for ``np.ma.compress_rowcols(a, 0)``; see
    `extras.compress_rowcols` for full details.

    See Also
    --------
    extras.compress_rowcols
    """
    arr = asarray(a)
    if arr.ndim != 2:
        raise NotImplementedError("compress_rows works for 2D arrays only.")
    return compress_rowcols(arr, 0)
def compress_cols(a):
    """
    Suppress whole columns of a 2-D array that contain masked values.

    Shortcut for ``np.ma.compress_rowcols(a, 1)``; see
    `extras.compress_rowcols` for full details.

    See Also
    --------
    extras.compress_rowcols
    """
    arr = asarray(a)
    if arr.ndim != 2:
        raise NotImplementedError("compress_cols works for 2D arrays only.")
    return compress_rowcols(arr, 1)
def mask_rows(a, axis=None):
    """
    Mask whole rows of a 2D array that contain at least one masked value.

    Shortcut for ``mask_rowcols`` with `axis` equal to 0.  The `axis`
    argument is accepted for backward compatibility and is not used.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[0, 0, 0],
    ...                      [0, 1, 0],
    ...                      [0, 0, 0]], 1)
    >>> ma.mask_rows(a)
    masked_array(data =
     [[0 0 0]
     [-- -- --]
     [0 0 0]],
          mask =
     [[False False False]
     [ True  True  True]
     [False False False]],
          fill_value=999999)

    """
    return mask_rowcols(a, 0)
def mask_cols(a, axis=None):
    """
    Mask whole columns of a 2D array that contain at least one masked value.

    Shortcut for ``mask_rowcols`` with `axis` equal to 1.  The `axis`
    argument is accepted for backward compatibility and is not used.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[0, 0, 0],
    ...                      [0, 1, 0],
    ...                      [0, 0, 0]], 1)
    >>> ma.mask_cols(a)
    masked_array(data =
     [[0 -- 0]
     [0 -- 0]
     [0 -- 0]],
          mask =
     [[False  True False]
     [False  True False]
     [False  True False]],
          fill_value=999999)

    """
    return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
    """
    Compute the differences between consecutive elements of an array.

    Masked-aware counterpart of `numpy.ediff1d`; see that function for the
    meaning of `to_end` and `to_begin`.

    See Also
    --------
    numpy.ediff1d : Equivalent function for ndarrays.
    """
    flat = ma.asanyarray(arr).flat
    diffs = flat[1:] - flat[:-1]
    pieces = [diffs]
    if to_begin is not None:
        pieces.insert(0, to_begin)
    if to_end is not None:
        pieces.append(to_end)
    # Only concatenate when a prefix/suffix was supplied, sparing a copy of
    # a potentially large array in the common case.
    if len(pieces) != 1:
        diffs = hstack(pieces)
    return diffs
def unique(ar1, return_index=False, return_inverse=False):
    """
    Finds the unique elements of an array.

    All masked values are treated as the same element (masked).  The output
    array is always a masked array.  See `numpy.unique` for details of the
    optional index/inverse outputs.

    See Also
    --------
    numpy.unique : Equivalent function for ndarrays.
    """
    output = np.unique(ar1,
                       return_index=return_index,
                       return_inverse=return_inverse)
    if isinstance(output, tuple):
        # np.unique returned (values, [index], [inverse]); only the values
        # array needs the masked-array view.
        parts = list(output)
        parts[0] = parts[0].view(MaskedArray)
        return tuple(parts)
    return output.view(MaskedArray)
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Returns the unique elements common to both arrays.

    Masked values are considered equal one to the other.
    The output is always a masked array.

    See `numpy.intersect1d` for more details.

    See Also
    --------
    numpy.intersect1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
    >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
    >>> intersect1d(x, y)
    masked_array(data = [1 3 --],
                 mask = [False False  True],
           fill_value = 999999)

    """
    if assume_unique:
        combined = ma.concatenate((ar1, ar2))
    else:
        # Deduplicate each input first so repeated values within one array
        # cannot fake a match.  (Might be faster than
        # unique(intersect1d(ar1, ar2))?)
        combined = ma.concatenate((unique(ar1), unique(ar2)))
    combined.sort()
    # After sorting, elements common to both inputs sit in adjacent pairs.
    return combined[:-1][combined[1:] == combined[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Set exclusive-or of 1-D arrays with unique elements.

    The output is always a masked array. See `numpy.setxor1d` for more
    details.

    See Also
    --------
    numpy.setxor1d : Equivalent function for ndarrays.
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)

    merged = ma.concatenate((ar1, ar2))
    if merged.size == 0:
        return merged
    merged.sort()
    filled = merged.filled()
    # An element belongs to exactly one input iff, after sorting, it differs
    # from both of its neighbours (sentinel True at each end).
    change = ma.concatenate(([True], (filled[1:] != filled[:-1]), [True]))
    keep = (change[1:] == change[:-1])
    return merged[keep]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of an array is also present in a second
    array.

    The output is always a masked array. See `numpy.in1d` for more details.

    See Also
    --------
    numpy.in1d : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.4.0

    """
    if not assume_unique:
        # rev_idx maps the deduplicated positions back onto ar1's layout
        # so the final answer matches the original (pre-unique) ar1.
        ar1, rev_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)
    ar = ma.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # Thanks to the stable sort, an ar1 value present in ar2 sits directly
    # before its ar2 twin, so adjacent (in)equality marks membership.
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = ma.concatenate((bool_ar, [invert]))
    # Undo the sort, then keep only the entries that came from ar1.
    indx = order.argsort(kind='mergesort')[:len(ar1)]
    if assume_unique:
        return flag[indx]
    else:
        return flag[indx][rev_idx]
def union1d(ar1, ar2):
    """
    Union of two arrays.

    The output is always a masked array. See `numpy.union1d` for more
    details.

    See also
    --------
    numpy.union1d : Equivalent function for ndarrays.
    """
    combined = ma.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Set difference of 1D arrays with unique elements.

    The output is always a masked array. See `numpy.setdiff1d` for more
    details.

    See Also
    --------
    numpy.setdiff1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
    >>> np.ma.setdiff1d(x, [1, 2])
    masked_array(data = [3 --],
                 mask = [False  True],
           fill_value = 999999)

    """
    if assume_unique:
        base = ma.asarray(ar1).ravel()
    else:
        base = unique(ar1)
        ar2 = unique(ar2)
    # Keep the elements of ar1 that are NOT found in ar2.
    return base[in1d(base, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.

    Returns the demeaned data (with `y`, if given, stacked onto `x`), an
    int array counting the unmasked entries (used for normalization by the
    callers), and the normalized 0/1 `rowvar` flag.
    """
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data.")
    #
    if x.shape[0] == 1:
        # A single row is always treated as one variable of observations.
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    # `tup` re-expands the mean so it broadcasts against the 2-D data.
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data.")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    # Propagate the pair-wise mask to both arrays in place.
                    xmask = x._mask = y._mask = ymask = common_mask
                    x._sharedmask = False
                    y._sharedmask = False
        # Stack y onto x so both are demeaned in a single pass below.
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
    """
    Estimate the covariance matrix.

    Except for the handling of missing data this function does the same as
    `numpy.cov`. For more details and examples, see `numpy.cov`.

    By default, masked values are recognized as such. If `x` and `y` have the
    same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
    ``y[i,j]`` will also be masked.

    Setting `allow_masked` to False will raise an exception if values are
    missing in either of the input arrays.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N-1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. This keyword can be overridden by
        the keyword ``ddof`` in numpy versions >= 1.5.
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is masked
        in `x`, the corresponding value is masked in `y`.
        If False, raises a `ValueError` exception when some values are
        missing.
    ddof : {None, int}, optional
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

        .. versionadded:: 1.5

    Raises
    ------
    ValueError
        Raised if some values are missing and `allow_masked` is False.

    See Also
    --------
    numpy.cov

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be an integer")
    # Set up ddof: bias=True means normalize by N, otherwise by N-1.
    if ddof is None:
        if bias:
            ddof = 0
        else:
            ddof = 1
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    if not rowvar:
        # Pair-wise counts of jointly-unmasked observations normalize each
        # entry; `strict=False` lets `dot` skip masked products.
        fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
        result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
        result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    return result
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
             ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    Except for the handling of missing data this function does the same as
    `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is masked
        in `x`, the corresponding value is masked in `y`.
        If False, raises an exception.  Because `bias` is deprecated, this
        argument needs to be treated as keyword only to avoid a warning.
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    See Also
    --------
    numpy.corrcoef : Equivalent function in top-level NumPy module.
    cov : Estimate the covariance matrix.

    Notes
    -----
    This function accepts but discards arguments `bias` and `ddof`.  This is
    for backwards compatibility with previous versions of this function.
    These arguments had no effect on the return values of the function and
    can be safely ignored in this and previous versions of numpy.

    """
    msg = 'bias and ddof have no effect and are deprecated'
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
    # Get the data
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # Compute the covariance matrix (ddof=0-style normalization by the
    # pair-wise counts of jointly-unmasked observations).
    if not rowvar:
        fact = np.dot(xnotmask.T, xnotmask) * 1.
        c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask, xnotmask.T) * 1.
        c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    # Check whether we have a scalar
    try:
        diag = ma.diagonal(c)
    except ValueError:
        # 0-d covariance: the correlation of a variable with itself is 1.
        return 1
    #
    if xnotmask.all():
        # No missing data: normalize by the outer product of the
        # standard deviations taken from the covariance diagonal.
        _denom = ma.sqrt(ma.multiply.outer(diag, diag))
    else:
        # Missing data: each off-diagonal entry gets its own denominator
        # built from the variances over the jointly-unmasked observations.
        _denom = diagflat(diag)
        _denom._sharedmask = False  # We know return is always a copy
        n = x.shape[1 - rowvar]
        if rowvar:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
        else:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(
                        vstack((x[:, i], x[:, j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
    return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
    """
    Translate slice objects to concatenation along an axis.

    For documentation on usage, see `mr_class`.

    See Also
    --------
    mr_class

    """

    def __init__(self, axis=0):
        # Masked arrays have no matrix counterpart, so matrix output is
        # disabled unconditionally.
        AxisConcatenator.__init__(self, axis, matrix=False)

    def __getitem__(self, key):
        # A bare string key would be the matrix-building syntax of np.r_;
        # that is meaningless for masked arrays, so reject it up front.
        if isinstance(key, str):
            raise MAError("Unavailable for masked array.")
        if not isinstance(key, tuple):
            key = (key,)
        objs = []
        scalars = []            # positions of scalar entries, for dtype fixup
        final_dtypedescr = None  # widest array dtype seen so far
        for k in range(len(key)):
            scalar = False
            if isinstance(key[k], slice):
                # A slice expands to a range of values; an imaginary step
                # means "this many evenly spaced points" (linspace syntax).
                step = key[k].step
                start = key[k].start
                stop = key[k].stop
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    size = int(abs(step))
                    newobj = np.linspace(start, stop, num=size)
                else:
                    newobj = np.arange(start, stop, step)
            elif isinstance(key[k], str):
                # String entries inside a tuple key are directives (a bare
                # string key was already rejected above).
                if (key[k] in 'rc'):
                    self.matrix = True
                    self.col = (key[k] == 'c')
                    continue
                try:
                    # A numeric string selects the concatenation axis.
                    self.axis = int(key[k])
                    continue
                except (ValueError, TypeError):
                    raise ValueError("Unknown special directive")
            elif type(key[k]) in np.ScalarType:
                # Wrap scalars as 1-element arrays; remember them so they
                # can be promoted to the common dtype afterwards.
                newobj = asarray([key[k]])
                scalars.append(k)
                scalar = True
            else:
                newobj = key[k]
            objs.append(newobj)
            if isinstance(newobj, ndarray) and not scalar:
                if final_dtypedescr is None:
                    final_dtypedescr = newobj.dtype
                elif newobj.dtype > final_dtypedescr:
                    final_dtypedescr = newobj.dtype
        if final_dtypedescr is not None:
            # Promote the scalar entries to the widest array dtype.
            for k in scalars:
                objs[k] = objs[k].astype(final_dtypedescr)
        res = concatenate(tuple(objs), axis=self.axis)
        return self._retval(res)
class mr_class(MAxisConcatenator):
    """
    Translate slice objects to concatenation along the first axis.

    This is the masked array version of `lib.index_tricks.RClass`.

    See Also
    --------
    lib.index_tricks.RClass

    Examples
    --------
    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
    array([1, 2, 3, 0, 0, 4, 5, 6])

    """
    def __init__(self):
        # Always concatenate along axis 0 (row-wise), like np.r_.
        MAxisConcatenator.__init__(self, 0)

# Singleton providing the ``np.ma.mr_[...]`` indexing syntax.
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
    """
    Find the indices of the first and last unmasked values.

    Expects a 1-D `MaskedArray`; returns None if all values are masked.

    Parameters
    ----------
    a : array_like
        Input 1-D `MaskedArray`

    Returns
    -------
    edges : ndarray or None
        The indices of the first and last non-masked values in the array,
        or None if all values are masked.

    See Also
    --------
    flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 1-D arrays.

    Examples
    --------
    >>> a = np.ma.arange(10)
    >>> np.ma.flatnotmasked_edges(a)
    array([0, 9])
    >>> mask = (a < 3) | (a > 8) | (a == 5)
    >>> a[mask] = np.ma.masked
    >>> np.ma.flatnotmasked_edges(a)
    array([3, 8])
    >>> a[:] = np.ma.masked
    >>> print(np.ma.flatnotmasked_edges(a))
    None

    """
    mask = getmask(a)
    # With no masked entries the edges are simply the two array ends.
    if mask is nomask or not np.any(mask):
        return np.array([0, a.size - 1])
    valid = np.flatnonzero(~mask)
    if len(valid):
        return valid[[0, -1]]
    return None
def notmasked_edges(a, axis=None):
    """
    Find the indices of the first and last unmasked values along an axis.

    If all values are masked, return None.  Otherwise, return a list
    of two tuples, corresponding to the indices of the first and last
    unmasked values respectively.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    edges : ndarray or list
        An array of start and end indexes if there are any masked data in
        the array. If there are no masked data in the array, `edges` is a
        list of the first and last index.

    See Also
    --------
    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
    clump_masked, clump_unmasked

    Examples
    --------
    >>> a = np.arange(9).reshape((3, 3))
    >>> m = np.zeros_like(a)
    >>> m[1:, 1:] = 1
    >>> am = np.ma.array(a, mask=m)
    >>> np.array(am[~am.mask])
    array([0, 1, 2, 3, 6])

    >>> np.ma.notmasked_edges(am)
    array([0, 6])

    """
    a = asarray(a)
    # Flattened (or already 1-D) input reduces to the flat variant.
    if axis is None or a.ndim == 1:
        return flatnotmasked_edges(a)
    m = getmaskarray(a)
    # Mask the coordinate grids wherever `a` is masked, so that min/max
    # along `axis` give the first/last unmasked index in each lane.
    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
    return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
            tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
    """
    Find contiguous unmasked data in a masked array (operating on its
    flattened version).

    Parameters
    ----------
    a : narray
        The input array.

    Returns
    -------
    slice_list : list
        A sorted sequence of slices (start index, end index), or a single
        slice covering everything when nothing is masked, or None when
        everything is masked.

    See Also
    --------
    flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Examples
    --------
    >>> a = np.ma.arange(10)
    >>> np.ma.flatnotmasked_contiguous(a)
    slice(0, 10, None)

    >>> mask = (a < 3) | (a > 8) | (a == 5)
    >>> a[mask] = np.ma.masked
    >>> np.ma.flatnotmasked_contiguous(a)
    [slice(3, 5, None), slice(6, 9, None)]
    """
    mask = getmask(a)
    if mask is nomask:
        return slice(0, a.size, None)
    slices = []
    start = 0
    # Group the flattened mask into runs of identical values; every
    # unmasked run becomes one slice.
    for masked, run in itertools.groupby(mask.ravel()):
        length = len(list(run))
        if not masked:
            slices.append(slice(start, start + length))
        start += length
    return slices or None
def notmasked_contiguous(a, axis=None):
    """
    Find contiguous unmasked data in a masked array along the given axis.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    endpoints : list
        A list of slices (start and end indexes) of unmasked indexes
        in the array.  When `axis` is given, one entry (a list of slices,
        or None) is returned for each 1-D line along the other axis.

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.

    Examples
    --------
    >>> a = np.arange(9).reshape((3, 3))
    >>> mask = np.zeros_like(a)
    >>> mask[1:, 1:] = 1
    >>> ma = np.ma.array(a, mask=mask)
    >>> np.ma.notmasked_contiguous(ma)
    [slice(0, 4, None), slice(6, 7, None)]
    """
    a = asarray(a)
    nd = a.ndim
    if nd > 2:
        raise NotImplementedError("Currently limited to atmost 2D array.")
    if axis is None or nd == 1:
        return flatnotmasked_contiguous(a)
    #
    result = []
    #
    # Walk every 1-D line along `axis` and collect its unmasked runs.
    other = (axis + 1) % 2
    idx = [0, 0]
    idx[axis] = slice(None, None)
    #
    for i in range(a.shape[other]):
        idx[other] = i
        # BUG FIX: index with a tuple — indexing an ndarray with a *list*
        # containing a slice was deprecated in NumPy 1.15 and is an error
        # in modern NumPy.
        result.append(flatnotmasked_contiguous(a[tuple(idx)]) or None)
    return result
def _ezclump(mask):
    """
    Finds the clumps (groups of data with the same values) for a 1D bool
    array.

    Returns a series of slices.
    """
    if mask.ndim > 1:
        mask = mask.ravel()
    # Positions where the mask value flips (start of a new run).
    boundaries = np.flatnonzero(mask[1:] != mask[:-1]) + 1

    if mask[0]:
        # The array starts inside a True run.
        if boundaries.size == 0:
            return [slice(0, mask.size)]
        clumps = [slice(0, boundaries[0])]
        clumps.extend(slice(left, right)
                      for left, right in zip(boundaries[1:-1:2],
                                             boundaries[2::2]))
    else:
        # The array starts inside a False run.
        if boundaries.size == 0:
            return []
        clumps = [slice(left, right)
                  for left, right in zip(boundaries[:-1:2], boundaries[1::2])]
    if mask[-1]:
        # A trailing True run ends at the array boundary.
        clumps.append(slice(boundaries[-1], mask.size))
    return clumps
def clump_unmasked(a):
    """
    Return list of slices corresponding to the unmasked clumps of a 1-D
    array.  (A "clump" is defined as a contiguous region of the array).

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each continuous region of unmasked
        elements in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    notmasked_contiguous, clump_masked

    Examples
    --------
    >>> a = np.ma.masked_array(np.arange(10))
    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
    >>> np.ma.clump_unmasked(a)
    [slice(3, 6, None), slice(7, 8, None)]

    """
    # CONSISTENCY FIX: use ma.getmask, like the sibling clump_masked,
    # instead of reaching for the private `_mask` attribute.  Behavior is
    # identical (both yield `nomask` when nothing is masked) but this no
    # longer depends on MaskedArray internals.
    mask = ma.getmask(a)
    if mask is nomask:
        # No mask at all: the whole array is one unmasked clump.
        return [slice(0, a.size)]
    return _ezclump(~mask)
def clump_masked(a):
    """
    Returns a list of slices corresponding to the masked clumps of a 1-D
    array.  (A "clump" is defined as a contiguous region of the array).

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each continuous region of masked
        elements in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
    notmasked_contiguous, clump_unmasked

    Examples
    --------
    >>> a = np.ma.masked_array(np.arange(10))
    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
    >>> np.ma.clump_masked(a)
    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]

    """
    mask = ma.getmask(a)
    # An absent mask means there are no masked clumps at all.
    return [] if mask is nomask else _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
    """
    Masked values in the input array result in rows of zeros.

    """
    result = np.vander(x, n)
    mask = getmask(x)
    if mask is not nomask:
        # Zero out the rows corresponding to masked input entries.
        result[mask] = 0
    return result

vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least-squares polynomial fit that honours masks: any masked values in
    x is propagated in y, and vice-versa.
    """
    x, y = asarray(x), asarray(y)

    # Fold the masks of x, y (and w, if given) into one combined mask.
    combined = getmask(x)
    if y.ndim == 1:
        combined = mask_or(combined, getmask(y))
    elif y.ndim == 2:
        # For 2-D y, a row with any masked entry is dropped entirely.
        row_mask = getmask(mask_rows(y))
        if row_mask is not nomask:
            combined = mask_or(combined, row_mask[:, 0])
    else:
        raise TypeError("Expected a 1D or 2D array for y!")

    if w is not None:
        w = asarray(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        combined = mask_or(combined, getmask(w))

    if combined is nomask:
        # Nothing is masked: delegate directly.
        return np.polyfit(x, y, deg, rcond, full, w, cov)

    # Fit only on the positions that survived all the masks.
    keep = ~combined
    if w is not None:
        w = w[keep]
    return np.polyfit(x[keep], y[keep], deg, rcond, full, w, cov)
# Graft the full np.polyfit docstring onto the masked wrapper, keeping the
# mask-propagation note written above.
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
| bsd-3-clause |
decimalbell/redbase | third_party/src/gtest-1.7.0/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
# NOTE(review): the `sets` module is Python-2-only (deprecated in 2.6,
# removed in 3.x); this script as a whole targets Python 2.
import sets
import sys

# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files (relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """
  if not os.path.isfile(os.path.join(directory, relative_path)):
    print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory)
    print ('Please either specify a valid project root directory '
           'or omit it on the command line.')
    # Abort the whole script: callers rely on this function not
    # returning when the file is missing.
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # Check the two seed files in order; VerifyFileExists aborts on the
  # first one that is missing.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print ('%s already exists in directory %s - overwrite it? (y/N) ' %
           (relative_path, output_dir))
    # Only a literal 'y' or 'Y' answer proceeds; anything else aborts.
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print 'ABORTED.'
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Validate both generated-file destinations; VerifyOutputFile aborts
  # (or prompts) as needed for each.
  for target in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, target)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
  # NOTE(review): `file()` and `sets.Set` are Python-2-only.
  output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = sets.Set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
  processed_files = sets.Set()  # Source/header files already emitted.

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.

          # There is no need to #include "gtest/gtest.h" more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          # Ordinary source line: copy it through unchanged.
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # NOTE(review): `file()` is Python-2-only.
  output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  FuseGTestAllCcToFile(gtest_root, output_file)
  output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc."""
  # Validation must run before generation: both validators abort the
  # process via sys.exit() on bad input, so nothing is written unless
  # both the source tree and the output location check out.
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Command-line entry point; see the module docstring for usage."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Wrong number of arguments: print the usage text (the module
    # docstring) and fail.
    print __doc__
    sys.exit(1)


if __name__ == '__main__':
  main()
| mit |
ueno/ibus | ibus/factory.py | 6 | 4043 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
__all__ = (
"EngineFactoryBase",
"FactoryInfo"
)
import dbus
import object
import interface
from serializable import *
from exception import *
class EngineFactoryBase(object.Object):
    """Skeleton for engine factories exported over the IBus D-Bus.

    Publishes itself at /org/freedesktop/IBus/Factory via an
    EngineFactoryProxy; subclasses are expected to override
    create_engine() to actually build engines.
    """

    def __init__(self, bus):
        super(EngineFactoryBase, self).__init__()
        conn = bus.get_dbusconn()
        self.__proxy = EngineFactoryProxy(self, conn, "/org/freedesktop/IBus/Factory")
        self.__bus = bus

    def initialize(self):
        """Hook invoked before the factory is used; no-op by default."""
        pass

    def uninitialize(self):
        """Hook invoked when the factory is shut down; no-op by default."""
        pass

    def create_engine(self, engine_name):
        """Create the named engine; subclasses must override this."""
        raise IBusException("Can not create engine %s" % engine_name)

    def do_destroy(self):
        # Drop our references so the proxy and bus can be collected,
        # then let the base class finish tearing down.
        self.__proxy = None
        self.__bus = None
        super(EngineFactoryBase, self).do_destroy()
class EngineFactoryProxy(interface.IEngineFactory):
    """D-Bus adaptor that forwards IEngineFactory calls to a factory object."""

    def __init__(self, factory, conn, object_path):
        super(EngineFactoryProxy, self).__init__(conn, object_path)
        self.__factory = factory

    def GetInfo(self):
        """Forward to the factory's get_info()."""
        return self.__factory.get_info()

    def Initialize(self):
        """Forward to the factory's initialize()."""
        return self.__factory.initialize()

    def Uninitialize(self):
        """Forward to the factory's uninitialize()."""
        return self.__factory.uninitialize()

    def CreateEngine(self, engine_name):
        """Create an engine and hand back its D-Bus object."""
        return self.__factory.create_engine(engine_name).get_dbus_object()

    def Destroy(self):
        # Destroy the factory first, then detach this proxy from the bus.
        self.__factory.destroy()
        self.__factory = None
        self.remove_from_connection()
class FactoryInfo(Serializable):
    """Serializable description of an engine factory.

    Carries the factory's object path, display name, language, icon,
    authors and credits, and (de)serializes them to and from a D-Bus
    struct of exactly six fields, in that order.
    """
    __gtype_name__ = "PYIBusFactoryInfo"
    __NAME__ = "IBusFactoryInfo"

    def __init__ (self, path=None, name=None, lang=None, icon=None, authors=None, credits=None):
        super(FactoryInfo, self).__init__()
        self.__path = path
        self.__name = name
        self.__lang = lang
        self.__icon = icon
        self.__authors = authors
        self.__credits = credits

    def get_path(self):
        return self.__path
    def get_name(self):
        return self.__name
    def get_lang(self):
        return self.__lang
    def get_icon(self):
        return self.__icon
    def get_authors(self):
        return self.__authors
    def get_credits(self):
        return self.__credits

    # Read-only attribute access for the six fields.
    path = property(get_path)
    name = property(get_name)
    lang = property(get_lang)
    icon = property(get_icon)
    authors = property(get_authors)
    credits = property(get_credits)

    def serialize(self, struct):
        """Append the six fields to `struct` as D-Bus typed values."""
        super(FactoryInfo, self).serialize(struct)
        struct.append (dbus.ObjectPath(self.__path))
        struct.append (dbus.String(self.__name))
        struct.append (dbus.String(self.__lang))
        struct.append (dbus.String(self.__icon))
        struct.append (dbus.String(self.__authors))
        struct.append (dbus.String(self.__credits))

    def deserialize(self, struct):
        """Pop the six fields back off `struct` in serialization order."""
        super(FactoryInfo, self).deserialize(struct)
        # BUG FIX: deserialize pops SIX fields below (path, name, lang,
        # icon, authors, credits — matching serialize()), but the old
        # guard only required 5 elements, so a 5-element struct passed
        # the check and crashed with IndexError on the last pop().
        if len(struct) < 6:
            raise IBusException ("Can not deserialize IBusFactoryInfo")

        self.__path = struct.pop(0)
        self.__name = struct.pop(0)
        self.__lang = struct.pop(0)
        self.__icon = struct.pop(0)
        self.__authors = struct.pop(0)
        self.__credits = struct.pop(0)
| lgpl-2.1 |
CSC301H-Fall2013/JuakStore | site-packages/django/contrib/comments/models.py | 99 | 7729 | from django.conf import settings
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class BaseCommentAbstractModel(models.Model):
    """
    An abstract base class that any custom comment models probably should
    subclass.
    """

    # Generic relation to the object being commented on.  The target is
    # identified by the (content_type, object_pk) pair; object_pk is a
    # TextField so it can hold any primary-key representation.
    content_type = models.ForeignKey(ContentType,
            verbose_name=_('content type'),
            related_name="content_type_set_for_%(class)s")
    object_pk = models.TextField(_('object ID'))
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")

    # Metadata about the comment: the site the comment was posted on.
    site = models.ForeignKey(Site)

    class Meta:
        abstract = True

    def get_content_object_url(self):
        """
        Get a URL suitable for redirecting to the content object.
        """
        return urlresolvers.reverse(
            "comments-url-redirect",
            args=(self.content_type_id, self.object_pk)
        )
@python_2_unicode_compatible
class Comment(BaseCommentAbstractModel):
    """
    A user comment about some object.
    """

    # Who posted this comment? If ``user`` is set then it was an authenticated
    # user; otherwise at least user_name should have been set and the comment
    # was posted by a non-authenticated user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
                    blank=True, null=True, related_name="%(class)s_comments")
    user_name = models.CharField(_("user's name"), max_length=50, blank=True)
    user_email = models.EmailField(_("user's email address"), blank=True)
    user_url = models.URLField(_("user's URL"), blank=True)

    comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)

    # Metadata about the comment.  submit_date is filled in by save()
    # when left as None.
    submit_date = models.DateTimeField(_('date/time submitted'), default=None)
    ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
    is_public = models.BooleanField(_('is public'), default=True,
                help_text=_('Uncheck this box to make the comment effectively ' \
                            'disappear from the site.'))
    is_removed = models.BooleanField(_('is removed'), default=False,
                help_text=_('Check this box if the comment is inappropriate. ' \
                            'A "This comment has been removed" message will ' \
                            'be displayed instead.'))

    # Manager
    objects = CommentManager()

    class Meta:
        db_table = "django_comments"
        ordering = ('submit_date',)
        permissions = [("can_moderate", "Can moderate comments")]
        verbose_name = _('comment')
        verbose_name_plural = _('comments')

    def __str__(self):
        return "%s: %s..." % (self.name, self.comment[:50])

    def save(self, *args, **kwargs):
        # Stamp the submission time if the caller didn't supply one.
        if self.submit_date is None:
            self.submit_date = timezone.now()
        super(Comment, self).save(*args, **kwargs)

    def _get_userinfo(self):
        """
        Get a dictionary that pulls together information about the poster
        safely for both authenticated and non-authenticated comments.

        This dict will have ``name``, ``email``, and ``url`` fields.
        """
        # The result is cached on the instance as _userinfo after the
        # first call.
        if not hasattr(self, "_userinfo"):
            userinfo = {
                "name": self.user_name,
                "email": self.user_email,
                "url": self.user_url
            }
            if self.user_id:
                u = self.user
                if u.email:
                    userinfo["email"] = u.email
                # If the user has a full name, use that for the user name.
                # However, a given user_name overrides the raw user.username,
                # so only use that if this comment has no associated name.
                if u.get_full_name():
                    userinfo["name"] = self.user.get_full_name()
                elif not self.user_name:
                    userinfo["name"] = u.get_username()
            self._userinfo = userinfo
        return self._userinfo
    userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)

    def _get_name(self):
        return self.userinfo["name"]
    def _set_name(self, val):
        # Authenticated comments derive the name from the user record.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the name is read-only."))
        self.user_name = val
    name = property(_get_name, _set_name, doc="The name of the user who posted this comment")

    def _get_email(self):
        return self.userinfo["email"]
    def _set_email(self, val):
        # Authenticated comments derive the email from the user record.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the email is read-only."))
        self.user_email = val
    email = property(_get_email, _set_email, doc="The email of the user who posted this comment")

    def _get_url(self):
        return self.userinfo["url"]
    def _set_url(self, val):
        self.user_url = val
    url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")

    def get_absolute_url(self, anchor_pattern="#c%(id)s"):
        return self.get_content_object_url() + (anchor_pattern % self.__dict__)

    def get_as_text(self):
        """
        Return this comment as plain text.  Useful for emails.
        """
        d = {
            'user': self.user or self.name,
            'date': self.submit_date,
            'comment': self.comment,
            'domain': self.site.domain,
            'url': self.get_absolute_url()
        }
        return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
@python_2_unicode_compatible
class CommentFlag(models.Model):
    """
    Records a flag on a comment. This is intentionally flexible; right now, a
    flag could be:

        * A "removal suggestion" -- where a user suggests a comment for (potential) removal.
        * A "moderator deletion" -- used when a moderator deletes a comment.

    You can (ab)use this model to add other flags, if needed. However, by
    design users are only allowed to flag a comment with a given flag once;
    if you want rating look elsewhere.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="comment_flags")
    comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
    flag = models.CharField(_('flag'), max_length=30, db_index=True)
    # flag_date is filled in by save() when left as None.
    flag_date = models.DateTimeField(_('date'), default=None)

    # Constants for flag types
    SUGGEST_REMOVAL = "removal suggestion"
    MODERATOR_DELETION = "moderator deletion"
    MODERATOR_APPROVAL = "moderator approval"

    class Meta:
        db_table = 'django_comment_flags'
        # One flag of a given kind per (user, comment) pair, by design.
        unique_together = [('user', 'comment', 'flag')]
        verbose_name = _('comment flag')
        verbose_name_plural = _('comment flags')

    def __str__(self):
        return "%s flag of comment ID %s by %s" % \
            (self.flag, self.comment_id, self.user.get_username())

    def save(self, *args, **kwargs):
        # Stamp the flag time if the caller didn't supply one.
        if self.flag_date is None:
            self.flag_date = timezone.now()
        super(CommentFlag, self).save(*args, **kwargs)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.