repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
genestack/task-library | genestack/bio/variation/variation_indexer.py | 1 | 16269 | # -*- coding: utf-8 -*-
import re
import sys
import vcf
from genestack.bio import bio_meta_keys
from genestack.genestack_indexer import Indexer
from genestack.genestack_exceptions import GenestackException
from genestack.bio.reference_genome.reference_genome_file import ReferenceGenome
from genestack.metainfo import StringValue, Metainfo
from genestack.utils import normalize_contig_name
# FIXME find usages and remove this constants from here
DATA_LINK = Metainfo.DATA_URL
DATA_LOCATION = 'genestack.location:data'
# Ordered field names of one snpEff "EFF" annotation entry.  The order must
# match the pipe-separated layout snpEff emits, because RecordConverter parses
# EFF values positionally (see the "blindly parse snp_eff line" TODO there).
EFF_FIELDS = ['Effect', 'Effect_Impact', 'Functional_Class', 'Codon_Change',
              'Amino_Acid_Change', 'Amino_Acid_length', 'Gene_Name',
              'Transcript_BioType', 'Gene_Coding', 'Transcript_ID',
              'Exon_Rank', 'Genotype_Number', 'ERRORS', 'WARNINGS']
# The same fields lower-cased with an 'eff_' prefix: used as schema keys for
# the split EFF sub-fields.
EFF_SCHEMA_FIELDS = [('eff_' + e.lower()) for e in EFF_FIELDS]
class RecordConverter(object):
    """Convert :class:`vcf.Record` objects into flat feature dictionaries.

    Feature keys carry solr-style type suffixes: ``_s`` string, ``_l`` long,
    ``_f`` float, ``_b`` bool; a trailing ``s`` marks multi-valued fields.
    (``_ci``/``_iv``/``_ns`` look like case-insensitive / interval / sorting
    variants of the index schema — naming convention, verify against the
    index configuration.)
    """
    # Static part of the schema, shared by every VCF file; per-file INFO
    # fields are added on top of it in __init__.
    BASE_SCHEMA = {
        'CHROM': 'contig_s',
        'LOCATION': 'location_iv',
        'START': 'start_l',
        'REF': 'ref_s_ci',
        'QUAL': 'qual_f',
        'ID': 'id_ss_ci',
        'FILTER': 'filter_ss_ci',
        'ALT': 'alt_ss_ci',
        'ALT_COUNT': 'alt_len_i_ns',
        'TYPE': 'type_ss_ci'
    }

    def __init__(self, vcf_reader):
        """
        Record converter converts vcf.Record to feature.

        ``self.schema`` is filled with info from vcf.Reader.infos.
        We can not create schema by analysing record items if we create record manually.
        There are some differences in schema depending on how it was created:
          - value types
            - manual: can contain numbers, string and unicode values
            - parsed: contain only strings
          - single values
            - manual: records always contain list of values
            - parsed: have single value if it is single in schema

        :param vcf_reader: reader whose header (``infos``) defines the schema
        :type vcf_reader: vcf.Reader
        :raises GenestackException: if a header declares an unknown INFO type
        """
        self.range_limit = self.__get_range_limit(vcf_reader.infos)
        self.schema = self.BASE_SCHEMA.copy()
        for info in vcf_reader.infos.values():
            # Map VCF INFO types onto solr-style suffixes.
            if info.type == 'Float':
                suffix = 'f'
            elif info.type == 'Integer':
                suffix = 'l'
            elif info.type in ('Character', 'String'):
                suffix = 's'
            elif info.type == 'Flag':
                suffix = 'b'
            else:
                raise GenestackException('Unexpected vcf info type for {}'.format(info))
            # for single bool value num is 0
            if str(info.num) not in ('0', '1'):
                suffix += 's'
            self.schema[info.id] = 'info_%s_%s' % (info.id, suffix)

    @staticmethod
    def __get_range_limit(infos):
        """
        Return range limit from vcf.Reader.infos.

        Get low and high range for field types.
        ``None`` mean that there is no limit.
        Text searched by regular expression, wrong values will be silently ignored.

        examples:
          - (Range:1-10)   -> 1.0, 10.0
          - (Range:-10.33) -> None, 10.33
          - (Range:10-)    -> 10.0, None
        """
        range_limit = {}
        # Raw string: '\(' etc. are not valid string escapes and trigger
        # DeprecationWarning/SyntaxWarning on newer interpreters.
        reg = re.compile(r'\(Range:([0-9]*\.?[0-9]*)-([0-9]*\.?[0-9]*)\)')
        for key, val in infos.items():
            match = reg.search(val.desc)
            if match:
                range_limit[key] = tuple(float(x) if x else None for x in match.group(1, 2))
        return range_limit

    def convert_record_to_feature(self, line_id, record):
        """
        Convert vcf.Record to feature.

        :param line_id: line id in file, first line of file has id=1
        :type line_id: long
        :param record: record
        :type record: vcf.Record
        :return: feature dictionary keyed by typed schema names
        :rtype: dict
        """
        contig = normalize_contig_name(record.CHROM)
        start = record.start
        end = record.end
        record_id = record.ID
        ref = record.REF
        substitutions = record.ALT
        quality = record.QUAL
        filter_field = record.FILTER
        info = record.INFO
        samples_format = record.FORMAT
        samples = record.samples
        data = {
            '__id__': str(line_id),
            'line_l': line_id,
            'contig_s': contig,
            'location_iv': str(start) + " " + str(end),
            'start_l': start,
            'ref_s_ci': ref,
            'qual_f': quality
        }
        # '.' is the VCF placeholder for "missing value".
        if record_id is not None and record_id != '.':
            data['id_ss_ci'] = record_id.split(',')
        if filter_field != '.':
            data['filter_ss_ci'] = filter_field
        data.update(self.__get_samples_info(samples_format, samples))
        alt = list()
        types = list()
        for subst in substitutions:
            sub = str(subst) if subst is not None else '.'
            alt.append(sub)
            types.append(self.__get_type(ref, sub))
        data['alt_ss_ci'] = alt
        data['alt_len_i_ns'] = len(alt)
        data['type_ss_ci'] = types
        # For future use; I would prefer to use PyVCF methods instead of
        # implementing my own, but there is a slight difference in the
        # results. Please review if these differences are critical.
        #   if record.is_snp: data['is_snp_b'] = True
        #   if record.is_indel: data['is_indel_b'] = True
        #   if record.is_transition: data['is_transition_b'] = True
        #   if record.is_deletion: data['is_deletion_b'] = True
        #   if record.is_monomorphic: data['is_monomorphic_b'] = True
        #   data['var_type_s'] = record.var_type
        #   data['var_subtype_s'] = record.var_subtype
        for key, value in info.items():
            if value is None:
                continue
            # Skip absent values; the empty-list check also prevents an
            # IndexError on value[0] here and in the range handling below.
            if isinstance(value, list) and (not value or value[0] is None):
                continue
            if key not in self.schema:
                typed_key = self.__get_typed_string(key, value)
                self.schema[key] = typed_key
            typed_key = self.schema[key]
            if typed_key == 'info_EFF_ss':
                for eff_line in value:
                    # TODO Here we blindly parse snp_eff line and believe that
                    # items are in the proper order,
                    # but we have not even checked snpEff version
                    # Seems that we should check snpEff version before doing such blind parsing
                    for i, val in enumerate(re.split(r'\(|\)|\|', eff_line)):
                        eff_key = EFF_SCHEMA_FIELDS[i]
                        eff_typed_key = 'info_splitted_' + eff_key + '_ss'
                        data.setdefault(eff_typed_key, []).append(val)
                        self.schema[eff_key] = eff_typed_key
                # TODO info_EFF_ss is stored both as raw and as parsed,
                # need to check that nobody rely on raw value
            data[typed_key] = value
            if isinstance(value, list):
                # Store min/max of (range-filtered) numeric lists for sorting.
                key_base = self.__get_typed_string(key, value[0]) + '_ns'
                low_limit, high_limit = self.range_limit.get(key, (None, None))
                if low_limit:
                    value = [x for x in value if x >= low_limit]
                if high_limit:
                    value = [x for x in value if x <= high_limit]
                if value:
                    data['sorting_max_' + key_base] = max(value)
                    data['sorting_min_' + key_base] = min(value)
        return data

    def __get_samples_info(self, samples_format, samples):
        """Flatten per-sample FORMAT values into multi-valued feature fields."""
        info = {}
        format_list = samples_format.split(':') if samples_format is not None else []
        for s in samples:
            info.setdefault('samples_info_names_ss_ci', []).append(s.sample)
            for f in format_list:
                val = self.__get_attribute_as_string(s.data, f)
                info.setdefault('samples_info_' + f + '_ss', []).append(val)
        return info

    @staticmethod
    def __get_attribute_as_string(data, attr):
        """Return sample attribute as string; lists are comma-joined, missing -> ''."""
        val = getattr(data, attr, None)
        if val is None:
            return ''
        if isinstance(val, list):
            return ",".join(map(str, val))
        return str(val)

    @staticmethod
    def __get_typed_string(key, value):
        """
        Add solr suffix depending on value type

        :param key: key
        :type key: str
        :param value: corresponding value
        :type value: any
        :return: solr key string (``None`` when the value is None)
        :rtype: str
        """
        key = 'info_' + key
        list_suffix = 's' if isinstance(value, list) else ''
        v = value[0] if list_suffix else value
        if v is None:
            return None
        # Python 2 names: basestring covers str/unicode, long covers big ints.
        if isinstance(v, basestring):
            suffix = '_s'
        elif isinstance(v, bool):
            suffix = '_b'
        elif isinstance(v, (int, long)):
            suffix = '_l'
        elif isinstance(v, float):
            suffix = '_f'
        else:
            raise GenestackException("Unknown type for key %s: %s (%s)" % (key, v, type(v)))
        return key + suffix + list_suffix

    @staticmethod
    def __get_type(ref, alt):
        """Classify a REF/ALT pair: MR (missing alt), SNP, MNP, INS or DEL."""
        if alt == '.':
            return 'MR'
        if len(ref) == 1 and len(alt) == 1:
            return 'SNP'
        elif len(ref) == len(alt):
            return 'MNP'
        elif len(ref) < len(alt):
            return 'INS'
        else:
            return 'DEL'
class VariationIndexer(object):
    """Index VCF variation files.

    Indexing progress (the id of the last indexed line) is persisted in the
    target file's metainfo under MAX_LINE_KEY, so a failed initialization can
    be resumed from where it stopped.
    """
    # Number of processed features accumulated before being sent to the index.
    INDEXING_CHUNK_SIZE = 4000
    # Number of raw features accumulated before process_features() is applied.
    QUERY_CHUNK_SIZE = 100
    # Metainfo key storing the id of the last line sent to the index.
    MAX_LINE_KEY = 'genestack.initialization:maxLine'

    def __init__(self, target_file, reference_genome=None):
        self.target_file = target_file
        # If no genome is supplied, resolve it through the file's metainfo
        # reference.
        if reference_genome is None:
            reference_genome = target_file.resolve_reference(
                bio_meta_keys.REFERENCE_GENOME, ReferenceGenome
            )
        assert reference_genome is not None, "No reference genome found"
        self.reference_genome = reference_genome
        # Schema of the last RecordConverter used by iterate_features; only
        # kept to back the deprecated ``schema`` property below.
        self.__schema = None

    @property
    def schema(self):
        # Deprecated accessor: the schema now lives on RecordConverter.
        sys.stderr.write('"schema" attribute is deprecated, use RecordConvertor schema instead\n')
        return self.__schema

    def get_indexing_line_from(self):
        """Return the line number to resume indexing from (0 for a fresh file
        or when the stored value is missing/malformed)."""
        line_from_value = self.target_file.get_metainfo().get_first_string(VariationIndexer.MAX_LINE_KEY)
        try:
            return int(line_from_value) if line_from_value is not None else 0
        except ValueError:
            return 0

    def set_max_line(self, line_id):
        # Persist indexing progress so a rerun can resume from this line.
        self.target_file.replace_metainfo_value(VariationIndexer.MAX_LINE_KEY, StringValue(str(line_id)))

    def iterate_features(self, vcf_reader, record_converter=None, line_from=0):
        """
        Returns generator over features corresponding to vcf record in file.
        If ``record_converter`` is not specified uses record converter based on this vcf file.

        :param vcf_reader: vcf reader
        :type vcf_reader: vcf.Reader
        :param record_converter: converter from record to feature
        :type record_converter: RecordConverter
        :param line_from: first line that should be returned, use 0 for the whole file
        :return: generator of (line_id, feature) pairs; line ids start at 1
        """
        if record_converter is None:
            record_converter = RecordConverter(vcf_reader)
        self.__schema = record_converter.schema
        for line_id, record in enumerate(vcf_reader, start=1):
            # Skip already-indexed lines when resuming.
            if line_from > line_id:
                continue
            yield line_id, record_converter.convert_record_to_feature(line_id, record)

    def get_indexer(self, file_to_index, record_converter=None):
        """
        Return context manager to index records.

        This indexer has two methods:
          - ``index_record`` which accepts line_number and record
          - ``index_feature`` which accepts feature

        ``index_record`` can be called only if record_converter is specified.

        :param file_to_index: Genestack file instance
        :param record_converter: record converter
        :return: indexer
        """
        # Bind the outer methods as locals so the nested class can call them
        # without holding a reference to ``self``.
        process_features = self.process_features
        set_max_line = self.set_max_line
        set_initialization_version = self.__set_initialization_version

        class RecordIndexer(object):
            def __init__(self, file_to_index, record_converter):
                self.__file = file_to_index
                self.__inside_context = False
                # ``raw_features`` buffers unprocessed features; once enough
                # accumulate they are run through process_features() into
                # ``features`` and eventually flushed to the index.
                self.features = []
                self.raw_features = []
                self.record_converter = record_converter
                self.__last_feature_line_id = None

            def __enter__(self):
                set_initialization_version()
                self.__inside_context = True
                self.indexer = Indexer(file_to_index)
                self.indexer.__enter__()
                return self

            def __exit__(self, exc_type, exc_val, exc_tb):
                # Force out any buffered features before closing the indexer.
                self.__flush(force=True)
                self.indexer.__exit__(exc_type, exc_val, exc_tb)
                self.__inside_context = False

            def index_record(self, line, record):
                """Convert ``record`` at ``line`` to a feature and index it."""
                if not self.record_converter:
                    raise GenestackException('Indexing record only possible if record converter is specified')
                feature = self.record_converter.convert_record_to_feature(line, record)
                self.index_feature(feature)

            def index_feature(self, feature):
                """Buffer ``feature`` for indexing; flushes in chunks."""
                if not self.__inside_context:
                    raise GenestackException('RecordIndexer object must be used only inside a "with" statement')
                self.raw_features.append(feature)
                self.__last_feature_line_id = feature['line_l']
                self.__flush()

            def __flush(self, force=False):
                # Stage 1: run buffered raw features through process_features
                # once QUERY_CHUNK_SIZE is reached (or unconditionally on force).
                limit = 0 if force else (VariationIndexer.QUERY_CHUNK_SIZE - 1)
                if len(self.raw_features) > limit:
                    self.features.extend(process_features(self.raw_features))
                    self.raw_features = []
                # Stage 2: send processed features to the index and record
                # progress once INDEXING_CHUNK_SIZE is reached.
                limit = 0 if force else (VariationIndexer.INDEXING_CHUNK_SIZE - 1)
                if len(self.features) > limit:
                    self.indexer.index_records(self.features)
                    self.features = []
                    set_max_line(self.__last_feature_line_id)

        return RecordIndexer(file_to_index, record_converter)

    def create_index(self, file_name):
        """
        Create the index for a VCF file.

        Indexing progress is stored in metainfo: when the file is initialized
        for the first time the progress key is empty and the whole file is
        indexed. Each time a chunk of records is sent to the server the
        metainfo is updated, so rerunning after a failure resumes indexing
        from the last recorded point.

        :param file_name: existing name of vcf file
        :type file_name: str
        :return: None
        """
        with open(file_name) as f, self.get_indexer(self.target_file, record_converter=None) as indexer:
            vcf_reader = vcf.Reader(f)
            record_converter = RecordConverter(vcf_reader)
            for line_id, feature in self.iterate_features(vcf_reader, record_converter=record_converter,
                                                          line_from=self.get_indexing_line_from()):
                indexer.index_feature(feature)

    def __set_initialization_version(self):
        """
        Set version of initialization. This key required to support different versions.
        """
        self.target_file.replace_metainfo_value('genestack.indexing:version', StringValue('splitEffAnnotations'))

    # TODO: Remove this method if we decide not to index ReferenceGenome data
    def __append_genome_features(self, mutation_list):
        # code removed at commit f64cdf12ddd9a64ec5cbfdebaa1d01be24224239
        pass

    def process_features(self, features_list):
        """
        This method can be overridden in children to process features before adding them to index.

        :param features_list: list of features to be processed
        :return: processed feature list
        """
        # hack to support old api
        if hasattr(self, 'process_record'):
            # Local import is redundant (sys is imported at module level) but
            # kept for byte-compatibility.
            import sys
            sys.stderr.write('Warning! "process_record" method is deprecated use "process_features" instead\n')
            return self.process_record(features_list)
        else:
            return features_list
| mit |
jonyroda97/redbot-amigosprovaveis | lib/youtube_dl/extractor/kontrtube.py | 60 | 2732 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class KontrTubeIE(InfoExtractor):
    """Extractor for videos hosted on kontrtube.ru."""
    IE_NAME = 'kontrtube'
    IE_DESC = 'KontrTube.ru - Труба зовёт'
    _VALID_URL = r'https?://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/(?P<display_id>[^/]+)/'
    _TEST = {
        'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/',
        'md5': '975a991a4926c9a85f383a736a2e6b80',
        'info_dict': {
            'id': '2678',
            'display_id': 'nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag',
            'ext': 'mp4',
            'title': 'Над олимпийской деревней в Сочи поднят российский флаг',
            'description': 'md5:80edc4c613d5887ae8ccf1d59432be41',
            'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg',
            'duration': 270,
        }
    }

    def _real_extract(self, url):
        # Both ids come straight out of the URL pattern.
        url_match = re.match(self._VALID_URL, url)
        video_id = url_match.group('id')
        display_id = url_match.group('display_id')

        page = self._download_webpage(
            url, display_id, 'Downloading page')

        # Media locations are embedded in an inline player config.
        video_url = self._search_regex(
            r"video_url\s*:\s*'(.+?)/?',", page, 'video URL')
        thumbnail = self._search_regex(
            r"preview_url\s*:\s*'(.+?)/?',", page, 'thumbnail', fatal=False)
        title = self._html_search_regex(
            r'(?s)<h2>(.+?)</h2>', page, 'title')
        description = self._html_search_meta(
            'description', page, 'description')

        # The duration label is in Russian; translate the unit words so
        # parse_duration understands them.
        raw_duration = self._search_regex(
            r'Длительность: <em>([^<]+)</em>', page, 'duration', fatal=False)
        duration = (
            parse_duration(raw_duration.replace('мин', 'min').replace('сек', 'sec'))
            if raw_duration else raw_duration)

        # View counts use spaces as thousands separators.
        raw_views = self._search_regex(
            r'Просмотров: <em>([^<]+)</em>',
            page, 'view count', fatal=False)
        view_count = int_or_none(raw_views.replace(' ', '')) if raw_views else raw_views

        comment_count = int_or_none(self._search_regex(
            r'Комментарии \((\d+)\)<', page, ' comment count', fatal=False))

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'thumbnail': thumbnail,
            'title': title,
            'description': description,
            'duration': duration,
            'view_count': int_or_none(view_count),
            'comment_count': int_or_none(comment_count),
        }
| gpl-3.0 |
meredith-digops/ansible | lib/ansible/modules/cloud/rackspace/rax_dns_record.py | 70 | 11652 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
                       name=None, server=None, state='present', ttl=7200):
    """Create, update or delete a reverse-DNS (PTR) record on Rackspace Cloud DNS.

    Always terminates through ``module.exit_json`` / ``module.fail_json``.

    :param module: AnsibleModule instance used for result reporting
    :param data: IP address the PTR record maps
    :param comment: optional record comment
    :param loadbalancer: load balancer ID to attach the PTR record to
    :param name: FQDN the PTR record resolves to
    :param server: server ID to attach the PTR record to
    :param state: 'present' or 'absent'
    :param ttl: record time-to-live in seconds
    """
    changed = False
    results = []

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                         'typically indicates an invalid region or an '
                         'incorrectly capitalized region name.')

    # PTR records are attached to a device (server or load balancer),
    # not to a domain.
    if loadbalancer:
        item = rax_find_loadbalancer(module, pyrax, loadbalancer)
    elif server:
        item = rax_find_server(module, pyrax, server)
    else:
        # Previously ``item`` was left unbound here, which surfaced as a
        # confusing NameError below; fail with a clear message instead
        # (main() normally pre-validates this, but be defensive).
        module.fail_json(msg='one of the following is required: '
                             'server,loadbalancer')
        return

    if state == 'present':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                # A PTR record for this IP already exists: update it in place
                # when name/ttl differ, otherwise report it unchanged.
                if record.ttl != ttl or record.name != name:
                    try:
                        dns.update_ptr_record(item, record, name, data, ttl)
                        changed = True
                    except Exception as e:
                        # NOTE(review): ``e.message`` is Python 2-only.
                        module.fail_json(msg='%s' % e.message)
                    record.ttl = ttl
                    record.name = name
                    results.append(rax_to_dict(record))
                    break
                else:
                    results.append(rax_to_dict(record))
                    break
        if not results:
            # No record for this IP yet: create one.
            record = dict(name=name, type='PTR', data=data, ttl=ttl,
                          comment=comment)
            try:
                results = dns.add_ptr_records(item, [record])
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        module.exit_json(changed=changed, records=results)
    elif state == 'absent':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                results.append(rax_to_dict(record))
                break
        if results:
            try:
                dns.delete_ptr_records(item, data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
                   overwrite=True, priority=None, record_type='A',
                   state='present', ttl=7200):
    """Function for manipulating record types other than PTR.

    Creates, updates or deletes a record inside ``domain`` and terminates via
    ``module.exit_json`` / ``module.fail_json``.
    """
    changed = False

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                         'typically indicates an invalid region or an '
                         'incorrectly capitalized region name.')

    if state == 'present':
        # MX/SRV records are meaningless without a priority.
        if not priority and record_type in ['MX', 'SRV']:
            module.fail_json(msg='A "priority" attribute is required for '
                                 'creating a MX or SRV record')
        # Resolve the domain name to a pyrax domain object.
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            # NOTE(review): ``e.message`` is Python 2-only.
            module.fail_json(msg='%s' % e.message)
        try:
            # With overwrite, match by name only (the data is reconciled
            # below); without it, require an exact name+data match.
            if overwrite:
                record = domain.find_record(record_type, name=name)
            else:
                record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='overwrite=true and there are multiple matching records')
        except pyrax.exceptions.DomainRecordNotFound as e:
            # No matching record exists: create one.
            try:
                record_data = {
                    'type': record_type,
                    'name': name,
                    'data': data,
                    'ttl': ttl
                }
                if comment:
                    record_data.update(dict(comment=comment))
                if priority and record_type.upper() in ['MX', 'SRV']:
                    record_data.update(dict(priority=priority))
                record = domain.add_records([record_data])[0]
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

        # Reconcile the found (or just-created) record with the requested
        # attributes; only issue an API update when something differs.
        update = {}
        if comment != getattr(record, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(record, 'ttl', None):
            update['ttl'] = ttl
        if priority != getattr(record, 'priority', None):
            update['priority'] = priority
        if data != getattr(record, 'data', None):
            update['data'] = data

        if update:
            try:
                record.update(**update)
                changed = True
                record.get()  # refresh the local copy after the update
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

    elif state == 'absent':
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        try:
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            # Nothing to delete; fall through and report unchanged.
            record = {}
            pass
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e.message)

        if record:
            try:
                record.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)

    module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
    """Ansible entry point: parse module arguments and dispatch to the PTR or
    regular record handler."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            data=dict(required=True),
            domain=dict(),
            loadbalancer=dict(),
            name=dict(required=True),
            overwrite=dict(type='bool', default=True),
            priority=dict(type='int'),
            server=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
                                              'SRV', 'TXT', 'PTR'])
        )
    )

    # Exactly one of server/loadbalancer/domain must be given: PTR records
    # need a device, all other types need a domain.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        mutually_exclusive=[
            ['server', 'loadbalancer', 'domain'],
        ],
        required_one_of=[
            ['server', 'loadbalancer', 'domain'],
        ],
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    comment = module.params.get('comment')
    data = module.params.get('data')
    domain = module.params.get('domain')
    loadbalancer = module.params.get('loadbalancer')
    name = module.params.get('name')
    overwrite = module.params.get('overwrite')
    priority = module.params.get('priority')
    server = module.params.get('server')
    state = module.params.get('state')
    ttl = module.params.get('ttl')
    record_type = module.params.get('type')

    setup_rax_module(module, pyrax, False)

    if record_type.upper() == 'PTR':
        if not server and not loadbalancer:
            module.fail_json(msg='one of the following is required: '
                                 'server,loadbalancer')
        rax_dns_record_ptr(module, data=data, comment=comment,
                           loadbalancer=loadbalancer, name=name, server=server,
                           state=state, ttl=ttl)
    else:
        rax_dns_record(module, comment=comment, data=data, domain=domain,
                       name=name, overwrite=overwrite, priority=priority,
                       record_type=record_type, state=state, ttl=ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
developerinlondon/ansible-modules-core | database/postgresql/postgresql_db.py | 112 | 11226 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host.
description:
- Add or remove PostgreSQL databases from a remote host.
version_added: "0.6"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
owner:
description:
- Name of the role to set as owner of the database
required: false
default: null
port:
description:
- Database port to connect to.
required: false
default: 5432
template:
description:
- Template used to create the database
required: false
default: null
  encoding:
    description:
      - Encoding of the database
    required: false
    default: null
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
required: false
default: null
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0) is used as template.
required: false
default: null
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''
# Create a new database with name "acme"
- postgresql_db: name=acme
# Create a new database with name "acme" and specific encoding and locale
# settings. If a template different from "template0" is specified, encoding
# and locale settings must match those of the template.
- postgresql_db: name=acme
encoding='UTF-8'
lc_collate='de_DE.UTF-8'
lc_ctype='de_DE.UTF-8'
template='template0'
'''
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
class NotSupportedError(Exception):
    """Raised when a request would require changing a database property that
    PostgreSQL cannot alter in place (encoding, LC_COLLATE or LC_CTYPE)."""
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
    """Reassign ownership of database *db* to role *owner*.

    Identifiers are quoted via pg_quote_identifier. Always returns True,
    since the statement unconditionally changes state.
    """
    cursor.execute("ALTER DATABASE %s OWNER TO %s" % (
        pg_quote_identifier(db, 'database'),
        pg_quote_identifier(owner, 'role')))
    return True
def get_encoding_id(cursor, encoding):
    """Resolve a PostgreSQL encoding name to its numeric id via the server."""
    cursor.execute(
        "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;",
        {'encoding': encoding})
    row = cursor.fetchone()
    return row['encoding_id']
def get_db_info(cursor, db):
    """Fetch owner, encoding and locale settings for database *db*.

    Returns the first matching row (a dict-like cursor row) or None when the
    database does not exist.
    """
    query = """
    SELECT rolname AS owner,
    pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
    datcollate AS lc_collate, datctype AS lc_ctype
    FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba
    WHERE datname = %(db)s
    """
    cursor.execute(query, {'db': db})
    return cursor.fetchone()
def db_exists(cursor, db):
    """Return True when a database named *db* is present on the server."""
    cursor.execute("SELECT * FROM pg_database WHERE datname=%(db)s", {'db': db})
    # pg_database holds at most one row per name.
    return cursor.rowcount == 1
def db_delete(cursor, db):
    """Drop database *db* if it exists.

    Returns True when a database was dropped, False when there was nothing
    to do.
    """
    # Guard clause: nothing to drop.
    if not db_exists(cursor, db):
        return False
    cursor.execute("DROP DATABASE %s" % pg_quote_identifier(db, 'database'))
    return True
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
    """Create database *db* or reconcile an existing one.

    Returns True when something changed (database created, or owner updated),
    False when the existing database already matches the request. Raises
    NotSupportedError when encoding/LC_COLLATE/LC_CTYPE differ from the
    existing database, since those cannot be changed in place.
    """
    # Locale/encoding are passed as query parameters; identifiers go through
    # pg_quote_identifier.
    params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype)
    if not db_exists(cursor, db):
        # Assemble CREATE DATABASE with only the clauses that were requested.
        query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
        if owner:
            query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role'))
        if template:
            query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
        if encoding:
            query_fragments.append('ENCODING %(enc)s')
        if lc_collate:
            query_fragments.append('LC_COLLATE %(collate)s')
        if lc_ctype:
            query_fragments.append('LC_CTYPE %(ctype)s')
        query = ' '.join(query_fragments)
        cursor.execute(query, params)
        return True
    else:
        db_info = get_db_info(cursor, db)
        # Immutable properties first: refuse to "change" them.
        if (encoding and
                get_encoding_id(cursor, encoding) != db_info['encoding_id']):
            raise NotSupportedError(
                'Changing database encoding is not supported. '
                'Current encoding: %s' % db_info['encoding']
            )
        elif lc_collate and lc_collate != db_info['lc_collate']:
            raise NotSupportedError(
                'Changing LC_COLLATE is not supported. '
                'Current LC_COLLATE: %s' % db_info['lc_collate']
            )
        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
            raise NotSupportedError(
                'Changing LC_CTYPE is not supported.'
                'Current LC_CTYPE: %s' % db_info['lc_ctype']
            )
        elif owner and owner != db_info['owner']:
            # Ownership is the only property we can actually change.
            return set_owner(cursor, db, owner)
        else:
            return False
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
    """Report whether database *db* already satisfies the requested settings.

    Empty/None request values are treated as "don't care". The *template*
    argument is accepted for signature parity with db_create but is not
    comparable after creation, so it is ignored.
    """
    if not db_exists(cursor, db):
        return False
    db_info = get_db_info(cursor, db)
    # Compare each requested property against the live database; any
    # mismatch means the database does not match.
    if encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']:
        return False
    if lc_collate and lc_collate != db_info['lc_collate']:
        return False
    if lc_ctype and lc_ctype != db_info['lc_ctype']:
        return False
    if owner and owner != db_info['owner']:
        return False
    return True
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point: connect to PostgreSQL and ensure the requested
    database state (present/absent), honoring check mode."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default=""),
            login_host=dict(default=""),
            login_unix_socket=dict(default=""),
            port=dict(default="5432"),
            db=dict(required=True, aliases=['name']),
            owner=dict(default=""),
            template=dict(default=""),
            encoding=dict(default=""),
            lc_collate=dict(default=""),
            lc_ctype=dict(default=""),
            state=dict(default="present", choices=["absent", "present"]),
        ),
        supports_check_mode = True
    )

    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")

    db = module.params["db"]
    port = module.params["port"]
    owner = module.params["owner"]
    template = module.params["template"]
    encoding = module.params["encoding"]
    lc_collate = module.params["lc_collate"]
    lc_ctype = module.params["lc_ctype"]
    state = module.params["state"]
    changed = False

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host":"host",
        "login_user":"user",
        "login_password":"password",
        "port":"port"
    }
    kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
              if k in params_map and v != '' )

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    try:
        # Connect to the maintenance database, since the target database may
        # not exist yet.
        db_connection = psycopg2.connect(database="postgres", **kw)
        # Enable autocommit so we can create databases
        if psycopg2.__version__ >= '2.4.2':
            db_connection.autocommit = True
        else:
            db_connection.set_isolation_level(psycopg2
                                              .extensions
                                              .ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except Exception, e:
        module.fail_json(msg="unable to connect to database: %s" % e)

    try:
        if module.check_mode:
            # Check mode: report whether a change WOULD happen, touch nothing.
            if state == "absent":
                changed = not db_exists(cursor, db)
            elif state == "present":
                changed = not db_matches(cursor, db, owner, template, encoding,
                                         lc_collate, lc_ctype)
            module.exit_json(changed=changed,db=db)

        if state == "absent":
            try:
                changed = db_delete(cursor, db)
            except SQLParseError, e:
                module.fail_json(msg=str(e))
        elif state == "present":
            try:
                changed = db_create(cursor, db, owner, template, encoding,
                                    lc_collate, lc_ctype)
            except SQLParseError, e:
                module.fail_json(msg=str(e))
            except NotSupportedError, e:
                module.fail_json(msg=str(e))
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception, e:
        module.fail_json(msg="Database query failed: %s" % e)

    module.exit_json(changed=changed, db=db)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
# Standard Ansible pattern: only execute the module when run as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
zaffra/Donate | django/core/servers/basehttp.py | 12 | 24677 | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
import re
import sys
import urllib
import warnings
from django.core.management.color import color_style
from django.utils.http import http_date
from django.utils._os import safe_join
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views import static
# Version banner pieces; combined into the SERVER_SOFTWARE CGI variable and
# the default "Server:" response header (e.g. "WSGIServer/0.1 Python/2.6.5").
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
    """Raised when the development WSGI server cannot start, e.g. the
    requested address is already in use or cannot be bound."""
    pass
class FileWrapper(object):
    """Wrapper to convert file-like objects to iterables.

    Yields successive blocks of at most ``blksize`` bytes read from the
    wrapped file.  Supports the iterator protocol on both Python 2
    (``next``) and Python 3 (``__next__``), plus the legacy sequence
    protocol (``__getitem__``) used by very old servers.
    """
    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        # Expose the wrapped file's close() so the server can release it.
        if hasattr(filelike,'close'):
            self.close = filelike.close
    def __getitem__(self,key):
        # Legacy sequence iteration: the index is ignored, data is read
        # sequentially, and IndexError signals exhaustion.
        data = self.filelike.read(self.blksize)
        if data:
            return data
        raise IndexError
    def __iter__(self):
        return self
    def next(self):
        data = self.filelike.read(self.blksize)
        if data:
            return data
        raise StopIteration
    # Bug fix: __iter__ returns self, but only the Python 2 `next` method
    # existed, so iteration broke under Python 3.  Alias it.
    __next__ = next
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
    """Manage a collection of HTTP response headers.

    Header names are matched case-insensitively throughout.  Fields that
    are deleted and re-inserted are always appended to the header list.
    """
    def __init__(self, headers):
        if not isinstance(headers, list):
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)
    def __setitem__(self, name, val):
        """Set the value of a header, replacing any existing occurrences."""
        del self[name]
        self._headers.append((name, val))
    def __delitem__(self, name):
        """Delete all occurrences of a header, if present.

        Does *not* raise an exception if the header is missing.
        """
        name = name.lower()
        # Bug fix: `<>` is Python-2-only deprecated syntax; `!=` is
        # equivalent on Python 2 and also valid on Python 3.
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
    def __getitem__(self, name):
        """Get the first header value for 'name'.

        Return None if the header is missing instead of raising an exception.
        Note that if the header appears multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        return self.get(name)
    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None
    __contains__ = has_key
    def get_all(self, name):
        """Return a list of all the values for the named field.

        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates. Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = name.lower()
        return [kv[1] for kv in self._headers if kv[0].lower() == name]
    def get(self, name, default=None):
        """Get the first header value for 'name', or return 'default'."""
        name = name.lower()
        for k, v in self._headers:
            if k.lower() == name:
                return v
        return default
    def keys(self):
        """Return a list of all the header field names.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        """
        return [k for k, v in self._headers]
    def values(self):
        """Return a list of all header values, in original order
        (may contain duplicates)."""
        return [v for k, v in self._headers]
    def items(self):
        """Get all the header fields and values as a list of tuples,
        in original order (may contain duplicates)."""
        return self._headers[:]
    def __repr__(self):
        # Bug fix: backtick-repr is Python-2-only deprecated syntax;
        # repr() is the portable equivalent.
        return "Headers(%s)" % repr(self._headers)
    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers] + ['', ''])
    def setdefault(self, name, value):
        """Return first matching header value for 'name', or 'value'.

        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((name, value))
            return value
        else:
            return result
    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        _name is the header field to add. keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes. Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.

        Example:
        h.add_header('content-disposition', 'attachment', filename='bud.gif')

        Note that unlike the corresponding 'email.Message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            parts.append(_value)
        for k, v in _params.items():
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or
    'https', based on the CGI-style HTTPS environment variable."""
    return 'https' if environ.get("HTTPS") in ('yes', 'on', '1') else 'http'
# Headers that apply only to a single transport-level connection and must
# not be forwarded by proxies (RFC 2616, section 13.5.1).
_hop_headers = dict.fromkeys((
    'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization',
    'te', 'trailers', 'transfer-encoding', 'upgrade',
), 1)
def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header."""
    return header_name.lower() in _hop_headers
class ServerHandler(object):
    """Manage the invocation of a WSGI application"""
    # Configuration parameters; can override per-subclass or per-instance
    wsgi_version = (1,0)
    wsgi_multithread = True
    wsgi_multiprocess = True
    wsgi_run_once = False
    origin_server = True # We are transmitting direct to client
    http_version = "1.0" # Version that should be used for response
    server_software = software_version
    # os_environ is used to supply configuration from the OS environment:
    # by default it's a copy of 'os.environ' as of import time, but you can
    # override this in e.g. your __init__ method.
    os_environ = dict(os.environ.items())
    # Collaborator classes
    wsgi_file_wrapper = FileWrapper # set to None to disable
    headers_class = Headers # must be a Headers-like class
    # Error handling (also per-subclass or per-instance)
    traceback_limit = None # Print entire traceback to self.get_stderr()
    error_status = "500 INTERNAL SERVER ERROR"
    error_headers = [('Content-Type','text/plain')]
    # State variables (don't mess with these)
    status = result = None
    headers_sent = False
    headers = None
    bytes_sent = 0
    def __init__(self, stdin, stdout, stderr, environ, multithread=True,
            multiprocess=False):
        # stdin/stdout/stderr are the request's file-like streams; environ
        # is the base (server-level) environment, merged per-request later.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.base_env = environ
        self.wsgi_multithread = multithread
        self.wsgi_multiprocess = multiprocess
    def run(self, application):
        """Invoke the application"""
        # Note to self: don't move the close()! Asynchronous servers shouldn't
        # call close() from finish_response(), so if you close() anywhere but
        # the double-error branch here, you'll break asynchronous servers by
        # prematurely closing. Async servers must return from 'run()' without
        # closing if there might still be output to iterate over.
        try:
            self.setup_environ()
            self.result = application(self.environ, self.start_response)
            self.finish_response()
        except:
            try:
                self.handle_error()
            except:
                # If we get an error handling an error, just give up already!
                self.close()
                raise # ...and let the actual server figure it out.
    def setup_environ(self):
        """Set up the environment for one request"""
        # Per-request environ = OS env + CGI vars + WSGI-mandated keys.
        env = self.environ = self.os_environ.copy()
        self.add_cgi_vars()
        env['wsgi.input'] = self.get_stdin()
        env['wsgi.errors'] = self.get_stderr()
        env['wsgi.version'] = self.wsgi_version
        env['wsgi.run_once'] = self.wsgi_run_once
        env['wsgi.url_scheme'] = self.get_scheme()
        env['wsgi.multithread'] = self.wsgi_multithread
        env['wsgi.multiprocess'] = self.wsgi_multiprocess
        if self.wsgi_file_wrapper is not None:
            env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
        if self.origin_server and self.server_software:
            env.setdefault('SERVER_SOFTWARE',self.server_software)
    def finish_response(self):
        """
        Send any iterable data, then close self and the iterable

        Subclasses intended for use in asynchronous servers will want to
        redefine this method, such that it sets up callbacks in the event loop
        to iterate over the data, and to call 'self.close()' once the response
        is finished.
        """
        if not self.result_is_file() or not self.sendfile():
            for data in self.result:
                self.write(data)
            self.finish_content()
        self.close()
    def get_scheme(self):
        """Return the URL scheme being used"""
        return guess_scheme(self.environ)
    def set_content_length(self):
        """Compute Content-Length or switch to chunked encoding if possible"""
        try:
            blocks = len(self.result)
        except (TypeError, AttributeError, NotImplementedError):
            pass
        else:
            # Only safe to infer a length when the whole body was one block.
            if blocks==1:
                self.headers['Content-Length'] = str(self.bytes_sent)
                return
        # XXX Try for chunked encoding if origin server and client is 1.1
    def cleanup_headers(self):
        """Make any necessary header changes or defaults

        Subclasses can extend this to add other defaults.
        """
        if 'Content-Length' not in self.headers:
            self.set_content_length()
    def start_response(self, status, headers,exc_info=None):
        """'start_response()' callable as specified by PEP 333"""
        if exc_info:
            try:
                if self.headers_sent:
                    # Re-raise original exception if headers sent
                    # (Python-2-only three-expression raise syntax.)
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None # avoid dangling circular ref
        elif self.headers is not None:
            raise AssertionError("Headers already set!")
        assert isinstance(status, str),"Status must be a string"
        assert len(status)>=4,"Status must be at least 4 characters"
        assert int(status[:3]),"Status message must begin w/3-digit code"
        assert status[3]==" ", "Status message must have a space after code"
        if __debug__:
            for name,val in headers:
                assert isinstance(name, str),"Header names must be strings"
                assert isinstance(val, str),"Header values must be strings"
                assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
        self.status = status
        self.headers = self.headers_class(headers)
        return self.write
    def send_preamble(self):
        """Transmit version/status/date/server, via self._write()"""
        if self.origin_server:
            if self.client_is_modern():
                self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
                if 'Date' not in self.headers:
                    self._write(
                        'Date: %s\r\n' % http_date()
                    )
                if self.server_software and 'Server' not in self.headers:
                    self._write('Server: %s\r\n' % self.server_software)
        else:
            # Gateway (CGI-style) mode: emit a Status header instead.
            self._write('Status: %s\r\n' % self.status)
    def write(self, data):
        """'write()' callable as specified by PEP 333"""
        assert isinstance(data, str), "write() argument must be string"
        if not self.status:
            raise AssertionError("write() before start_response()")
        elif not self.headers_sent:
            # Before the first output, send the stored headers
            self.bytes_sent = len(data) # make sure we know content-length
            self.send_headers()
        else:
            self.bytes_sent += len(data)
        # XXX check Content-Length and truncate if too many bytes written?
        # If data is too large, socket will choke, so write chunks no larger
        # than 32MB at a time.
        length = len(data)
        if length > 33554432:
            offset = 0
            while offset < length:
                # NOTE(review): chunk_size ignores `offset`; harmless because
                # slicing clamps at the end, but min(33554432, length - offset)
                # was likely intended.
                chunk_size = min(33554432, length)
                self._write(data[offset:offset+chunk_size])
                self._flush()
                offset += chunk_size
        else:
            self._write(data)
            self._flush()
    def sendfile(self):
        """Platform-specific file transmission

        Override this method in subclasses to support platform-specific
        file transmission. It is only called if the application's
        return iterable ('self.result') is an instance of
        'self.wsgi_file_wrapper'.

        This method should return a true value if it was able to actually
        transmit the wrapped file-like object using a platform-specific
        approach. It should return a false value if normal iteration
        should be used instead. An exception can be raised to indicate
        that transmission was attempted, but failed.

        NOTE: this method should call 'self.send_headers()' if
        'self.headers_sent' is false and it is going to attempt direct
        transmission of the file1.
        """
        return False # No platform-specific transmission by default
    def finish_content(self):
        """Ensure headers and content have both been sent"""
        if not self.headers_sent:
            self.headers['Content-Length'] = "0"
            self.send_headers()
        else:
            pass # XXX check if content-length was too short?
    def close(self):
        # Log the request, close the application's result iterable if it
        # supports close(), then reset all per-request state.
        try:
            self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
        finally:
            try:
                if hasattr(self.result,'close'):
                    self.result.close()
            finally:
                self.result = self.headers = self.status = self.environ = None
                self.bytes_sent = 0; self.headers_sent = False
    def send_headers(self):
        """Transmit headers to the client, via self._write()"""
        self.cleanup_headers()
        self.headers_sent = True
        if not self.origin_server or self.client_is_modern():
            self.send_preamble()
            self._write(str(self.headers))
    def result_is_file(self):
        """True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
        wrapper = self.wsgi_file_wrapper
        return wrapper is not None and isinstance(self.result,wrapper)
    def client_is_modern(self):
        """True if client can accept status and headers"""
        return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
    def log_exception(self,exc_info):
        """Log the 'exc_info' tuple in the server log

        Subclasses may override to retarget the output or change its format.
        """
        try:
            from traceback import print_exception
            stderr = self.get_stderr()
            print_exception(
                exc_info[0], exc_info[1], exc_info[2],
                self.traceback_limit, stderr
            )
            stderr.flush()
        finally:
            exc_info = None
    def handle_error(self):
        """Log current error, and send error output to client if possible"""
        self.log_exception(sys.exc_info())
        if not self.headers_sent:
            self.result = self.error_output(self.environ, self.start_response)
            self.finish_response()
        # XXX else: attempt advanced recovery techniques for HTML or text?
    def error_output(self, environ, start_response):
        # Produce a plain-text traceback body for the 500 response.
        import traceback
        start_response(self.error_status, self.error_headers[:], sys.exc_info())
        return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
    # Pure abstract methods; *must* be overridden in subclasses
    def _write(self,data):
        self.stdout.write(data)
        # Rebind to stdout.write after first call to skip future dispatch.
        self._write = self.stdout.write
    def _flush(self):
        self.stdout.flush()
        self._flush = self.stdout.flush
    def get_stdin(self):
        return self.stdin
    def get_stderr(self):
        return self.stderr
    def add_cgi_vars(self):
        # Merge the per-request CGI variables captured by the request handler.
        self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
    """BaseHTTPServer that implements the Python WSGI protocol"""
    # The WSGI application callable; set via set_app() before serving.
    application = None
    def server_bind(self):
        """Override server_bind to store the server name."""
        try:
            HTTPServer.server_bind(self)
        except Exception, e:
            # Wrap bind errors (port in use, bad address) so callers can
            # catch a single, server-specific exception type.
            raise WSGIServerException(e)
        self.setup_environ()
    def setup_environ(self):
        # Set up base environment
        # (shared, per-server CGI variables; per-request ones are added by
        # WSGIRequestHandler.get_environ()).
        env = self.base_environ = {}
        env['SERVER_NAME'] = self.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PORT'] = str(self.server_port)
        env['REMOTE_HOST']=''
        env['CONTENT_LENGTH']=''
        env['SCRIPT_NAME'] = ''
    def get_app(self):
        return self.application
    def set_app(self,application):
        self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
    # Handles one HTTP request: builds the WSGI environ from the raw request
    # and hands it to a ServerHandler.  Python 2 only (mimetools header API).
    server_version = "WSGIServer/" + __version__
    def __init__(self, *args, **kwargs):
        from django.conf import settings
        self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
        # We set self.path to avoid crashes in log_message() on unsupported
        # requests (like "OPTIONS").
        self.path = ''
        self.style = color_style()
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
    def get_environ(self):
        # Build the per-request CGI/WSGI environment on top of the server's
        # base environ.
        env = self.server.base_environ.copy()
        env['SERVER_PROTOCOL'] = self.request_version
        env['REQUEST_METHOD'] = self.command
        if '?' in self.path:
            path,query = self.path.split('?',1)
        else:
            path,query = self.path,''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]
        # self.headers is a Python 2 mimetools.Message; typeheader is the raw
        # Content-Type line, type is the parsed fallback.
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        for h in self.headers.headers:
            k,v = h.split(':',1)
            k=k.replace('-','_').upper(); v=v.strip()
            if k in env:
                continue # skip content length, type,etc.
            if 'HTTP_'+k in env:
                env['HTTP_'+k] += ','+v # comma-separate multiple headers
            else:
                env['HTTP_'+k] = v
        return env
    def get_stderr(self):
        return sys.stderr
    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request(): # An error code has been sent, just exit
            return
        handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
        handler.request_handler = self # backpointer for logging
        handler.run(self.server.get_app())
    def log_message(self, format, *args):
        # args is (requestline, status_code_str, size_str) when called from
        # log_request(); args[1] is the status code used for colorizing.
        # Don't bother logging requests for admin images or the favicon.
        if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
            return
        msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
        # Utilize terminal colors, if available
        if args[1][0] == '2':
            # Put 2XX first, since it should be the common case
            msg = self.style.HTTP_SUCCESS(msg)
        elif args[1][0] == '1':
            msg = self.style.HTTP_INFO(msg)
        elif args[1] == '304':
            msg = self.style.HTTP_NOT_MODIFIED(msg)
        elif args[1][0] == '3':
            msg = self.style.HTTP_REDIRECT(msg)
        elif args[1] == '404':
            msg = self.style.HTTP_NOT_FOUND(msg)
        elif args[1][0] == '4':
            msg = self.style.HTTP_BAD_REQUEST(msg)
        else:
            # Any 5XX, or any other response
            msg = self.style.HTTP_SERVER_ERROR(msg)
        sys.stderr.write(msg)
class AdminMediaHandler(StaticFilesHandler):
    """
    WSGI middleware that intercepts calls to the admin media directory, as
    defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
    Use this ONLY LOCALLY, for development! This hasn't been tested for
    security and is not super efficient.
    """
    def get_media_dir(self):
        # Serve files shipped inside the installed django package itself.
        import django
        return os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
    def get_media_url(self):
        from django.conf import settings
        return settings.ADMIN_MEDIA_PREFIX
    def file_path(self, url):
        """
        Returns the path to the media file on disk for the given URL.

        The passed URL is assumed to begin with ``media_url``.  If the
        resultant file path is outside the media directory, then a ValueError
        is raised.
        """
        # Remove ``media_url``.
        # NOTE(review): media_url[2] suggests media_url is a urlsplit()-style
        # tuple and index 2 is its path component — confirm in the base class.
        relative_url = url[len(self.media_url[2]):]
        relative_path = urllib.url2pathname(relative_url)
        # safe_join raises ValueError on path traversal outside media_dir.
        return safe_join(self.media_dir, relative_path)
    def serve(self, request):
        document_root, path = os.path.split(self.file_path(request.path))
        return static.serve(request, path,
            document_root=document_root, insecure=True)
    def _should_handle(self, path):
        """
        Checks if the path should be handled. Ignores the path if:

        * the host is provided as part of the media_url
        * the request's path isn't under the media path
        """
        return path.startswith(self.media_url[2]) and not self.media_url[1]
def run(addr, port, wsgi_handler):
    """Start a development WSGIServer on (addr, port) serving
    *wsgi_handler* and block forever handling requests."""
    httpd = WSGIServer((addr, port), WSGIRequestHandler)
    httpd.set_app(wsgi_handler)
    httpd.serve_forever()
| bsd-3-clause |
edx/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/shortcuts.py | 317 | 1161 | import cStringIO, zipfile
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
    "Returns compressed KMZ from the given KML string."
    buf = cStringIO.StringIO()
    # A KMZ archive is just a deflated zip containing a single doc.kml entry.
    archive = zipfile.ZipFile(buf, 'a', zipfile.ZIP_DEFLATED)
    archive.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    archive.close()
    buf.seek(0)
    return buf.read()
def render_to_kml(*args, **kwargs):
    "Renders the response as KML (using the correct MIME type)."
    body = loader.render_to_string(*args, **kwargs)
    return HttpResponse(body,
        mimetype='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
    """
    Compresses the KML content and returns as KMZ (using the correct
    MIME type).
    """
    body = compress_kml(loader.render_to_string(*args, **kwargs))
    return HttpResponse(body,
        mimetype='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
    "Renders the response using the MIME type for plain text."
    body = loader.render_to_string(*args, **kwargs)
    return HttpResponse(body, mimetype='text/plain')
| gpl-3.0 |
tealover/nova | nova/virt/vmwareapi/io_util.py | 14 | 6322 | # Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility classes for defining the time saving transfer of data from the reader
to the write using a LightQueue as a Pipe between the reader and the writer.
"""
from eventlet import event
from eventlet import greenthread
from eventlet import queue
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LE
from nova import image
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
IO_THREAD_SLEEP_TIME = .01
GLANCE_POLL_INTERVAL = 5
class ThreadSafePipe(queue.LightQueue):
    """The pipe to hold the data which the reader writes to and the writer
    reads from.
    """
    def __init__(self, maxsize, transfer_size):
        queue.LightQueue.__init__(self, maxsize)
        # Total number of bytes expected to flow through the pipe;
        # 0 means the size is unknown and read() never reports exhaustion.
        self.transfer_size = transfer_size
        # Running count of bytes handed out by read() so far.
        self.transferred = 0
    def read(self, chunk_size):
        """Read data from the pipe.

        ``chunk_size`` is ignored because we have ensured that the data
        chunks written to the pipe by the reader are the same size as the
        chunks asked for by the writer.
        """
        if self.transfer_size == 0 or self.transferred < self.transfer_size:
            # Blocks (green-thread friendly) until a chunk is available.
            data_item = self.get()
            self.transferred += len(data_item)
            return data_item
        else:
            # Everything announced by transfer_size has been consumed.
            return ""
    def write(self, data):
        """Put a data item in the pipe."""
        self.put(data)
    def seek(self, offset, whence=0):
        """Set the file's current position at the offset."""
        pass
    def tell(self):
        """Get size of the file to be read."""
        return self.transfer_size
    def close(self):
        """A place-holder to maintain consistency."""
        pass
class GlanceWriteThread(object):
    """Ensures that image data is written to in the glance client and that
    it is in correct ('active')state.
    """
    def __init__(self, context, input, image_id,
                 image_meta=None):
        if not image_meta:
            image_meta = {}
        self.context = context
        self.input = input
        self.image_id = image_id
        self.image_meta = image_meta
        self._running = False
    def start(self):
        # Spawns a green thread that uploads the data and then polls glance
        # until the image reaches a terminal state; returns an Event that
        # callers wait() on for the outcome.
        self.done = event.Event()
        def _inner():
            """Function to do the image data transfer through an update
            and thereon checks if the state is 'active'.
            """
            try:
                IMAGE_API.update(self.context,
                                 self.image_id,
                                 self.image_meta,
                                 data=self.input)
                self._running = True
            except exception.ImageNotAuthorized as exc:
                self.done.send_exception(exc)
            # Poll the image status until it becomes active/killed/unknown.
            while self._running:
                try:
                    image_meta = IMAGE_API.get(self.context,
                                               self.image_id)
                    image_status = image_meta.get("status")
                    if image_status == "active":
                        self.stop()
                        self.done.send(True)
                    # If the state is killed, then raise an exception.
                    elif image_status == "killed":
                        self.stop()
                        msg = (_("Glance image %s is in killed state") %
                               self.image_id)
                        LOG.error(msg)
                        self.done.send_exception(exception.NovaException(msg))
                    elif image_status in ["saving", "queued"]:
                        # Still in progress: wait before polling again.
                        greenthread.sleep(GLANCE_POLL_INTERVAL)
                    else:
                        self.stop()
                        msg = _("Glance image "
                                "%(image_id)s is in unknown state "
                                "- %(state)s") % {
                                    "image_id": self.image_id,
                                    "state": image_status}
                        LOG.error(msg)
                        self.done.send_exception(exception.NovaException(msg))
                except Exception as exc:
                    self.stop()
                    self.done.send_exception(exc)
        greenthread.spawn(_inner)
        return self.done
    def stop(self):
        # Flag checked by the polling loop; stops it on next iteration.
        self._running = False
    def wait(self):
        return self.done.wait()
    def close(self):
        pass
class IOThread(object):
    """Class that reads chunks from the input file and writes them to the
    output file till the transfer is completely done.
    """
    def __init__(self, input, output):
        self.input = input
        self.output = output
        self._running = False
        # NOTE(review): got_exception is set here but never updated in this
        # class — appears vestigial.
        self.got_exception = False
    def start(self):
        # Spawns a green thread that pumps data and returns an Event the
        # caller can wait() on.
        self.done = event.Event()
        def _inner():
            """Read data from the input and write the same to the output
            until the transfer completes.
            """
            self._running = True
            while self._running:
                try:
                    data = self.input.read(None)
                    if not data:
                        self.stop()
                        self.done.send(True)
                    # NOTE(review): on the empty-data branch this still writes
                    # the empty chunk and sleeps once before the loop exits;
                    # harmless but a `continue`/`break` was likely intended.
                    self.output.write(data)
                    # Yield to other green threads between chunks.
                    greenthread.sleep(IO_THREAD_SLEEP_TIME)
                except Exception as exc:
                    self.stop()
                    LOG.exception(_LE('Read/Write data failed'))
                    self.done.send_exception(exc)
        greenthread.spawn(_inner)
        return self.done
    def stop(self):
        self._running = False
    def wait(self):
        return self.done.wait()
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/campaign_label_service.py | 1 | 4908 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.resources.types import campaign_label
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetCampaignLabelRequest',
'MutateCampaignLabelsRequest',
'CampaignLabelOperation',
'MutateCampaignLabelsResponse',
'MutateCampaignLabelResult',
},
)
class GetCampaignLabelRequest(proto.Message):
    r"""Request message for
    [CampaignLabelService.GetCampaignLabel][google.ads.googleads.v6.services.CampaignLabelService.GetCampaignLabel].

    Attributes:
        resource_name (str):
            Required. The resource name of the campaign-
            label relationship to fetch.
    """
    # Field number matches the tag in campaign_label_service.proto.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
class MutateCampaignLabelsRequest(proto.Message):
    r"""Request message for
    [CampaignLabelService.MutateCampaignLabels][google.ads.googleads.v6.services.CampaignLabelService.MutateCampaignLabels].

    Attributes:
        customer_id (str):
            Required. ID of the customer whose campaign-
            label relationships are being modified.
        operations (Sequence[google.ads.googleads.v6.services.types.CampaignLabelOperation]):
            Required. The list of operations to perform
            on campaign-label relationships.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
    """
    # Field numbers match the tags in campaign_label_service.proto.
    customer_id = proto.Field(
        proto.STRING,
        number=1,
    )
    operations = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='CampaignLabelOperation',
    )
    partial_failure = proto.Field(
        proto.BOOL,
        number=3,
    )
    validate_only = proto.Field(
        proto.BOOL,
        number=4,
    )
class CampaignLabelOperation(proto.Message):
    r"""A single operation (create, remove) on a campaign-label
    relationship.

    Attributes:
        create (google.ads.googleads.v6.resources.types.CampaignLabel):
            Create operation: No resource name is
            expected for the new campaign-label
            relationship.
        remove (str):
            Remove operation: A resource name for the campaign-label
            relationship being removed, in this format:

            ``customers/{customer_id}/campaignLabels/{campaign_id}~{label_id}``
    """
    # `create` and `remove` share the proto `operation` oneof: exactly one
    # may be set per operation.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof='operation',
        message=campaign_label.CampaignLabel,
    )
    remove = proto.Field(
        proto.STRING,
        number=2,
        oneof='operation',
    )
class MutateCampaignLabelsResponse(proto.Message):
    r"""Response message for a campaign labels mutate.

    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (e.g. auth errors), we return an RPC
            level error.
        results (Sequence[google.ads.googleads.v6.services.types.MutateCampaignLabelResult]):
            All results for the mutate.
    """
    # Field numbers match the tags in campaign_label_service.proto.
    partial_failure_error = proto.Field(
        proto.MESSAGE,
        number=3,
        message=status_pb2.Status,
    )
    results = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='MutateCampaignLabelResult',
    )
class MutateCampaignLabelResult(proto.Message):
    r"""The result for a campaign label mutate.
    Attributes:
        resource_name (str):
            Returned for successful operations.
    """
    # Empty for failed operations; see partial_failure_error on the response.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
XiaodunServerGroup/medicalmooc | cms/djangoapps/contentstore/features/component.py | 7 | 6270 | #pylint: disable=C0111
#pylint: disable=W0621
# Lettuce formats proposed definitions for unimplemented steps with the
# argument name "step" instead of "_step" and pylint does not like that.
#pylint: disable=W0613
from lettuce import world, step
from nose.tools import assert_true, assert_in # pylint: disable=E0611
DISPLAY_NAME = "Display Name"
@step(u'I add this type of single step component:$')
def add_a_single_step_component(step):
    """Create each single-step component (Discussion or Video) listed in the scenario table."""
    for row in step.hashes:
        component_name = row['Component']
        # Only these two component types can be added in a single step.
        assert_in(component_name, ['Discussion', 'Video'])
        world.create_component_instance(step=step, category=component_name.lower())
@step(u'I see this type of single step component:$')
def see_a_single_step_component(step):
    # Verify each component listed in the scenario's hash table was rendered.
    for step_hash in step.hashes:
        component = step_hash['Component']
        assert_in(component, ['Discussion', 'Video'])
        # Each xmodule renders inside a div whose class names the module type.
        component_css = 'div.xmodule_{}Module'.format(component)
        assert_true(world.is_css_present(component_css),
                    "{} couldn't be found".format(component))
@step(u'I add this type of( Advanced)? (HTML|Problem) component:$')
def add_a_multi_step_component(step, is_advanced, category):
    # ``is_advanced`` is the optional " Advanced" regex capture (None when
    # absent); ``category`` is "HTML" or "Problem".
    for step_hash in step.hashes:
        world.create_component_instance(
            step=step,
            category='{}'.format(category.lower()),
            component_type=step_hash['Component'],
            is_advanced=bool(is_advanced),
        )
@step(u'I see (HTML|Problem) components in this order:')
def see_a_multi_step_component(step, category):
    # Wait for all components to finish rendering
    selector = 'li.component div.xblock-student_view'
    world.wait_for(lambda _: len(world.css_find(selector)) == len(step.hashes))
    for idx, step_hash in enumerate(step.hashes):
        if category == 'HTML':
            # HTML components are matched by a characteristic markup snippet
            # taken from each template's rendered output.
            html_matcher = {
                'Text':
                '\n    \n',
                'Announcement':
                '<p> Words of encouragement! This is a short note that most students will read. </p>',
                'Zooming Image':
                '<h2>ZOOMING DIAGRAMS</h2>',
                'E-text Written in LaTeX':
                '<h2>Example: E-text page</h2>',
            }
            actual_html = world.css_html(selector, index=idx)
            assert_in(html_matcher[step_hash['Component']], actual_html)
        else:
            # Problem components render their display name in upper case.
            actual_text = world.css_text(selector, index=idx)
            assert_in(step_hash['Component'].upper(), actual_text)
@step(u'I see a "([^"]*)" Problem component$')
def see_a_problem_component(step, category):
component_css = 'div.xmodule_CapaModule'
assert_true(world.is_css_present(component_css),
'No problem was added to the unit.')
problem_css = 'li.component div.xblock-student_view'
actual_text = world.css_text(problem_css)
assert_in(category.upper(), actual_text)
@step(u'I add a "([^"]*)" "([^"]*)" component$')
def add_component_category(step, component, category):
assert category in ('single step', 'HTML', 'Problem', 'Advanced Problem')
given_string = 'I add this type of {} component:'.format(category)
step.given('{}\n{}\n{}'.format(given_string, '|Component|', '|{}|'.format(component)))
@step(u'I delete all components$')
def delete_all_components(step):
    # Count the current components, then delegate to the parameterized step.
    count = len(world.css_find('ol.components li.component'))
    step.given('I delete "' + str(count) + '" component')
@step(u'I delete "([^"]*)" component$')
def delete_components(step, number):
world.wait_for_xmodule()
delete_btn_css = 'a.delete-button'
prompt_css = 'div#prompt-warning'
btn_css = '{} a.button.action-primary'.format(prompt_css)
saving_mini_css = 'div#page-notification .wrapper-notification-mini'
for _ in range(int(number)):
world.css_click(delete_btn_css)
assert_true(
world.is_css_present('{}.is-shown'.format(prompt_css)),
msg='Waiting for the confirmation prompt to be shown')
# Pressing the button via css was not working reliably for the last component
# when run in Chrome.
if world.browser.driver_name is 'Chrome':
world.browser.execute_script("$('{}').click()".format(btn_css))
else:
world.css_click(btn_css)
# Wait for the saving notification to pop up then disappear
if world.is_css_present('{}.is-shown'.format(saving_mini_css)):
world.css_find('{}.is-hiding'.format(saving_mini_css))
@step(u'I see no components')
def see_no_components(step):
    """Assert that the unit contains no components at all."""
    # Parameter renamed from ``steps`` to ``step`` for consistency with every
    # other step definition in this file; assert_true gives a clearer failure
    # message than a bare assert.
    assert_true(world.is_css_not_present('li.component'),
                'Expected no components to be present')
@step(u'I delete a component')
def delete_one_component(step):
    # Unlike the parameterized delete step above, this does not confirm the
    # prompt or wait for the save notification.
    world.css_click('a.delete-button')
@step(u'I edit and save a component')
def edit_and_save_component(step):
    # Open the component editor and immediately save without making changes.
    world.css_click('.edit-button')
    world.css_click('.save-button')
@step(u'I duplicate the (first|second|third) component$')
def duplicated_component(step, ordinal):
    """Click the duplicate button on the component at the given ordinal position."""
    position = {'first': 0, 'second': 1, 'third': 2}[ordinal]
    world.css_click('a.duplicate-button', position)
@step(u'I see a Problem component with display name "([^"]*)" in position "([^"]*)"$')
def see_component_in_position(step, display_name, index):
component_css = 'div.xmodule_CapaModule'
def find_problem(_driver):
return world.css_text(component_css, int(index)).startswith(display_name.upper())
world.wait_for(find_problem, timeout_msg='Did not find the duplicated problem')
@step(u'I see the display name is "([^"]*)"')
def check_component_display_name(step, display_name):
label = world.css_text(".component-header")
assert display_name == label
@step(u'I change the display name to "([^"]*)"')
def change_display_name(step, display_name):
world.edit_component_and_select_settings()
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, display_name)
world.save_component(step)
@step(u'I unset the display name')
def unset_display_name(step):
    # Revert Display Name to its default via the settings editor, then save.
    world.edit_component_and_select_settings()
    world.revert_setting_entry(DISPLAY_NAME)
    world.save_component(step)
| agpl-3.0 |
billy-inn/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
# Shuffle with a fixed seed so the train/test split below is reproducible.
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
# Hold out the last 10% of the shuffled data as the test set.
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
# NOTE(review): the APIs used here (min_samples_split=1,
# staged_decision_function on a regressor, clf.loss_) are tied to the
# scikit-learn version this example ships with; newer releases require
# min_samples_split >= 2 and use staged_predict instead.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance at each boosting stage
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
etashjian/ECE757-final | src/arch/x86/isa/insts/simd128/integer/data_transfer/move_mask.py | 91 | 2266 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# gem5 microcode for PMOVMSKB (reg <- XMM): zero the destination, then pack
# the sign bit of each byte of the low (xmmlm) and high (xmmhm) halves of the
# XMM source into consecutive bits of the integer register.  The string body
# is parsed by the ISA's microcode assembler and must not be edited casually.
microcode = '''
def macroop PMOVMSKB_R_XMM {
limm reg, 0
movsign reg, xmmlm, size=1, ext=0
movsign reg, xmmhm, size=1, ext=1
};
'''
| bsd-3-clause |
DBrianKimmel/PyHouse | Project/src/_test/twisted/check-certificate.py | 1 | 1185 | #!/usr/bin/env python3
#
# call with a host url to see if we know the TLS certificate
# from __future__ import print_function
import sys
from twisted.internet import defer, endpoints, protocol, ssl, task, error
def main(reactor, host, port=443):
    """Connect to ``host:port`` over TLS and print the server certificate.

    ``reactor`` is injected by ``task.react`` below.  Note that ``ssl`` here
    is ``twisted.internet.ssl``, not the standard-library module.
    """
    # Validate the certificate chain against ``host`` (SNI + hostname check).
    options = ssl.optionsForClientTLS(hostname=host)
    port = int(port)
    class ShowCertificate(protocol.Protocol):
        def connectionMade(self):
            # Send a minimal request so the server answers at all; ``done``
            # fires when the connection finally closes.
            self.transport.write(b"GET / HTTP/1.0\r\n\r\n")
            self.done = defer.Deferred()
        def dataReceived(self, data):
            # First response bytes mean the TLS handshake succeeded: print
            # the peer certificate and drop the connection.
            l_certificate = ssl.Certificate(self.transport.getPeerCertificate())
            print("OK:", l_certificate)
            self.transport.abortConnection()
        def connectionLost(self, reason):
            print("Lost. {}".format(reason.value))
            # Anything other than a plain close (e.g. a TLS verification
            # failure) is reported as BAD.
            if not reason.check(error.ConnectionClosed):
                print("BAD:", reason.value)
            self.done.callback(None)
    # task.react waits on the returned deferred, i.e. until connectionLost.
    return endpoints.connectProtocol(
        endpoints.SSL4ClientEndpoint(reactor, host, port, options),
        ShowCertificate()
    ).addCallback(lambda protocol: protocol.done)
# Usage: check-certificate.py HOST [PORT]
task.react(main, sys.argv[1:])
# ## END DBK
| mit |
nicolasmiller/pyculiarity | setup.py | 1 | 1256 | """
Usage details and source available here: https://github.com/nicolasmiller/pyculiarity.
The original R source and examples are available here: https://github.com/twitter/AnomalyDetection.
Copyright and License
Python port Copyright 2015 Nicolas Steven Miller
Original R source Copyright 2015 Twitter, Inc and other contributors
Licensed under the GPLv3
"""
from setuptools import setup, find_packages
# Packaging metadata; the module docstring above doubles as the long
# description shown on PyPI.
setup(
    name='pyculiarity',
    version='0.0.7',
    description='A Python port of Twitter\'s AnomalyDetection R Package.',
    long_description=__doc__,
    url='https://github.com/nicolasmiller/pyculiarity',
    author='Nicolas Steven Miller',
    author_email='nicolasmiller@gmail.com',
    license='GPL',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='data anomaly detection pandas timeseries',
    packages=['pyculiarity'],
    install_requires=['numpy', 'scipy', 'pandas', 'pytz',
                      'statsmodels', 'rstl'],
    extras_require={
        'test': ['nose', 'mock']
    }
)
| gpl-3.0 |
beckett1124/Paddle | demo/sentiment/trainer_config.py | 7 | 1453 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sentiment_net import *
from paddle.trainer_config_helpers import *
# whether this config is used for test
is_test = get_config_arg('is_test', bool, False)
# whether this config is used for prediction
is_predict = get_config_arg('is_predict', bool, False)
# Location of the preprocessed IMDB dataset produced by the prep scripts.
data_dir = "./data/pre-imdb"
dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict)
################## Algorithm Config #####################
settings(
    batch_size=128,
    learning_rate=2e-3,
    learning_method=AdamOptimizer(),
    average_window=0.5,
    regularization=L2Regularization(8e-4),
    gradient_clipping_threshold=25)
#################### Network Config ######################
# Build a 3-layer stacked-LSTM sentiment classifier (defined in sentiment_net).
stacked_lstm_net(
    dict_dim, class_dim=class_dim, stacked_num=3, is_predict=is_predict)
# bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict)
| apache-2.0 |
liuqr/edx-xiaodun | cms/djangoapps/contentstore/features/course-updates.py | 13 | 3799 | #pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from selenium.webdriver.common.keys import Keys
from common import type_in_codemirror
from nose.tools import assert_in # pylint: disable=E0611
@step(u'I go to the course updates page')
def go_to_updates(_step):
    """Navigate to the Course Updates page via the Courseware nav menu."""
    for selector in ('li.nav-course-courseware',
                     'li.nav-course-courseware-updates a'):
        world.css_click(selector)
@step(u'I add a new update with the text "([^"]*)"$')
def add_update(_step, text):
update_css = 'a.new-update-button'
world.css_click(update_css)
change_text(text)
@step(u'I should see the update "([^"]*)"$')
def check_update(_step, text):
update_css = 'div.update-contents'
update_html = world.css_find(update_css).html
assert_in(text, update_html)
@step(u'I should not see the update "([^"]*)"$')
def check_no_update(_step, text):
update_css = 'div.update-contents'
assert world.is_css_not_present(update_css)
@step(u'I modify the text to "([^"]*)"$')
def modify_update(_step, text):
button_css = 'div.post-preview a.edit-button'
world.css_click(button_css)
change_text(text)
@step(u'I change the update from "([^"]*)" to "([^"]*)"$')
def change_existing_update(_step, before, after):
verify_text_in_editor_and_update('div.post-preview a.edit-button', before, after)
@step(u'I change the handout from "([^"]*)" to "([^"]*)"$')
def change_existing_handout(_step, before, after):
verify_text_in_editor_and_update('div.course-handouts a.edit-button', before, after)
@step(u'I delete the update$')
def click_button(_step):
    # NOTE(review): the function name is generic, but this step specifically
    # deletes the first course update.
    button_css = 'div.post-preview a.delete-button'
    world.css_click(button_css)
@step(u'I edit the date to "([^"]*)"$')
def change_date(_step, new_date):
button_css = 'div.post-preview a.edit-button'
world.css_click(button_css)
date_css = 'input.date'
date = world.css_find(date_css)
for i in range(len(date.value)):
date._element.send_keys(Keys.END, Keys.BACK_SPACE)
date._element.send_keys(new_date)
save_css = 'a.save-button'
world.css_click(save_css)
@step(u'I should see the date "([^"]*)"$')
def check_date(_step, date):
date_css = 'span.date-display'
assert date == world.css_html(date_css)
@step(u'I modify the handout to "([^"]*)"$')
def edit_handouts(_step, text):
edit_css = 'div.course-handouts > a.edit-button'
world.css_click(edit_css)
change_text(text)
@step(u'I see the handout "([^"]*)"$')
def check_handout(_step, handout):
handout_css = 'div.handouts-content'
assert handout in world.css_html(handout_css)
@step(u'I see the handout error text')
def see_handout_error(_step):
    """Assert the handout validation error message is displayed."""
    # Renamed from ``check_handout_error``: four step functions in this
    # module shared that name, each shadowing the previous definition.
    handout_error_css = 'div#handout_error'
    assert world.css_has_class(handout_error_css, 'is-shown')
@step(u'I see handout save button disabled')
def see_handout_save_disabled(_step):
    """Assert the handout editor's Save button is disabled."""
    # Renamed from ``check_handout_error`` (a name shared by four functions
    # in this module) to describe what this step actually checks.
    handout_save_button = 'form.edit-handouts-form a.save-button'
    assert world.css_has_class(handout_save_button, 'is-disabled')
@step(u'I edit the handout to "([^"]*)"$')
def edit_handouts(_step, text):
type_in_codemirror(0, text)
@step(u'I see handout save button re-enabled')
def see_handout_save_reenabled(_step):
    """Assert the handout editor's Save button is no longer disabled."""
    # Renamed from ``check_handout_error`` (a name shared by four functions
    # in this module) to describe what this step actually checks.
    handout_save_button = 'form.edit-handouts-form a.save-button'
    assert not world.css_has_class(handout_save_button, 'is-disabled')
@step(u'I save handout edit')
def save_handout_edit(_step):
    """Click Save in the handout editor."""
    # Renamed from ``check_handout_error`` (a name shared by four functions
    # in this module); this step performs the save rather than checking one.
    save_css = 'a.save-button'
    world.css_click(save_css)
def change_text(text):
    # Helper: type into the first CodeMirror editor on the page, then save.
    type_in_codemirror(0, text)
    save_css = 'a.save-button'
    world.css_click(save_css)
def verify_text_in_editor_and_update(button_css, before, after):
    # Helper: open the editor via ``button_css``, assert its current contents
    # contain ``before``, then replace them with ``after`` and save.
    world.css_click(button_css)
    text = world.css_find(".cm-string").html
    assert before in text
    change_text(after)
| agpl-3.0 |
jhayworth/config | .emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/yapf/yapflib/blank_line_calculator.py | 4 | 6343 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate the number of blank lines between top-level entities.
Calculates how many blank lines we need between classes, functions, and other
entities at the same level.
CalculateBlankLines(): the main function exported by this module.
Annotations:
newlines: The number of newlines required before the node.
"""
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
# Required newline counts stored in the NEWLINES annotation: N blank lines
# correspond to N+1 newline characters.
_NO_BLANK_LINES = 1
_ONE_BLANK_LINE = 2
_TWO_BLANK_LINES = 3
# Statement node types that may directly follow a class/function definition
# and therefore need a blank-line annotation (see DefaultNodeVisit).
_PYTHON_STATEMENTS = frozenset({
    'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt',
    'break_stmt', 'continue_stmt', 'return_stmt', 'raise_stmt', 'yield_stmt',
    'import_stmt', 'global_stmt', 'exec_stmt', 'assert_stmt', 'if_stmt',
    'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', 'nonlocal_stmt',
    'async_stmt', 'simple_stmt'
})
def CalculateBlankLines(tree):
  """Annotate each node of ``tree`` with the number of required newlines.

  The tree is modified in place.

  Arguments:
    tree: the top-level pytree node to annotate with subtypes.
  """
  _BlankLineCalculator().Visit(tree)
class _BlankLineCalculator(pytree_visitor.PyTreeVisitor):
  """_BlankLineCalculator - see file-level docstring for a description."""

  def __init__(self):
    # Nesting depth trackers: zero for both means we are at module level.
    self.class_level = 0
    self.function_level = 0
    # Line number of the most recently seen standalone comment (0 = none).
    self.last_comment_lineno = 0
    self.last_was_decorator = False
    self.last_was_class_or_function = False

  def Visit_simple_stmt(self, node):  # pylint: disable=invalid-name
    self.DefaultNodeVisit(node)
    if pytree_utils.NodeName(node.children[0]) == 'COMMENT':
      self.last_comment_lineno = node.children[0].lineno

  def Visit_decorator(self, node):  # pylint: disable=invalid-name
    # A comment on the line directly above the decorator belongs to it, so
    # no blank lines are inserted between them.
    if (self.last_comment_lineno and
        self.last_comment_lineno == node.children[0].lineno - 1):
      self._SetNumNewlines(node.children[0], _NO_BLANK_LINES)
    else:
      self._SetNumNewlines(node.children[0], self._GetNumNewlines(node))
    for child in node.children:
      self.Visit(child)
    self.last_was_decorator = True

  def Visit_classdef(self, node):  # pylint: disable=invalid-name
    self.last_was_class_or_function = False
    index = self._SetBlankLinesBetweenCommentAndClassFunc(node)
    self.last_was_decorator = False
    self.class_level += 1
    for child in node.children[index:]:
      self.Visit(child)
    self.class_level -= 1
    self.last_was_class_or_function = True

  def Visit_funcdef(self, node):  # pylint: disable=invalid-name
    self.last_was_class_or_function = False
    # Bug fix: _SetBlankLinesBetweenCommentAndClassFunc used to also be
    # called unconditionally before this if/else, which visited the leading
    # comments (and set their newline annotations) twice per function.
    if _AsyncFunction(node):
      # For "async def" the ASYNC keyword is a preceding sibling: annotate
      # from the parent so leading comments are handled, and clear the
      # newline annotation on the "def" leaf itself.
      index = self._SetBlankLinesBetweenCommentAndClassFunc(
          node.prev_sibling.parent)
      self._SetNumNewlines(node.children[0], None)
    else:
      index = self._SetBlankLinesBetweenCommentAndClassFunc(node)
    self.last_was_decorator = False
    self.function_level += 1
    for child in node.children[index:]:
      self.Visit(child)
    self.function_level -= 1
    self.last_was_class_or_function = True

  def DefaultNodeVisit(self, node):
    """Override the default visitor for Node.

    This will set the blank lines required if the last entity was a class or
    function.

    Arguments:
      node: (pytree.Node) The node to visit.
    """
    if self.last_was_class_or_function:
      if pytree_utils.NodeName(node) in _PYTHON_STATEMENTS:
        leaf = pytree_utils.FirstLeafNode(node)
        self._SetNumNewlines(leaf, self._GetNumNewlines(leaf))
    self.last_was_class_or_function = False
    super(_BlankLineCalculator, self).DefaultNodeVisit(node)

  def _SetBlankLinesBetweenCommentAndClassFunc(self, node):
    """Set the number of blanks between a comment and class or func definition.

    Class and function definitions have leading comments as children of the
    classdef and functdef nodes.

    Arguments:
      node: (pytree.Node) The classdef or funcdef node.

    Returns:
      The index of the first child past the comment nodes.
    """
    index = 0
    while pytree_utils.IsCommentStatement(node.children[index]):
      # Standalone comments are wrapped in a simple_stmt node with the comment
      # node as its only child.
      self.Visit(node.children[index].children[0])
      if not self.last_was_decorator:
        self._SetNumNewlines(node.children[index].children[0], _ONE_BLANK_LINE)
      index += 1
    if (index and node.children[index].lineno -
        1 == node.children[index - 1].children[0].lineno):
      # The definition starts on the line right after its last comment.
      self._SetNumNewlines(node.children[index], _NO_BLANK_LINES)
    else:
      if self.last_comment_lineno + 1 == node.children[index].lineno:
        num_newlines = _NO_BLANK_LINES
      else:
        num_newlines = self._GetNumNewlines(node)
      self._SetNumNewlines(node.children[index], num_newlines)
    return index

  def _GetNumNewlines(self, node):
    # Decorated definitions stick to their decorator; top-level definitions
    # get the style-configured spacing; everything else gets one blank line.
    if self.last_was_decorator:
      return _NO_BLANK_LINES
    elif self._IsTopLevel(node):
      return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION')
    return _ONE_BLANK_LINE

  def _SetNumNewlines(self, node, num_newlines):
    pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.NEWLINES,
                                   num_newlines)

  def _IsTopLevel(self, node):
    return (not (self.class_level or self.function_level) and
            _StartsInZerothColumn(node))
def _StartsInZerothColumn(node):
  # True if the node's first leaf begins at column 0, or it is an async
  # function whose ASYNC keyword sibling begins at column 0.
  return (pytree_utils.FirstLeafNode(node).column == 0 or
          (_AsyncFunction(node) and node.prev_sibling.column == 0))
def _AsyncFunction(node):
  # Under Python 3, "async def" parses with an ASYNC leaf as the funcdef's
  # previous sibling; Python 2 has no async functions at all.
  return (py3compat.PY3 and node.prev_sibling and
          pytree_utils.NodeName(node.prev_sibling) == 'ASYNC')
| gpl-3.0 |
gunan/tensorflow | tensorflow/python/kernel_tests/nth_element_op_test.py | 22 | 6937 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
class NthElementTest(test.TestCase):
  """Tests for nn_ops.nth_element (value of the n-th order statistic along
  the last axis; reverse=True selects from the descending order)."""
  def _validateNthElement(self, inputs, dtype, n, reverse, expected_values):
    # Build the op, evaluate it, and check both shape and values against the
    # numpy expectation.
    np_expected_values = np.array(expected_values)
    with self.cached_session(use_gpu=False) as sess:
      inputs_op = ops.convert_to_tensor(inputs, dtype=dtype)
      values_op = nn_ops.nth_element(inputs_op, n, reverse=reverse)
      values = self.evaluate(values_op)
      self.assertShapeEqual(np_expected_values, values_op)
      self.assertAllClose(np_expected_values, values)
  def testExample1(self):
    # 1-D input: result is a scalar.
    inputs = [2.2, 4.4, 1.1, 5.5, 3.3]
    self._validateNthElement(inputs, dtypes.float32, 1, False, 2.2)
    self._validateNthElement(inputs, dtypes.float32, 1, True, 4.4)
  def testExample2(self):
    # 2-D input: the op reduces the last axis, one result per row.
    inputs = [[2.2, 4.4, 1.1], [5.5, 3.3, 6.6]]
    self._validateNthElement(inputs, dtypes.float64, 2, False, [4.4, 6.6])
    self._validateNthElement(inputs, dtypes.float64, 2, True, [1.1, 3.3])
  def testExample3(self):
    inputs = [[[2, 4, 1], [5, -3, 6]],
              [[7, 9, -8], [9, 0, 4]]]
    self._validateNthElement(inputs, dtypes.int32, 0, False,
                             [[1, -3], [-8, 0]])
    self._validateNthElement(inputs, dtypes.int64, 0, True,
                             [[4, 6], [9, 9]])
  def _testFloatLargeInput(self, input_shape):
    # Expected values come from sorting along the last axis with numpy.
    # NOTE(review): np.random is not explicitly seeded here; presumably the
    # TF test harness seeds it -- verify if flakiness is observed.
    inputs = np.random.random_sample(input_shape)
    n = np.random.randint(input_shape[-1])
    sort_inputs = np.sort(inputs)
    expected_values = sort_inputs[..., n]
    self._validateNthElement(
        inputs, dtypes.float32, n, False, expected_values)
    expected_values = sort_inputs[..., ::-1][..., n]
    self._validateNthElement(
        inputs, dtypes.float64, n, True, expected_values)
  def _testIntLargeInput(self, input_shape):
    inputs = np.random.randint(-1e3, 1e3, input_shape)
    n = np.random.randint(input_shape[-1])
    sort_inputs = np.sort(inputs)
    expected_values = sort_inputs[..., n]
    self._validateNthElement(
        inputs, dtypes.int32, n, False, expected_values)
    expected_values = sort_inputs[..., ::-1][..., n]
    self._validateNthElement(
        inputs, dtypes.int64, n, True, expected_values)
  def _testLargeInput(self, input_shape):
    self._testFloatLargeInput(input_shape)
    self._testIntLargeInput(input_shape)
  def testLargeInput(self):
    # Exercise a range of ranks and sizes, including a long last axis.
    self._testLargeInput([1])
    self._testLargeInput([10])
    self._testLargeInput([5, 10])
    self._testLargeInput([50, 100])
    self._testLargeInput([50, 10000])
    self._testLargeInput([50, 10, 100])
    self._testLargeInput([50, 10, 10, 100])
  def _testEnumerateN(self, input_shape):
    # Check every valid n for the given last-axis length.
    inputs = np.random.random_sample(input_shape)
    sort_inputs = np.sort(inputs)
    for n in range(input_shape[-1]):
      expected_values = sort_inputs[..., n]
      self._validateNthElement(
          inputs, dtypes.float32, n, False, expected_values)
      expected_values = sort_inputs[..., ::-1][..., n]
      self._validateNthElement(
          inputs, dtypes.float64, n, True, expected_values)
  def testEnumerateN(self):
    self._testEnumerateN([1])
    self._testEnumerateN([10])
    self._testEnumerateN([10, 10])
    self._testEnumerateN([10, 10, 10])
    self._testEnumerateN([10, 10, 10, 10])
  # The remaining tests rely on graph-mode evaluation, hence the
  # run_deprecated_v1 decorators and placeholder/eval usage.
  @test_util.run_deprecated_v1
  def testInvalidInput(self):
    # Rank errors on a static scalar are raised at graph-construction time.
    with self.assertRaisesRegexp(ValueError,
                                 "at least rank 1 but is rank 0"):
      nn_ops.nth_element(5, 0)
  @test_util.run_deprecated_v1
  def testInvalidInputAtEval(self):
    # With an unknown-shape placeholder the rank check happens at run time.
    with self.session(use_gpu=False):
      v = array_ops.placeholder(dtype=dtypes.float32)
      with self.assertRaisesOpError("Input must be >= 1-D"):
        nn_ops.nth_element(v, 0).eval(feed_dict={v: 5.0})
  @test_util.run_deprecated_v1
  def testInvalidN(self):
    with self.assertRaisesRegexp(ValueError,
                                 "non-negative but is -1"):
      nn_ops.nth_element([5], -1)
    with self.assertRaisesRegexp(ValueError,
                                 "scalar but has rank 1"):
      nn_ops.nth_element([5, 6, 3], [1])
  @test_util.run_deprecated_v1
  def testInvalidNAtEval(self):
    # A negative n fed at run time is rejected by the kernel.
    inputs = [[0.1, 0.2], [0.3, 0.4]]
    with self.session(use_gpu=False):
      n = array_ops.placeholder(dtypes.int32)
      values = nn_ops.nth_element(inputs, n)
      with self.assertRaisesOpError("Need n >= 0, got -7"):
        values.eval(feed_dict={n: -7})
  @test_util.run_deprecated_v1
  def testNTooLarge(self):
    inputs = [[0.1, 0.2], [0.3, 0.4]]
    with self.assertRaisesRegexp(ValueError,
                                 "must have last dimension > n = 2"):
      nn_ops.nth_element(inputs, 2)
  @test_util.run_deprecated_v1
  def testNTooLargeAtEval(self):
    inputs = [[0.1, 0.2], [0.3, 0.4]]
    with self.session(use_gpu=False):
      n = array_ops.placeholder(dtypes.int32)
      values = nn_ops.nth_element(inputs, n)
      with self.assertRaisesOpError(r"Input must have at least n\+1 columns"):
        values.eval(feed_dict={n: 2})
  @test_util.run_deprecated_v1
  def testGradients(self):
    # The incoming gradient for each row is split evenly among all entries
    # tied with the selected n-th value (e.g. two 1000s share -1 -> -0.5
    # each; five equal 2s share 5 -> 1 each).
    with self.session(use_gpu=False) as sess:
      inputs = array_ops.placeholder(dtypes.float32, shape=[3, 5])
      values = nn_ops.nth_element(inputs, 3)
      grad = sess.run(
          gradients_impl.gradients(
              values, inputs, grad_ys=[[-1., 2., 5.]]),
          feed_dict={inputs: [[2., -1., 1000., 3., 1000.],
                              [1., 5., 2., 4., 3.],
                              [2., 2., 2., 2., 2.],
                              ]})
      self.assertAllClose(grad[0], [[0, 0, -0.5, 0, -0.5],
                                    [0, 0, 0, 2, 0],
                                    [1, 1, 1, 1, 1],
                                    ])
if __name__ == "__main__":
test.main()
| apache-2.0 |
paulthulstrup/moose | framework/scripts/find_dep_apps.py | 23 | 4563 | #!/usr/bin/env python
# This script finds a file in the herd trunk containing all the possible applications
# thay may be built with an "up" target. If passed the value ROOT it will simply
# return the root directory
import os, sys, re, subprocess
def findDepApps(dep_names, use_current_only=False):
    """Return a newline-separated list of directories of applications that
    depend on (or are named) `dep_name`.

    'dep_names' may be a '~'-separated list; only the first entry is used.
    'use_current_only' restricts the search to the current git root instead
    of its parent (the usual "projects" peer layout).
    Returns '' when no search directory can be determined.
    """
    dep_name = dep_names.split('~')[0]

    app_dirs = []
    moose_apps = ['framework', 'moose', 'test', 'unit', 'modules', 'examples']
    apps = []

    # First see if we are in a git repo
    p = subprocess.Popen('git rev-parse --show-cdup', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    p.wait()
    if p.returncode == 0:
        git_dir = p.communicate()[0]
        root_dir = os.path.abspath(os.path.join(os.getcwd(), git_dir)).rstrip()

        # Assume that any application we care about is always a peer
        dir_to_append = '.' if use_current_only else '..'
        app_dirs.append(os.path.abspath(os.path.join(root_dir, dir_to_append)))

    # Now see if we can find .build_apps in a parent directory from where we are at, usually "projects"
    restrict_file = '.build_apps'
    restrict_file_path = ''
    restrict_dir = ''

    # Look at most four levels up for the restriction file.
    next_dir = os.getcwd()
    for i in range(4):
        next_dir = os.path.join(next_dir, "..")
        if os.path.isfile(os.path.join(next_dir, restrict_file)):
            restrict_file_path = os.path.join(next_dir, restrict_file)
            break
    if restrict_file_path != '':
        restrict_dir = os.path.dirname(os.path.abspath(restrict_file_path))
        app_dirs.append(restrict_dir)

    # Make sure that we found at least one directory to search
    if len(app_dirs) == 0:
        return ''

    # unique paths to search
    unique_dirs = set()
    for dir in app_dirs:
        unique_dirs.add(os.path.abspath(dir))

    remove_dirs = set()
    # now strip common paths (keep only the shallowest of nested directories)
    for dir1 in unique_dirs:
        for dir2 in unique_dirs:
            if dir1 == dir2:
                continue
            if dir1 in dir2:
                remove_dirs.add(dir2)
            elif dir2 in dir1:
                remove_dirs.add(dir1)
    # set difference
    unique_dirs = unique_dirs - remove_dirs

    # .build_apps restricts the set of application names we may match.
    if restrict_file_path != '':
        f = open(restrict_file_path)
        apps.extend(f.read().splitlines())
        f.close()

    # See which apps in this file are children or dependents of this app
    dep_apps = set()
    dep_dirs = set()

    # moose, elk and modules have special rules: they are detected by the
    # makefile they include rather than by APPLICATION_NAME.
    if dep_name == "moose":
        dep_app_re=re.compile(r"\bmoose\.mk\b")
    elif dep_name == "modules":
        dep_app_re=re.compile(r"\bmodules\.mk\b")
    elif dep_name == "elk":
        dep_app_re=re.compile(r"\belk(?:_module)?\.mk\b")
    else:
        dep_app_re=re.compile(r"^\s*APPLICATION_NAME\s*:=\s*"+dep_name,re.MULTILINE)

    ignores = ['.git', '.svn', '.libs', 'gold', 'src', 'include', 'contrib', 'tests', 'bak', 'tutorials']

    for dir in unique_dirs:
        startinglevel = dir.count(os.sep)
        for dirpath, dirnames, filenames in os.walk(dir, topdown=True):
            # Don't traverse too deep!
            if dirpath.count(os.sep) - startinglevel >= 2: # 2 levels outta be enough for anybody
                dirnames[:] = []

            # Don't traverse into ignored directories
            for ignore in ignores:
                if ignore in dirnames:
                    dirnames.remove(ignore)

            # Honor user ignored directories
            if os.path.isfile(os.path.join(dirpath, '.moose_ignore')):
                dirnames[:] = []
                continue

            # Don't traverse into submodules
            if os.path.isfile(os.path.join(dirpath, '.gitmodules')):
                f = open(os.path.join(dirpath, '.gitmodules'))
                content = f.read()
                f.close()
                sub_mods = re.findall(r'path = (\w+)', content)
                dirnames[:] = [x for x in dirnames if x not in sub_mods]

            potential_makefile = os.path.join(dirpath, 'Makefile')

            if os.path.isfile(potential_makefile):
                f = open(potential_makefile)
                lines = f.read()
                f.close()

                # We only want to build certain applications, look at the path to make a decision
                # If we are in trunk, we will honor .build_apps. If we aren't, then we'll add it
                eligible_app = dirpath.split('/')[-1]

                if dep_app_re.search(lines) and ((len(apps) == 0 or eligible_app in apps) or ('/moose/' in dirpath and eligible_app in moose_apps)):
                    dep_apps.add(eligible_app)
                    dep_dirs.add(dirpath)

                    # Don't traverse once we've found a dependency
                    dirnames[:] = []

    # Now we need to filter out duplicate moose apps
    # NOTE(review): moose_dir is assigned but never used below — the
    # duplicate-filtering step appears unimplemented; confirm intent.
    moose_dir = os.environ.get('MOOSE_DIR')
    return '\n'.join(dep_dirs)
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the application name.
    if len(sys.argv) == 2:
        dep_apps = findDepApps(sys.argv[1], False)
        print dep_apps  # Python 2 print statement: this script targets Python 2
| lgpl-2.1 |
lummyare/lummyare-lummy | py/selenium/selenium.py | 7 | 80742 |
"""
Copyright 2011 Software Freedom Conservancy.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import unicode_literals
__docformat__ = "restructuredtext en"
try:
import http.client as http_client
except ImportError:
import httplib as http_client
try:
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
class selenium(object):
"""
Defines an object that runs Selenium commands.
**Element Locators**
Element Locators tell Selenium which HTML element a command refers to.
The format of a locator is:
\ *locatorType*\ **=**\ \ *argument*
We support the following strategies for locating elements:
* \ **identifier**\ =\ *id*:
Select the element with the specified @id attribute. If no match is
found, select the first element whose @name attribute is \ *id*.
(This is normally the default; see below.)
* \ **id**\ =\ *id*:
Select the element with the specified @id attribute.
* \ **name**\ =\ *name*:
Select the first element with the specified @name attribute.
* username
* name=username
The name may optionally be followed by one or more \ *element-filters*, separated from the name by whitespace. If the \ *filterType* is not specified, \ **value**\ is assumed.
* name=flavour value=chocolate
* \ **dom**\ =\ *javascriptExpression*:
Find an element by evaluating the specified string. This allows you to traverse the HTML Document Object
Model using JavaScript. Note that you must not return a value in this string; simply make it the last expression in the block.
* dom=document.forms['myForm'].myDropdown
* dom=document.images[56]
* dom=function foo() { return document.links[1]; }; foo();
* \ **xpath**\ =\ *xpathExpression*:
Locate an element using an XPath expression.
* xpath=//img[@alt='The image alt text']
* xpath=//table[@id='table1']//tr[4]/td[2]
* xpath=//a[contains(@href,'#id1')]
* xpath=//a[contains(@href,'#id1')]/@class
* xpath=(//table[@class='stylee'])//th[text()='theHeaderText']/../td
* xpath=//input[@name='name2' and @value='yes']
* xpath=//\*[text()="right"]
* \ **link**\ =\ *textPattern*:
Select the link (anchor) element which contains text matching the
specified \ *pattern*.
* link=The link text
* \ **css**\ =\ *cssSelectorSyntax*:
Select the element using css selectors. Please refer to CSS2 selectors, CSS3 selectors for more information. You can also check the TestCssLocators test in the selenium test suite for an example of usage, which is included in the downloaded selenium core package.
* css=a[href="#id3"]
* css=span#firstChild + span
Currently the css selector locator supports all css1, css2 and css3 selectors except namespace in css3, some pseudo classes(:nth-of-type, :nth-last-of-type, :first-of-type, :last-of-type, :only-of-type, :visited, :hover, :active, :focus, :indeterminate) and pseudo elements(::first-line, ::first-letter, ::selection, ::before, ::after).
* \ **ui**\ =\ *uiSpecifierString*:
Locate an element by resolving the UI specifier string to another locator, and evaluating it. See the Selenium UI-Element Reference for more details.
* ui=loginPages::loginButton()
* ui=settingsPages::toggle(label=Hide Email)
* ui=forumPages::postBody(index=2)//a[2]
Without an explicit locator prefix, Selenium uses the following default
strategies:
* \ **dom**\ , for locators starting with "document."
* \ **xpath**\ , for locators starting with "//"
* \ **identifier**\ , otherwise
**Element Filters**
Element filters can be used with a locator to refine a list of candidate elements. They are currently used only in the 'name' element-locator.
Filters look much like locators, ie.
\ *filterType*\ **=**\ \ *argument*
Supported element-filters are:
\ **value=**\ \ *valuePattern*
Matches elements based on their values. This is particularly useful for refining a list of similarly-named toggle-buttons.
\ **index=**\ \ *index*
Selects a single element based on its position in the list (offset from zero).
**String-match Patterns**
Various Pattern syntaxes are available for matching string values:
* \ **glob:**\ \ *pattern*:
Match a string against a "glob" (aka "wildmat") pattern. "Glob" is a
kind of limited regular-expression syntax typically used in command-line
shells. In a glob pattern, "\*" represents any sequence of characters, and "?"
represents any single character. Glob patterns match against the entire
string.
* \ **regexp:**\ \ *regexp*:
Match a string using a regular-expression. The full power of JavaScript
regular-expressions is available.
* \ **regexpi:**\ \ *regexpi*:
Match a string using a case-insensitive regular-expression.
* \ **exact:**\ \ *string*:
Match a string exactly, verbatim, without any of that fancy wildcard
stuff.
If no pattern prefix is specified, Selenium assumes that it's a "glob"
pattern.
For commands that return multiple values (such as verifySelectOptions),
the string being matched is a comma-separated list of the return values,
where both commas and backslashes in the values are backslash-escaped.
When providing a pattern, the optional matching syntax (i.e. glob,
regexp, etc.) is specified once, as usual, at the beginning of the
pattern.
"""
### This part is hard-coded in the XSL
    def __init__(self, host, port, browserStartCommand, browserURL):
        """Create a client for a Selenium RC server.

        'host'/'port' locate the Selenium server; 'browserStartCommand' is
        the browser launcher string (e.g. "*firefox"); 'browserURL' is the
        base URL of the application under test.  No server session is opened
        until start() is called.
        """
        self.host = host
        self.port = port
        self.browserStartCommand = browserStartCommand
        self.browserURL = browserURL
        self.sessionId = None
        self.extensionJs = ""
    def setExtensionJs(self, extensionJs):
        """Set user-extension JavaScript passed to the server on start()."""
        self.extensionJs = extensionJs
def start(self, browserConfigurationOptions=None, driver=None):
start_args = [self.browserStartCommand, self.browserURL, self.extensionJs]
if browserConfigurationOptions:
start_args.append(browserConfigurationOptions)
if driver:
id = driver.desired_capabilities['webdriver.remote.sessionid']
start_args.append('webdriver.remote.sessionid=%s' % id)
result = self.get_string("getNewBrowserSession", start_args)
try:
self.sessionId = result
except ValueError:
raise Exception(result)
    def stop(self):
        """End the current browser session and clear the stored session id."""
        self.do_command("testComplete", [])
        self.sessionId = None
def do_command(self, verb, args):
conn = http_client.HTTPConnection(self.host, self.port)
try:
body = 'cmd=' + urllib_parse.quote_plus(unicode(verb).encode('utf-8'))
for i in range(len(args)):
body += '&' + unicode(i+1) + '=' + \
urllib_parse.quote_plus(unicode(args[i]).encode('utf-8'))
if (None != self.sessionId):
body += "&sessionId=" + unicode(self.sessionId)
headers = {
"Content-Type":
"application/x-www-form-urlencoded; charset=utf-8"
}
conn.request("POST", "/selenium-server/driver/", body, headers)
response = conn.getresponse()
data = unicode(response.read(), "UTF-8")
if (not data.startswith('OK')):
raise Exception(data)
return data
finally:
conn.close()
def get_string(self, verb, args):
result = self.do_command(verb, args)
return result[3:]
def get_string_array(self, verb, args):
csv = self.get_string(verb, args)
if not csv:
return []
token = ""
tokens = []
escape = False
for i in range(len(csv)):
letter = csv[i]
if (escape):
token = token + letter
escape = False
continue
if (letter == '\\'):
escape = True
elif (letter == ','):
tokens.append(token)
token = ""
else:
token = token + letter
tokens.append(token)
return tokens
    def get_number(self, verb, args):
        # The server returns numbers as their decimal string representation.
        return int(self.get_string(verb, args))
def get_number_array(self, verb, args):
string_array = self.get_string_array(verb, args)
num_array = []
for i in string_array:
num_array.append(int(i))
return num_array
def get_boolean(self, verb, args):
boolstr = self.get_string(verb, args)
if ("true" == boolstr):
return True
if ("false" == boolstr):
return False
raise ValueError("result is neither 'true' nor 'false': " + boolstr)
def get_boolean_array(self, verb, args):
boolarr = self.get_string_array(verb, args)
for i, boolstr in enumerate(boolarr):
if ("true" == boolstr):
boolarr[i] = True
continue
if ("false" == boolstr):
boolarr[i] = False
continue
raise ValueError("result is neither 'true' nor 'false': " + boolarr[i])
return boolarr
def click(self,locator):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("click", [locator,])
def double_click(self,locator):
"""
Double clicks on a link, button, checkbox or radio button. If the double click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("doubleClick", [locator,])
def context_menu(self,locator):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
"""
self.do_command("contextMenu", [locator,])
def click_at(self,locator,coordString):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("clickAt", [locator,coordString,])
def double_click_at(self,locator,coordString):
"""
Doubleclicks on a link, button, checkbox or radio button. If the action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("doubleClickAt", [locator,coordString,])
def context_menu_at(self,locator,coordString):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("contextMenuAt", [locator,coordString,])
def fire_event(self,locator,eventName):
"""
Explicitly simulate an event, to trigger the corresponding "on\ *event*"
handler.
'locator' is an element locator
'eventName' is the event name, e.g. "focus" or "blur"
"""
self.do_command("fireEvent", [locator,eventName,])
def focus(self,locator):
"""
Move the focus to the specified element; for example, if the element is an input field, move the cursor to that field.
'locator' is an element locator
"""
self.do_command("focus", [locator,])
def key_press(self,locator,keySequence):
"""
Simulates a user pressing and releasing a key.
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyPress", [locator,keySequence,])
def shift_key_down(self):
"""
Press the shift key and hold it down until doShiftUp() is called or a new page is loaded.
"""
self.do_command("shiftKeyDown", [])
def shift_key_up(self):
"""
Release the shift key.
"""
self.do_command("shiftKeyUp", [])
def meta_key_down(self):
"""
Press the meta key and hold it down until doMetaUp() is called or a new page is loaded.
"""
self.do_command("metaKeyDown", [])
def meta_key_up(self):
"""
Release the meta key.
"""
self.do_command("metaKeyUp", [])
def alt_key_down(self):
"""
Press the alt key and hold it down until doAltUp() is called or a new page is loaded.
"""
self.do_command("altKeyDown", [])
def alt_key_up(self):
"""
Release the alt key.
"""
self.do_command("altKeyUp", [])
def control_key_down(self):
"""
Press the control key and hold it down until doControlUp() is called or a new page is loaded.
"""
self.do_command("controlKeyDown", [])
def control_key_up(self):
"""
Release the control key.
"""
self.do_command("controlKeyUp", [])
def key_down(self,locator,keySequence):
"""
Simulates a user pressing a key (without releasing it yet).
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyDown", [locator,keySequence,])
def key_up(self,locator,keySequence):
"""
Simulates a user releasing a key.
'locator' is an element locator
'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119".
"""
self.do_command("keyUp", [locator,keySequence,])
def mouse_over(self,locator):
"""
Simulates a user hovering a mouse over the specified element.
'locator' is an element locator
"""
self.do_command("mouseOver", [locator,])
def mouse_out(self,locator):
"""
Simulates a user moving the mouse pointer away from the specified element.
'locator' is an element locator
"""
self.do_command("mouseOut", [locator,])
def mouse_down(self,locator):
"""
Simulates a user pressing the left mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseDown", [locator,])
def mouse_down_right(self,locator):
"""
Simulates a user pressing the right mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseDownRight", [locator,])
def mouse_down_at(self,locator,coordString):
"""
Simulates a user pressing the left mouse button (without releasing it yet) at
the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseDownAt", [locator,coordString,])
def mouse_down_right_at(self,locator,coordString):
"""
Simulates a user pressing the right mouse button (without releasing it yet) at
the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseDownRightAt", [locator,coordString,])
def mouse_up(self,locator):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) on the specified element.
'locator' is an element locator
"""
self.do_command("mouseUp", [locator,])
def mouse_up_right(self,locator):
"""
Simulates the event that occurs when the user releases the right mouse button (i.e., stops
holding the button down) on the specified element.
'locator' is an element locator
"""
self.do_command("mouseUpRight", [locator,])
def mouse_up_at(self,locator,coordString):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) at the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseUpAt", [locator,coordString,])
def mouse_up_right_at(self,locator,coordString):
"""
Simulates the event that occurs when the user releases the right mouse button (i.e., stops
holding the button down) at the specified location.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseUpRightAt", [locator,coordString,])
def mouse_move(self,locator):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseMove", [locator,])
def mouse_move_at(self,locator,coordString):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseMoveAt", [locator,coordString,])
def type(self,locator,value):
"""
Sets the value of an input field, as though you typed it in.
Can also be used to set the value of combo boxes, check boxes, etc. In these cases,
value should be the value of the option selected, not the visible text.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("type", [locator,value,])
def type_keys(self,locator,value):
"""
Simulates keystroke events on the specified element, as though you typed the value key-by-key.
This is a convenience method for calling keyDown, keyUp, keyPress for every character in the specified string;
this is useful for dynamic UI widgets (like auto-completing combo boxes) that require explicit key events.
Unlike the simple "type" command, which forces the specified value into the page directly, this command
may or may not have any visible effect, even in cases where typing keys would normally have a visible effect.
For example, if you use "typeKeys" on a form element, you may or may not see the results of what you typed in
the field.
In some cases, you may need to use the simple "type" command to set the value of the field and then the "typeKeys" command to
send the keystroke events corresponding to what you just typed.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("typeKeys", [locator,value,])
def set_speed(self,value):
"""
Set execution speed (i.e., set the millisecond length of a delay which will follow each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
'value' is the number of milliseconds to pause after operation
"""
self.do_command("setSpeed", [value,])
def get_speed(self):
"""
Get execution speed (i.e., get the millisecond length of the delay following each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
See also setSpeed.
"""
return self.get_string("getSpeed", [])
def get_log(self):
"""
Get RC logs associated with current session.
"""
return self.get_string("getLog", [])
def check(self,locator):
"""
Check a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("check", [locator,])
def uncheck(self,locator):
"""
Uncheck a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("uncheck", [locator,])
def select(self,selectLocator,optionLocator):
"""
Select an option from a drop-down using an option locator.
Option locators provide different ways of specifying options of an HTML
Select element (e.g. for selecting a specific option, or for asserting
that the selected option satisfies a specification). There are several
forms of Select Option Locator.
* \ **label**\ =\ *labelPattern*:
matches options based on their labels, i.e. the visible text. (This
is the default.)
* label=regexp:^[Oo]ther
* \ **value**\ =\ *valuePattern*:
matches options based on their values.
* value=other
* \ **id**\ =\ *id*:
matches options based on their ids.
* id=option1
* \ **index**\ =\ *index*:
matches an option based on its index (offset from zero).
* index=2
If no option locator prefix is provided, the default behaviour is to match on \ **label**\ .
'selectLocator' is an element locator identifying a drop-down menu
'optionLocator' is an option locator (a label by default)
"""
self.do_command("select", [selectLocator,optionLocator,])
def add_selection(self,locator,optionLocator):
"""
Add a selection to the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("addSelection", [locator,optionLocator,])
def remove_selection(self,locator,optionLocator):
"""
Remove a selection from the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("removeSelection", [locator,optionLocator,])
def remove_all_selections(self,locator):
"""
Unselects all of the selected options in a multi-select element.
'locator' is an element locator identifying a multi-select box
"""
self.do_command("removeAllSelections", [locator,])
def submit(self,formLocator):
"""
Submit the specified form. This is particularly useful for forms without
submit buttons, e.g. single-input "Search" forms.
'formLocator' is an element locator for the form you want to submit
"""
self.do_command("submit", [formLocator,])
def open(self,url,ignoreResponseCode=True):
"""
Opens an URL in the test frame. This accepts both relative and absolute
URLs.
The "open" command waits for the page to load before proceeding,
ie. the "AndWait" suffix is implicit.
\ *Note*: The URL must be on the same domain as the runner HTML
due to security restrictions in the browser (Same Origin Policy). If you
need to open an URL on another domain, use the Selenium Server to start a
new browser session on that domain.
'url' is the URL to open; may be relative or absolute
'ignoreResponseCode' if set to true: doesnt send ajax HEAD/GET request; if set to false: sends ajax HEAD/GET request to the url and reports error code if any as response to open.
"""
self.do_command("open", [url,ignoreResponseCode])
def open_window(self,url,windowID):
"""
Opens a popup window (if a window with that ID isn't already open).
After opening the window, you'll need to select it using the selectWindow
command.
This command can also be a useful workaround for bug SEL-339. In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'url' is the URL to open, which can be blank
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("openWindow", [url,windowID,])
def select_window(self,windowID):
"""
Selects a popup window using a window locator; once a popup window has been selected, all
commands go to that window. To select the main window again, use null
as the target.
Window locators provide different ways of specifying the window object:
by title, by internal JavaScript "name," or by JavaScript variable.
* \ **title**\ =\ *My Special Window*:
Finds the window using the text that appears in the title bar. Be careful;
two windows can share the same title. If that happens, this locator will
just pick one.
* \ **name**\ =\ *myWindow*:
Finds the window using its internal JavaScript "name" property. This is the second
parameter "windowName" passed to the JavaScript method window.open(url, windowName, windowFeatures, replaceFlag)
(which Selenium intercepts).
* \ **var**\ =\ *variableName*:
Some pop-up windows are unnamed (anonymous), but are associated with a JavaScript variable name in the current
application window, e.g. "window.foo = window.open(url);". In those cases, you can open the window using
"var=foo".
If no window locator prefix is provided, we'll try to guess what you mean like this:
1.) if windowID is null, (or the string "null") then it is assumed the user is referring to the original window instantiated by the browser).
2.) if the value of the "windowID" parameter is a JavaScript variable name in the current application window, then it is assumed
that this variable contains the return value from a call to the JavaScript window.open() method.
3.) Otherwise, selenium looks in a hash it maintains that maps string names to window "names".
4.) If \ *that* fails, we'll try looping over all of the known windows to try to find the appropriate "title".
Since "title" is not necessarily unique, this may have unexpected behavior.
If you're having trouble figuring out the name of a window that you want to manipulate, look at the Selenium log messages
which identify the names of windows created via window.open (and therefore intercepted by Selenium). You will see messages
like the following for each window as it is opened:
``debug: window.open call intercepted; window ID (which you can use with selectWindow()) is "myNewWindow"``
In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
(This is bug SEL-339.) In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("selectWindow", [windowID,])
def select_pop_up(self,windowID):
"""
Simplifies the process of selecting a popup window (and does not offer
functionality beyond what ``selectWindow()`` already provides).
* If ``windowID`` is either not specified, or specified as
"null", the first non-top window is selected. The top window is the one
that would be selected by ``selectWindow()`` without providing a
``windowID`` . This should not be used when more than one popup
window is in play.
* Otherwise, the window will be looked up considering
``windowID`` as the following in order: 1) the "name" of the
window, as specified to ``window.open()``; 2) a javascript
variable which is a reference to a window; and 3) the title of the
window. This is the same ordered lookup performed by
``selectWindow`` .
'windowID' is an identifier for the popup window, which can take on a number of different meanings
"""
self.do_command("selectPopUp", [windowID,])
def deselect_pop_up(self):
"""
Selects the main window. Functionally equivalent to using
``selectWindow()`` and specifying no value for
``windowID``.
"""
self.do_command("deselectPopUp", [])
def select_frame(self,locator):
"""
Selects a frame within the current window. (You may invoke this command
multiple times to select nested frames.) To select the parent frame, use
"relative=parent" as a locator; to select the top frame, use "relative=top".
You can also select a frame by its 0-based index number; select the first frame with
"index=0", or the third frame with "index=2".
You may also use a DOM expression to identify the frame you want directly,
like this: ``dom=frames["main"].frames["subframe"]``
'locator' is an element locator identifying a frame or iframe
"""
self.do_command("selectFrame", [locator,])
def get_whether_this_frame_match_frame_expression(self,currentFrameString,target):
"""
Determine whether current/locator identify the frame containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" frame. In this case, when the test calls selectFrame, this
routine is called for each frame to figure out which one has been selected.
The selected frame will return true, while all others will return false.
'currentFrameString' is starting frame
'target' is new frame (which might be relative to the current one)
"""
return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,])
def get_whether_this_window_match_window_expression(self,currentWindowString,target):
"""
Determine whether currentWindowString plus target identify the window containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" window. In this case, when the test calls selectWindow, this
routine is called for each window to figure out which one has been selected.
The selected window will return true, while all others will return false.
'currentWindowString' is starting window
'target' is new window (which might be relative to the current one, e.g., "_parent")
"""
return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,])
def wait_for_pop_up(self,windowID,timeout):
"""
Waits for a popup window to appear and load up.
'windowID' is the JavaScript window "name" of the window that will appear (not the text of the title bar) If unspecified, or specified as "null", this command will wait for the first non-top window to appear (don't rely on this if you are working with multiple popups simultaneously).
'timeout' is a timeout in milliseconds, after which the action will return with an error. If this value is not specified, the default Selenium timeout will be used. See the setTimeout() command.
"""
self.do_command("waitForPopUp", [windowID,timeout,])
def choose_cancel_on_next_confirmation(self):
"""
By default, Selenium's overridden window.confirm() function will
return true, as if the user had manually clicked OK; after running
this command, the next call to confirm() will return false, as if
the user had clicked Cancel. Selenium will then resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call this command for each
confirmation.
Take note - every time a confirmation comes up, you must
consume it with a corresponding getConfirmation, or else
the next selenium operation will fail.
"""
self.do_command("chooseCancelOnNextConfirmation", [])
def choose_ok_on_next_confirmation(self):
"""
Undo the effect of calling chooseCancelOnNextConfirmation. Note
that Selenium's overridden window.confirm() function will normally automatically
return true, as if the user had manually clicked OK, so you shouldn't
need to use this command unless for some reason you need to change
your mind prior to the next confirmation. After any confirmation, Selenium will resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call chooseCancelOnNextConfirmation for each
confirmation.
Take note - every time a confirmation comes up, you must
consume it with a corresponding getConfirmation, or else
the next selenium operation will fail.
"""
self.do_command("chooseOkOnNextConfirmation", [])
def answer_on_next_prompt(self,answer):
"""
Instructs Selenium to return the specified answer string in response to
the next JavaScript prompt [window.prompt()].
'answer' is the answer to give in response to the prompt pop-up
"""
self.do_command("answerOnNextPrompt", [answer,])
def go_back(self):
"""
Simulates the user clicking the "back" button on their browser.
"""
self.do_command("goBack", [])
def refresh(self):
"""
Simulates the user clicking the "Refresh" button on their browser.
"""
self.do_command("refresh", [])
def close(self):
"""
Simulates the user clicking the "close" button in the titlebar of a popup
window or tab.
"""
self.do_command("close", [])
def is_alert_present(self):
"""
Has an alert occurred?
This function never throws an exception
"""
return self.get_boolean("isAlertPresent", [])
def is_prompt_present(self):
"""
Has a prompt occurred?
This function never throws an exception
"""
return self.get_boolean("isPromptPresent", [])
def is_confirmation_present(self):
"""
Has confirm() been called?
This function never throws an exception
"""
return self.get_boolean("isConfirmationPresent", [])
def get_alert(self):
"""
Retrieves the message of a JavaScript alert generated during the previous action, or fail if there were no alerts.
Getting an alert has the same effect as manually clicking OK. If an
alert is generated but you do not consume it with getAlert, the next Selenium action
will fail.
Under Selenium, JavaScript alerts will NOT pop up a visible alert
dialog.
Selenium does NOT support JavaScript alerts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getAlert", [])
def get_confirmation(self):
"""
Retrieves the message of a JavaScript confirmation dialog generated during
the previous action.
By default, the confirm function will return true, having the same effect
as manually clicking OK. This can be changed by prior execution of the
chooseCancelOnNextConfirmation command.
If an confirmation is generated but you do not consume it with getConfirmation,
the next Selenium action will fail.
NOTE: under Selenium, JavaScript confirmations will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript confirmations that are
generated in a page's onload() event handler. In this case a visible
dialog WILL be generated and Selenium will hang until you manually click
OK.
"""
return self.get_string("getConfirmation", [])
def get_prompt(self):
"""
Retrieves the message of a JavaScript question prompt dialog generated during
the previous action.
Successful handling of the prompt requires prior execution of the
answerOnNextPrompt command. If a prompt is generated but you
do not get/verify it, the next Selenium action will fail.
NOTE: under Selenium, JavaScript prompts will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript prompts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getPrompt", [])
def get_location(self):
"""
Gets the absolute URL of the current page.
"""
return self.get_string("getLocation", [])
def get_title(self):
"""
Gets the title of the current page.
"""
return self.get_string("getTitle", [])
def get_body_text(self):
"""
Gets the entire text of the page.
"""
return self.get_string("getBodyText", [])
def get_value(self,locator):
"""
Gets the (whitespace-trimmed) value of an input field (or anything else with a value parameter).
For checkbox/radio elements, the value will be "on" or "off" depending on
whether the element is checked or not.
'locator' is an element locator
"""
return self.get_string("getValue", [locator,])
def get_text(self,locator):
"""
Gets the text of an element. This works for any element that contains
text. This command uses either the textContent (Mozilla-like browsers) or
the innerText (IE-like browsers) of the element, which is the rendered
text shown to the user.
'locator' is an element locator
"""
return self.get_string("getText", [locator,])
def highlight(self,locator):
"""
Briefly changes the backgroundColor of the specified element yellow. Useful for debugging.
'locator' is an element locator
"""
self.do_command("highlight", [locator,])
def get_eval(self,script):
"""
Gets the result of evaluating the specified JavaScript snippet. The snippet may
have multiple lines, but only the result of the last line will be returned.
Note that, by default, the snippet will run in the context of the "selenium"
object itself, so ``this`` will refer to the Selenium object. Use ``window`` to
refer to the window of your application, e.g. ``window.document.getElementById('foo')``
If you need to use
a locator to refer to a single element in your application page, you can
use ``this.browserbot.findElement("id=foo")`` where "id=foo" is your locator.
'script' is the JavaScript snippet to run
"""
return self.get_string("getEval", [script,])
def is_checked(self,locator):
"""
Gets whether a toggle-button (checkbox/radio) is checked. Fails if the specified element doesn't exist or isn't a toggle-button.
'locator' is an element locator pointing to a checkbox or radio button
"""
return self.get_boolean("isChecked", [locator,])
def get_table(self,tableCellAddress):
"""
Gets the text from a cell of a table. The cellAddress syntax
tableLocator.row.column, where row and column start at 0.
'tableCellAddress' is a cell address, e.g. "foo.1.4"
"""
return self.get_string("getTable", [tableCellAddress,])
def get_selected_labels(self,selectLocator):
"""
Gets all option labels (visible text) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedLabels", [selectLocator,])
def get_selected_label(self,selectLocator):
"""
Gets option label (visible text) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedLabel", [selectLocator,])
def get_selected_values(self,selectLocator):
"""
Gets all option values (value attributes) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedValues", [selectLocator,])
def get_selected_value(self,selectLocator):
"""
Gets option value (value attribute) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedValue", [selectLocator,])
def get_selected_indexes(self,selectLocator):
"""
Gets all option indexes (option number, starting at 0) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIndexes", [selectLocator,])
def get_selected_index(self,selectLocator):
"""
Gets option index (option number, starting at 0) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedIndex", [selectLocator,])
def get_selected_ids(self,selectLocator):
"""
Gets all option element IDs for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIds", [selectLocator,])
def get_selected_id(self,selectLocator):
"""
Gets option element ID for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedId", [selectLocator,])
def is_something_selected(self,selectLocator):
"""
Determines whether some option in a drop-down menu is selected.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_boolean("isSomethingSelected", [selectLocator,])
def get_select_options(self,selectLocator):
"""
Gets all option labels in the specified select drop-down.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectOptions", [selectLocator,])
def get_attribute(self,attributeLocator):
"""
Gets the value of an element attribute. The value of the attribute may
differ across browsers (this is the case for the "style" attribute, for
example).
'attributeLocator' is an element locator followed by an @ sign and then the name of the attribute, e.g. "foo@bar"
"""
return self.get_string("getAttribute", [attributeLocator,])
def is_text_present(self,pattern):
"""
Verifies that the specified text pattern appears somewhere on the rendered page shown to the user.
'pattern' is a pattern to match with the text of the page
"""
return self.get_boolean("isTextPresent", [pattern,])
def is_element_present(self,locator):
"""
Verifies that the specified element is somewhere on the page.
'locator' is an element locator
"""
return self.get_boolean("isElementPresent", [locator,])
def is_visible(self,locator):
"""
Determines if the specified element is visible. An
element can be rendered invisible by setting the CSS "visibility"
property to "hidden", or the "display" property to "none", either for the
element itself or one if its ancestors. This method will fail if
the element is not present.
'locator' is an element locator
"""
return self.get_boolean("isVisible", [locator,])
def is_editable(self,locator):
"""
Determines whether the specified input element is editable, ie hasn't been disabled.
This method will fail if the specified element isn't an input element.
'locator' is an element locator
"""
return self.get_boolean("isEditable", [locator,])
def get_all_buttons(self):
"""
Returns the IDs of all buttons on the page.
If a given button has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllButtons", [])
def get_all_links(self):
"""
Returns the IDs of all links on the page.
If a given link has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllLinks", [])
def get_all_fields(self):
"""
Returns the IDs of all input fields on the page.
If a given field has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllFields", [])
def get_attribute_from_all_windows(self,attributeName):
"""
Returns every instance of some attribute from all known windows.
'attributeName' is name of an attribute on the windows
"""
return self.get_string_array("getAttributeFromAllWindows", [attributeName,])
def dragdrop(self,locator,movementsString):
"""
deprecated - use dragAndDrop instead
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragdrop", [locator,movementsString,])
def set_mouse_speed(self,pixels):
"""
Configure the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
Setting this value to 0 means that we'll send a "mousemove" event to every single pixel
in between the start location and the end location; that can be very slow, and may
cause some browsers to force the JavaScript to timeout.
If the mouse speed is greater than the distance between the two dragged objects, we'll
just send one "mousemove" at the start location and then one final one at the end location.
'pixels' is the number of pixels between "mousemove" events
"""
self.do_command("setMouseSpeed", [pixels,])
def get_mouse_speed(self):
"""
Returns the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
"""
return self.get_number("getMouseSpeed", [])
def drag_and_drop(self,locator,movementsString):
"""
Drags an element a certain distance and then drops it
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragAndDrop", [locator,movementsString,])
def drag_and_drop_to_object(self,locatorOfObjectToBeDragged,locatorOfDragDestinationObject):
"""
Drags an element and drops it on another element
'locatorOfObjectToBeDragged' is an element to be dragged
'locatorOfDragDestinationObject' is an element whose location (i.e., whose center-most pixel) will be the point where locatorOfObjectToBeDragged is dropped
"""
self.do_command("dragAndDropToObject", [locatorOfObjectToBeDragged,locatorOfDragDestinationObject,])
def window_focus(self):
"""
Gives focus to the currently selected window
"""
self.do_command("windowFocus", [])
def window_maximize(self):
"""
Resize currently selected window to take up the entire screen
"""
self.do_command("windowMaximize", [])
def get_all_window_ids(self):
"""
Returns the IDs of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowIds", [])
def get_all_window_names(self):
"""
Returns the names of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowNames", [])
def get_all_window_titles(self):
"""
Returns the titles of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowTitles", [])
def get_html_source(self):
"""
Returns the entire HTML source between the opening and
closing "html" tags.
"""
return self.get_string("getHtmlSource", [])
def set_cursor_position(self,locator,position):
"""
Moves the text cursor to the specified position in the given input element or textarea.
This method will fail if the specified element isn't an input element or textarea.
'locator' is an element locator pointing to an input element or textarea
'position' is the numerical position of the cursor in the field; position should be 0 to move the position to the beginning of the field. You can also set the cursor to -1 to move it to the end of the field.
"""
self.do_command("setCursorPosition", [locator,position,])
def get_element_index(self,locator):
"""
Get the relative index of an element to its parent (starting from 0). The comment node and empty text node
will be ignored.
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementIndex", [locator,])
def is_ordered(self,locator1,locator2):
"""
Check if these two elements have same parent and are ordered siblings in the DOM. Two same elements will
not be considered ordered.
'locator1' is an element locator pointing to the first element
'locator2' is an element locator pointing to the second element
"""
return self.get_boolean("isOrdered", [locator1,locator2,])
def get_element_position_left(self,locator):
"""
Retrieves the horizontal position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionLeft", [locator,])
def get_element_position_top(self,locator):
"""
Retrieves the vertical position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionTop", [locator,])
def get_element_width(self,locator):
"""
Retrieves the width of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementWidth", [locator,])
def get_element_height(self,locator):
"""
Retrieves the height of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementHeight", [locator,])
def get_cursor_position(self,locator):
"""
Retrieves the text cursor position in the given input element or textarea; beware, this may not work perfectly on all browsers.
Specifically, if the cursor/selection has been cleared by JavaScript, this command will tend to
return the position of the last location of the cursor, even though the cursor is now gone from the page. This is filed as SEL-243.
This method will fail if the specified element isn't an input element or textarea, or there is no cursor in the element.
'locator' is an element locator pointing to an input element or textarea
"""
return self.get_number("getCursorPosition", [locator,])
def get_expression(self,expression):
"""
Returns the specified expression.
This is useful because of JavaScript preprocessing.
It is used to generate commands like assertExpression and waitForExpression.
'expression' is the value to return
"""
return self.get_string("getExpression", [expression,])
def get_xpath_count(self,xpath):
"""
Returns the number of nodes that match the specified xpath, eg. "//table" would give
the number of tables.
'xpath' is the xpath expression to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you.
"""
return self.get_number("getXpathCount", [xpath,])
def get_css_count(self,css):
"""
Returns the number of nodes that match the specified css selector, eg. "css=table" would give
the number of tables.
'css' is the css selector to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you.
"""
return self.get_number("getCssCount", [css,])
def assign_id(self,locator,identifier):
"""
Temporarily sets the "id" attribute of the specified element, so you can locate it in the future
using its ID rather than a slow/complicated XPath. This ID will disappear once the page is
reloaded.
'locator' is an element locator pointing to an element
'identifier' is a string to be used as the ID of the specified element
"""
self.do_command("assignId", [locator,identifier,])
def allow_native_xpath(self,allow):
"""
Specifies whether Selenium should use the native in-browser implementation
of XPath (if any native version is available); if you pass "false" to
this function, we will always use our pure-JavaScript xpath library.
Using the pure-JS xpath library can improve the consistency of xpath
element locators between different browser vendors, but the pure-JS
version is much slower than the native implementations.
'allow' is boolean, true means we'll prefer to use native XPath; false means we'll only use JS XPath
"""
self.do_command("allowNativeXpath", [allow,])
def ignore_attributes_without_value(self,ignore):
"""
Specifies whether Selenium will ignore xpath attributes that have no
value, i.e. are the empty string, when using the non-native xpath
evaluation engine. You'd want to do this for performance reasons in IE.
However, this could break certain xpaths, for example an xpath that looks
for an attribute whose value is NOT the empty string.
The hope is that such xpaths are relatively rare, but the user should
have the option of using them. Note that this only influences xpath
evaluation when using the ajaxslt engine (i.e. not "javascript-xpath").
'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness.
"""
self.do_command("ignoreAttributesWithoutValue", [ignore,])
def wait_for_condition(self,script,timeout):
"""
Runs the specified JavaScript snippet repeatedly until it evaluates to "true".
The snippet may have multiple lines, but only the result of the last line
will be considered.
Note that, by default, the snippet will be run in the runner's test window, not in the window
of your application. To get the window of your application, you can use
the JavaScript snippet ``selenium.browserbot.getCurrentWindow()``, and then
run your JavaScript in there
'script' is the JavaScript snippet to run
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForCondition", [script,timeout,])
def set_timeout(self,timeout):
"""
Specifies the amount of time that Selenium will wait for actions to complete.
Actions that require waiting include "open" and the "waitFor\*" actions.
The default timeout is 30 seconds.
'timeout' is a timeout in milliseconds, after which the action will return with an error
"""
self.do_command("setTimeout", [timeout,])
def wait_for_page_to_load(self,timeout):
"""
Waits for a new page to load.
You can use this command instead of the "AndWait" suffixes, "clickAndWait", "selectAndWait", "typeAndWait" etc.
(which are only available in the JS API).
Selenium constantly keeps track of new pages loading, and sets a "newPageLoaded"
flag when it first notices a page load. Running any other Selenium command after
turns the flag to false. Hence, if you want to wait for a page to load, you must
wait immediately after a Selenium command that caused a page-load.
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForPageToLoad", [timeout,])
def wait_for_frame_to_load(self,frameAddress,timeout):
"""
Waits for a new frame to load.
Selenium constantly keeps track of new pages and frames loading,
and sets a "newPageLoaded" flag when it first notices a page load.
See waitForPageToLoad for more information.
'frameAddress' is FrameAddress from the server side
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForFrameToLoad", [frameAddress,timeout,])
def get_cookie(self):
"""
Return all cookies of the current page under test.
"""
return self.get_string("getCookie", [])
def get_cookie_by_name(self,name):
"""
Returns the value of the cookie with the specified name, or throws an error if the cookie is not present.
'name' is the name of the cookie
"""
return self.get_string("getCookieByName", [name,])
def is_cookie_present(self,name):
"""
Returns true if a cookie with the specified name is present, or false otherwise.
'name' is the name of the cookie
"""
return self.get_boolean("isCookiePresent", [name,])
def create_cookie(self,nameValuePair,optionsString):
"""
Create a new cookie whose path and domain are same with those of current page
under test, unless you specified a path for this cookie explicitly.
'nameValuePair' is name and value of the cookie in a format "name=value"
'optionsString' is options for the cookie. Currently supported options include 'path', 'max_age' and 'domain'. the optionsString's format is "path=/path/, max_age=60, domain=.foo.com". The order of options are irrelevant, the unit of the value of 'max_age' is second. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("createCookie", [nameValuePair,optionsString,])
def delete_cookie(self,name,optionsString):
"""
Delete a named cookie with specified path and domain. Be careful; to delete a cookie, you
need to delete it using the exact same path and domain that were used to create the cookie.
If the path is wrong, or the domain is wrong, the cookie simply won't be deleted. Also
note that specifying a domain that isn't a subset of the current domain will usually fail.
Since there's no way to discover at runtime the original path and domain of a given cookie,
we've added an option called 'recurse' to try all sub-domains of the current domain with
all paths that are a subset of the current path. Beware; this option can be slow. In
big-O notation, it operates in O(n\*m) time, where n is the number of dots in the domain
name and m is the number of slashes in the path.
'name' is the name of the cookie to be deleted
'optionsString' is options for the cookie. Currently supported options include 'path', 'domain' and 'recurse.' The optionsString's format is "path=/path/, domain=.foo.com, recurse=true". The order of options are irrelevant. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("deleteCookie", [name,optionsString,])
def delete_all_visible_cookies(self):
"""
Calls deleteCookie with recurse=true on all cookies visible to the current page.
As noted on the documentation for deleteCookie, recurse=true can be much slower
than simply deleting the cookies using a known domain/path.
"""
self.do_command("deleteAllVisibleCookies", [])
def set_browser_log_level(self,logLevel):
"""
Sets the threshold for browser-side logging messages; log messages beneath this threshold will be discarded.
Valid logLevel strings are: "debug", "info", "warn", "error" or "off".
To see the browser logs, you need to
either show the log window in GUI mode, or enable browser-side logging in Selenium RC.
'logLevel' is one of the following: "debug", "info", "warn", "error" or "off"
"""
self.do_command("setBrowserLogLevel", [logLevel,])
def run_script(self,script):
"""
Creates a new "script" tag in the body of the current test window, and
adds the specified text into the body of the command. Scripts run in
this way can often be debugged more easily than scripts executed using
Selenium's "getEval" command. Beware that JS exceptions thrown in these script
tags aren't managed by Selenium, so you should probably wrap your script
in try/catch blocks if there is any chance that the script will throw
an exception.
'script' is the JavaScript snippet to run
"""
self.do_command("runScript", [script,])
def add_location_strategy(self,strategyName,functionDefinition):
"""
Defines a new function for Selenium to locate elements on the page.
For example,
if you define the strategy "foo", and someone runs click("foo=blah"), we'll
run your function, passing you the string "blah", and click on the element
that your function
returns, or throw an "Element not found" error if your function returns null.
We'll pass three arguments to your function:
* locator: the string the user passed in
* inWindow: the currently selected window
* inDocument: the currently selected document
The function must return null if the element can't be found.
'strategyName' is the name of the strategy to define; this should use only letters [a-zA-Z] with no spaces or other punctuation.
'functionDefinition' is a string defining the body of a function in JavaScript. For example: ``return inDocument.getElementById(locator);``
"""
self.do_command("addLocationStrategy", [strategyName,functionDefinition,])
def capture_entire_page_screenshot(self,filename,kwargs):
"""
Saves the entire contents of the current window canvas to a PNG file.
Contrast this with the captureScreenshot command, which captures the
contents of the OS viewport (i.e. whatever is currently being displayed
on the monitor), and is implemented in the RC only. Currently this only
works in Firefox when running in chrome mode, and in IE non-HTA using
the EXPERIMENTAL "Snapsie" utility. The Firefox implementation is mostly
borrowed from the Screengrab! Firefox extension. Please see
http://www.screengrab.org and http://snapsie.sourceforge.net/ for
details.
'filename' is the path to the file to persist the screenshot as. No
filename extension will be appended by default. Directories will not be
created if they do not exist, and an exception will be thrown, possibly
by native code.
'kwargs' is a kwargs string that modifies the way the
screenshot is captured.
Example: "background=#CCFFDD"
Currently valid options:
* background
the background CSS for the HTML document.
This may be useful to set for capturing screenshots of
less-than-ideal layouts, for example where absolute positioning
causes the calculation of the canvas dimension to fail and a black
background is exposed (possibly obscuring black text).
"""
self.do_command("captureEntirePageScreenshot", [filename,kwargs,])
def rollup(self,rollupName,kwargs):
"""
Executes a command rollup, which is a series of commands with a unique
name, and optionally arguments that control the generation of the set of
commands. If any one of the rolled-up commands fails, the rollup is
considered to have failed. Rollups may also contain nested rollups.
'rollupName' is the name of the rollup command
'kwargs' is keyword arguments string that influences how the rollup expands into commands
"""
self.do_command("rollup", [rollupName,kwargs,])
def add_script(self,scriptContent,scriptTagId):
"""
Loads script content into a new script tag in the Selenium document. This
differs from the runScript command in that runScript adds the script tag
to the document of the AUT, not the Selenium document. The following
entities in the script content are replaced by the characters they
represent:
<
>
&
The corresponding remove command is removeScript.
'scriptContent' is the Javascript content of the script to add
'scriptTagId' is (optional) the id of the new script tag. If specified, and an element with this id already exists, this operation will fail.
"""
self.do_command("addScript", [scriptContent,scriptTagId,])
def remove_script(self,scriptTagId):
"""
Removes a script tag from the Selenium document identified by the given
id. Does nothing if the referenced tag doesn't exist.
'scriptTagId' is the id of the script element to remove.
"""
self.do_command("removeScript", [scriptTagId,])
def use_xpath_library(self,libraryName):
"""
Allows choice of one of the available libraries.
'libraryName' is name of the desired library Only the following three can be chosen:
* "ajaxslt" - Google's library
* "javascript-xpath" - Cybozu Labs' faster library
* "default" - The default library. Currently the default library is "ajaxslt" .
If libraryName isn't one of these three, then no change will be made.
"""
self.do_command("useXpathLibrary", [libraryName,])
def set_context(self,context):
"""
Writes a message to the status bar and adds a note to the browser-side
log.
'context' is the message to be sent to the browser
"""
self.do_command("setContext", [context,])
def attach_file(self,fieldLocator,fileLocator):
"""
Sets a file input (upload) field to the file listed in fileLocator
'fieldLocator' is an element locator
'fileLocator' is a URL pointing to the specified file. Before the file can be set in the input field (fieldLocator), Selenium RC may need to transfer the file to the local machine before attaching the file in a web page form. This is common in selenium grid configurations where the RC server driving the browser is not the same machine that started the test. Supported Browsers: Firefox ("\*chrome") only.
"""
self.do_command("attachFile", [fieldLocator,fileLocator,])
def capture_screenshot(self,filename):
"""
Captures a PNG screenshot to the specified file.
'filename' is the absolute path to the file to be written, e.g. "c:\blah\screenshot.png"
"""
self.do_command("captureScreenshot", [filename,])
def capture_screenshot_to_string(self):
"""
Capture a PNG screenshot. It then returns the file as a base 64 encoded string.
"""
return self.get_string("captureScreenshotToString", [])
def captureNetworkTraffic(self, type):
"""
Returns the network traffic seen by the browser, including headers, AJAX requests, status codes, and timings. When this function is called, the traffic log is cleared, so the returned content is only the traffic seen since the last call.
'type' is The type of data to return the network traffic as. Valid values are: json, xml, or plain.
"""
return self.get_string("captureNetworkTraffic", [type,])
    def capture_network_traffic(self, type):
        # PEP 8 (snake_case) alias for captureNetworkTraffic().
        return self.captureNetworkTraffic(type)
def addCustomRequestHeader(self, key, value):
"""
Tells the Selenium server to add the specificed key and value as a custom outgoing request header. This only works if the browser is configured to use the built in Selenium proxy.
'key' the header name.
'value' the header value.
"""
return self.do_command("addCustomRequestHeader", [key,value,])
    def add_custom_request_header(self, key, value):
        # PEP 8 (snake_case) alias for addCustomRequestHeader().
        return self.addCustomRequestHeader(key, value)
def capture_entire_page_screenshot_to_string(self,kwargs):
"""
Downloads a screenshot of the browser current window canvas to a
based 64 encoded PNG file. The \ *entire* windows canvas is captured,
including parts rendered outside of the current view port.
Currently this only works in Mozilla and when running in chrome mode.
'kwargs' is A kwargs string that modifies the way the screenshot is captured. Example: "background=#CCFFDD". This may be useful to set for capturing screenshots of less-than-ideal layouts, for example where absolute positioning causes the calculation of the canvas dimension to fail and a black background is exposed (possibly obscuring black text).
"""
return self.get_string("captureEntirePageScreenshotToString", [kwargs,])
def shut_down_selenium_server(self):
"""
Kills the running Selenium Server and all browser sessions. After you run this command, you will no longer be able to send
commands to the server; you can't remotely start the server once it has been stopped. Normally
you should prefer to run the "stop" command, which terminates the current browser session, rather than
shutting down the entire server.
"""
self.do_command("shutDownSeleniumServer", [])
def retrieve_last_remote_control_logs(self):
"""
Retrieve the last messages logged on a specific remote control. Useful for error reports, especially
when running multiple remote controls in a distributed environment. The maximum number of log messages
that can be retrieve is configured on remote control startup.
"""
return self.get_string("retrieveLastRemoteControlLogs", [])
def key_down_native(self,keycode):
"""
Simulates a user pressing a key (without releasing it yet) by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyDownNative", [keycode,])
def key_up_native(self,keycode):
"""
Simulates a user releasing a key by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyUpNative", [keycode,])
def key_press_native(self,keycode):
"""
Simulates a user pressing and releasing a key by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyPressNative", [keycode,])
| apache-2.0 |
JulienMcJay/eclock | windows/Python27/Lib/distutils/unixccompiler.py | 90 | 12314 | """distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id$"
import os, sys, re
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
class UnixCCompiler(CCompiler):
    """CCompiler subclass for the "typical" Unix-style command-line C
    toolchain: a cc-like driver for compiling and linking, 'ar' (plus
    optionally 'ranlib') for static libraries, and -D/-U/-I/-l/-L style
    options.

    NOTE: this is Python 2 code ('except E, msg' / 'raise E, msg' syntax).
    """

    compiler_type = 'unix'

    # These are used by CCompiler in two places: the constructor sets
    # instance attributes 'preprocessor', 'compiler', etc. from them, and
    # 'set_executable()' allows any of these to be set. The defaults here
    # are pretty generic; they will probably have to be set by an outsider
    # (eg. using information discovered by the sysconfig about building
    # Python extensions).
    executables = {'preprocessor' : None,
                   'compiler'     : ["cc"],
                   'compiler_so'  : ["cc"],
                   'compiler_cxx' : ["cc"],
                   'linker_so'    : ["cc", "-shared"],
                   'linker_exe'   : ["cc"],
                   'archiver'     : ["ar", "-cr"],
                   'ranlib'       : None,
                  }

    if sys.platform[:6] == "darwin":
        executables['ranlib'] = ["ranlib"]

    # Needed for the filename generation methods provided by the base
    # class, CCompiler. NB. whoever instantiates/uses a particular
    # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
    # reasonable common default here, but it's not necessarily used on all
    # Unices!
    src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".so"
    dylib_lib_extension = ".dylib"
    static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
    if sys.platform == "cygwin":
        exe_extension = ".exe"

    def preprocess(self, source,
                   output_file=None, macros=None, include_dirs=None,
                   extra_preargs=None, extra_postargs=None):
        """Run the preprocessor over 'source', writing to 'output_file'
        (or stdout when it is None).  Raises CompileError on failure."""
        ignore, macros, include_dirs = \
            self._fix_compile_args(None, macros, include_dirs)
        pp_opts = gen_preprocess_options(macros, include_dirs)
        pp_args = self.preprocessor + pp_opts
        if output_file:
            pp_args.extend(['-o', output_file])
        if extra_preargs:
            pp_args[:0] = extra_preargs
        if extra_postargs:
            pp_args.extend(extra_postargs)
        pp_args.append(source)

        # We need to preprocess: either we're being forced to, or we're
        # generating output to stdout, or there's a target output file and
        # the source file is newer than the target (or the target doesn't
        # exist).
        if self.force or output_file is None or newer(source, output_file):
            if output_file:
                self.mkpath(os.path.dirname(output_file))
            try:
                self.spawn(pp_args)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile a single source file 'src' into object file 'obj'.
        Raises CompileError on failure."""
        compiler_so = self.compiler_so
        if sys.platform == 'darwin':
            # Drop/adjust flags (e.g. -arch, -isysroot) that the current
            # OS X toolchain cannot honour.
            compiler_so = _osx_support.compiler_fixup(compiler_so,
                                                    cc_args + extra_postargs)
        try:
            self.spawn(compiler_so + cc_args + [src, '-o', obj] +
                       extra_postargs)
        except DistutilsExecError, msg:
            raise CompileError, msg

    def create_static_lib(self, objects, output_libname,
                          output_dir=None, debug=0, target_lang=None):
        """Archive 'objects' into a static library via the configured
        archiver (and optionally ranlib).  Raises LibError on failure."""
        objects, output_dir = self._fix_object_args(objects, output_dir)

        output_filename = \
            self.library_filename(output_libname, output_dir=output_dir)

        if self._need_link(objects, output_filename):
            self.mkpath(os.path.dirname(output_filename))
            self.spawn(self.archiver +
                       [output_filename] +
                       objects + self.objects)

            # Not many Unices required ranlib anymore -- SunOS 4.x is, I
            # think the only major Unix that does. Maybe we need some
            # platform intelligence here to skip ranlib if it's not
            # needed -- or maybe Python's configure script took care of
            # it for us, hence the check for leading colon.
            if self.ranlib:
                try:
                    self.spawn(self.ranlib + [output_filename])
                except DistutilsExecError, msg:
                    raise LibError, msg
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    def link(self, target_desc, objects,
             output_filename, output_dir=None, libraries=None,
             library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link 'objects' into 'output_filename' -- an executable or a
        shared object depending on 'target_desc'.  Raises LinkError on
        failure."""
        objects, output_dir = self._fix_object_args(objects, output_dir)
        libraries, library_dirs, runtime_library_dirs = \
            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)

        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
                                   libraries)
        if type(output_dir) not in (StringType, NoneType):
            raise TypeError, "'output_dir' must be a string or None"
        if output_dir is not None:
            output_filename = os.path.join(output_dir, output_filename)

        if self._need_link(objects, output_filename):
            ld_args = (objects + self.objects +
                       lib_opts + ['-o', output_filename])
            if debug:
                ld_args[:0] = ['-g']
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            try:
                if target_desc == CCompiler.EXECUTABLE:
                    linker = self.linker_exe[:]
                else:
                    linker = self.linker_so[:]
                if target_lang == "c++" and self.compiler_cxx:
                    # skip over environment variable settings if /usr/bin/env
                    # is used to set up the linker's environment.
                    # This is needed on OSX. Note: this assumes that the
                    # normal and C++ compiler have the same environment
                    # settings.
                    i = 0
                    if os.path.basename(linker[0]) == "env":
                        i = 1
                        while '=' in linker[i]:
                            i = i + 1

                    linker[i] = self.compiler_cxx[i]

                if sys.platform == 'darwin':
                    linker = _osx_support.compiler_fixup(linker, ld_args)

                self.spawn(linker + ld_args)
            except DistutilsExecError, msg:
                raise LinkError, msg
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option(self, dir):
        """Return the option adding 'dir' to the library search path."""
        return "-L" + dir

    def _is_gcc(self, compiler_name):
        # Substring match -- also catches names like "gcc-4.8"/"x86_64-g++".
        return "gcc" in compiler_name or "g++" in compiler_name

    def runtime_library_dir_option(self, dir):
        """Return the linker option(s) adding 'dir' to the runtime (rpath)
        library search path; the spelling varies per platform/compiler."""
        # XXX Hackish, at the very least. See Python bug #445902:
        # http://sourceforge.net/tracker/index.php
        #   ?func=detail&aid=445902&group_id=5470&atid=105470
        # Linkers on different platforms need different options to
        # specify that directories need to be added to the list of
        # directories searched for dependencies when a dynamic library
        # is sought. GCC has to be told to pass the -R option through
        # to the linker, whereas other compilers just know this.
        # Other compilers may need something slightly different. At
        # this time, there's no way to determine this information from
        # the configuration data stored in the Python installation, so
        # we use this hack.
        compiler = os.path.basename(sysconfig.get_config_var("CC"))
        if sys.platform[:6] == "darwin":
            # MacOSX's linker doesn't understand the -R flag at all
            return "-L" + dir
        elif sys.platform[:5] == "hp-ux":
            if self._is_gcc(compiler):
                return ["-Wl,+s", "-L" + dir]
            return ["+s", "-L" + dir]
        elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
            return ["-rpath", dir]
        elif self._is_gcc(compiler):
            return "-Wl,-R" + dir
        else:
            return "-R" + dir

    def library_option(self, lib):
        """Return the option linking against library 'lib'."""
        return "-l" + lib

    def find_library_file(self, dirs, lib, debug=0):
        """Search 'dirs' for library 'lib', preferring the dylib variant,
        then shared, then static.  Returns the full path or None."""
        shared_f = self.library_filename(lib, lib_type='shared')
        dylib_f = self.library_filename(lib, lib_type='dylib')
        static_f = self.library_filename(lib, lib_type='static')

        if sys.platform == 'darwin':
            # On OSX users can specify an alternate SDK using
            # '-isysroot', calculate the SDK root if it is specified
            # (and use it further on)
            cflags = sysconfig.get_config_var('CFLAGS')
            m = re.search(r'-isysroot\s+(\S+)', cflags)
            if m is None:
                sysroot = '/'
            else:
                sysroot = m.group(1)

        for dir in dirs:
            shared = os.path.join(dir, shared_f)
            dylib = os.path.join(dir, dylib_f)
            static = os.path.join(dir, static_f)

            if sys.platform == 'darwin' and (
                dir.startswith('/System/') or (
                dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):

                # System/non-local /usr dirs live under the SDK sysroot.
                shared = os.path.join(sysroot, dir[1:], shared_f)
                dylib = os.path.join(sysroot, dir[1:], dylib_f)
                static = os.path.join(sysroot, dir[1:], static_f)

            # We're second-guessing the linker here, with not much hard
            # data to go on: GCC seems to prefer the shared library, so I'm
            # assuming that *all* Unix C compilers do. And of course I'm
            # ignoring even GCC's "-static" option. So sue me.
            if os.path.exists(dylib):
                return dylib
            elif os.path.exists(shared):
                return shared
            elif os.path.exists(static):
                return static

        # Oops, didn't find it in *any* of 'dirs'
        return None
| gpl-2.0 |
MeteorAdminz/viper | viper/modules/pymacho/MachOMainCommand.py | 6 | 1744 | # encoding: utf-8
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack, pack
from viper.modules.pymacho.MachOLoadCommand import MachOLoadCommand
from viper.modules.pymacho.Utils import green
class MachOMainCommand(MachOLoadCommand):
    """LC_MAIN load command: the executable's entry-point file offset and
    initial stack size (struct entry_point_command, two uint64 fields).

    NOTE: Python 2 code (print statements).
    """

    entryoff = 0   # file offset of the program entry point
    stacksize = 0  # initial stack size, 0 means the system default

    def __init__(self, macho_file=None, cmd=0):
        self.cmd = cmd
        if macho_file is not None:
            self.parse(macho_file)

    def parse(self, macho_file):
        """Read entryoff and stacksize (two little-endian uint64) from the
        current position of 'macho_file'."""
        self.entryoff = unpack('<Q', macho_file.read(8))[0]
        self.stacksize = unpack('<Q', macho_file.read(8))[0]

    def write(self, macho_file):
        """Serialize the command to 'macho_file'; the cmdsize field is
        written as 0 first and back-patched once the total size is known."""
        before = macho_file.tell()
        macho_file.write(pack('<II', self.cmd, 0x0))  # 0x0 = cmdsize placeholder
        macho_file.write(pack('<QQ', self.entryoff, self.stacksize))
        after = macho_file.tell()
        # Back-patch cmdsize (second uint32 of the header), then restore
        # the stream position to the end of the command.
        macho_file.seek(before+4)
        macho_file.write(pack('<I', after-before))
        macho_file.seek(after)

    def display(self, before=''):
        # Pretty-print the command, prefixing every line with 'before'.
        print before + green("[+]")+" LC_MAIN"
        print before + "\t- entryoff : 0x%x" % self.entryoff
        print before + "\t- stacksize : 0x%x" % self.stacksize
| bsd-3-clause |
cloudbase/nova-virtualbox | nova/tests/functional/v3/test_server_usage.py | 15 | 1463 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class ServerUsageSampleJsonTest(test_servers.ServersSampleBase):
    """API sample tests for the os-server-usage extension."""

    extension_name = 'os-server-usage'

    def setUp(self):
        """Create the server whose usage data the samples are checked
        against."""
        super(ServerUsageSampleJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _response_subs(self):
        # Template substitutions shared by both tests: the created
        # server's id plus a pattern matching any host id.
        subs = self._get_regexes()
        subs['id'] = self.uuid
        subs['hostid'] = '[a-f0-9]+'
        return subs

    def test_show(self):
        response = self._do_get('servers/%s' % self.uuid)
        self._verify_response('server-get-resp', self._response_subs(),
                              response, 200)

    def test_details(self):
        response = self._do_get('servers/detail')
        self._verify_response('servers-detail-resp', self._response_subs(),
                              response, 200)
dcroc16/skunk_works | google_appengine/lib/django-1.5/django/views/decorators/cache.py | 106 | 3973 | from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
    """
    Decorator for views that tries getting the page from the cache and
    populates the cache if the page isn't in the cache yet.

    The cache is keyed by the URL and some data from the headers.
    Additionally there is the key prefix that is used to distinguish different
    cache areas in a multi-site setup. You could use the
    sites.get_current_site().domain, for example, as that is unique across a Django
    project.

    Additionally, all headers from the response's Vary header will be taken
    into account on caching -- just like the middleware does.

    The only non-deprecated call form is::

        @cache_page(timeout, cache=..., key_prefix=...)

    All other forms accepted below emit a DeprecationWarning.
    """
    # We need backwards compatibility with code which spells it this way:
    #   def my_view(): pass
    #   my_view = cache_page(my_view, 123)
    # and this way:
    #   my_view = cache_page(123)(my_view)
    # and this:
    #   my_view = cache_page(my_view, 123, key_prefix="foo")
    # and this:
    #   my_view = cache_page(123, key_prefix="foo")(my_view)
    # and possibly this way (?):
    #   my_view = cache_page(123, my_view)
    # and also this way:
    #   my_view = cache_page(my_view)
    # and also this way:
    #   my_view = cache_page()(my_view)

    # We also add some asserts to give better error messages in case people are
    # using other ways to call cache_page that no longer work.
    cache_alias = kwargs.pop('cache', None)
    key_prefix = kwargs.pop('key_prefix', None)
    assert not kwargs, "The only keyword arguments are cache and key_prefix"

    def warn():
        # Lazy import: only pay for 'warnings' on the deprecated paths.
        import warnings
        warnings.warn('The cache_page decorator must be called like: '
                      'cache_page(timeout, [cache=cache name], [key_prefix=key prefix]). '
                      'All other ways are deprecated.',
                      DeprecationWarning,
                      stacklevel=2)

    if len(args) > 1:
        assert len(args) == 2, "cache_page accepts at most 2 arguments"
        warn()
        if callable(args[0]):
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        elif callable(args[1]):
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
        else:
            assert False, "cache_page must be passed a view function if called with two arguments"
    elif len(args) == 1:
        if callable(args[0]):
            warn()
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
        else:
            # The One True Way
            return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
    else:
        warn()
        return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
    """Decorator factory that patches the response's Cache-Control header.

    Keyword arguments (e.g. ``max_age=3600``, ``private=True``) are
    forwarded unchanged to ``patch_cache_control``.
    """
    def _decorator(viewfunc):
        @wraps(viewfunc, assigned=available_attrs(viewfunc))
        def _wrapped(request, *args, **kw):
            response = viewfunc(request, *args, **kw)
            patch_cache_control(response, **kwargs)
            return response
        return _wrapped
    return _decorator
def never_cache(view_func):
    """
    Decorator that adds headers to a response so that it will
    never be cached.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _no_cache_view(request, *args, **kwargs):
        response = view_func(request, *args, **kwargs)
        add_never_cache_headers(response)
        return response
    return _no_cache_view
| mit |
alvarolopez/nova | nova/tests/unit/test_metadata.py | 14 | 38133 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for metadata service."""
import base64
import hashlib
import hmac
import re
try:
import cPickle as pickle
except ImportError:
import pickle
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova.api.metadata import password
from nova import block_device
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_security_group
from nova.virt import netutils
CONF = cfg.CONF
USER_DATA_STRING = ("This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
def fake_inst_obj(context):
    """Build a fake Instance (with an empty network-info cache and the
    default flavor) suitable for metadata tests."""
    values = {
        'context': context,
        'id': 1,
        'user_id': 'fake_user',
        'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
        'project_id': 'test',
        'key_name': "key",
        'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
        'host': 'test',
        'launch_index': 1,
        'reservation_id': 'r-xxxxxxxx',
        'user_data': ENCODE_USER_DATA_STRING,
        'image_ref': 7,
        'kernel_id': None,
        'ramdisk_id': None,
        'vcpus': 1,
        'fixed_ips': [],
        'root_device_name': '/dev/sda1',
        'hostname': 'test.novadomain',
        'display_name': 'my_displayname',
        'metadata': {},
        'default_ephemeral_device': None,
        'default_swap_device': None,
        'system_metadata': {},
    }
    inst = objects.Instance(**values)
    inst.info_cache = objects.InstanceInfoCache(
        context=context, instance_uuid=inst.uuid,
        network_info=network_model.NetworkInfo([]))
    # Patch out save() so setting the flavor does not hit the database.
    with mock.patch.object(inst, 'save'):
        inst.set_flavor(flavors.get_default_flavor())
    return inst
def return_non_existing_address(*args, **kwarg):
    """Stub that always raises NotFound, simulating a failed address lookup."""
    raise exception.NotFound()
def fake_InstanceMetadata(stubs, inst_data, address=None,
                          sgroups=None, content=None, extra_md=None,
                          vd_driver=None, network_info=None):
    """Create an InstanceMetadata for 'inst_data' with the security-group
    DB lookup stubbed to return 'sgroups' (default: one group named
    'default')."""
    if sgroups is None:
        sgroups = [dict(test_security_group.fake_secgroup,
                        name='default')]

    stubs.Set(api, 'security_group_get_by_instance',
              lambda *args, **kwargs: sgroups)
    return base.InstanceMetadata(inst_data, address=address,
                                 content=content or [],
                                 extra_md=extra_md or {},
                                 vd_driver=vd_driver,
                                 network_info=network_info)
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
                 fake_get_metadata=None, headers=None,
                 fake_get_metadata_by_instance_id=None, app=None):
    """Issue a WSGI request for 'relpath' against the metadata handler and
    return the response, stubbing out the metadata lookups as requested.

    When 'fake_get_metadata' is not supplied, the by-remote-address lookup
    simply returns 'mdinst'.
    """
    if app is None:
        app = handler.MetadataRequestHandler()

    if fake_get_metadata is None:
        fake_get_metadata = lambda address: mdinst

    if stubs:
        stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
        if fake_get_metadata_by_instance_id:
            stubs.Set(app, 'get_metadata_by_instance_id',
                      fake_get_metadata_by_instance_id)
    elif fake_get_metadata_by_instance_id:
        stubs.Set(app, 'get_metadata_by_instance_id',
                  fake_get_metadata_by_instance_id)

    request = webob.Request.blank(relpath)
    request.remote_addr = address
    if headers is not None:
        request.headers.update(headers)
    return request.get_response(app)
class MetadataTestCase(test.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def test_can_pickle_metadata(self):
# Make sure that InstanceMetadata is possible to pickle. This is
# required for memcache backend to work correctly.
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
pickle.dumps(md, protocol=0)
def test_user_data(self):
inst = self.instance.obj_clone()
inst['user_data'] = base64.b64encode("happy")
md = fake_InstanceMetadata(self.stubs, inst)
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
def test_no_user_data(self):
inst = self.instance.obj_clone()
inst.user_data = None
md = fake_InstanceMetadata(self.stubs, inst)
obj = object()
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
obj)
def test_security_groups(self):
inst = self.instance.obj_clone()
sgroups = [dict(test_security_group.fake_secgroup, name='default'),
dict(test_security_group.fake_secgroup, name='other')]
expected = ['default', 'other']
md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['security-groups'], expected)
def test_local_hostname_fqdn(self):
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
# Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = objects.Instance(**{'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None,
'default_ephemeral_device': None,
'default_swap_device': None})
instance_ref1 = objects.Instance(**{'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1',
'default_ephemeral_device': None,
'default_swap_device': None})
def fake_bdm_get(ctxt, uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': True,
'device_name': '/dev/sdh'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdm_get)
expected = {'ami': 'sda1',
'root': '/dev/sda1',
'ephemeral0': '/dev/sdb',
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
conductor_api.LocalAPI()
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref0), block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref1), expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
self.assertEqual(base.ec2_md_print(pubkey_ent),
"0=%s" % self.instance['key_name'])
self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
self.instance['key_data'])
def test_image_type_ramdisk(self):
inst = self.instance.obj_clone()
inst['ramdisk_id'] = 'ari-853667c0'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/latest/meta-data/ramdisk-id")
self.assertIsNotNone(data)
self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
def test_image_type_kernel(self):
inst = self.instance.obj_clone()
inst['kernel_id'] = 'aki-c2e26ff2'
md = fake_InstanceMetadata(self.stubs, inst)
data = md.lookup("/2009-04-04/meta-data/kernel-id")
self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
self.assertEqual(
md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
def test_image_type_no_kernel_raises(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self.stubs, inst)
self.assertRaises(base.InvalidMetadataPath,
md.lookup, "/2009-04-04/meta-data/kernel-id")
def test_check_version(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self.stubs, inst)
self.assertTrue(md._check_version('1.0', '2009-04-04'))
self.assertFalse(md._check_version('2009-04-04', '1.0'))
self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
def test_InstanceMetadata_uses_passed_network_info(self):
network_info = []
self.mox.StubOutWithMock(netutils, "get_injected_network_template")
netutils.get_injected_network_template(network_info).AndReturn(False)
self.mox.ReplayAll()
base.InstanceMetadata(fake_inst_obj(self.context),
network_info=network_info)
def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
inst_md = base.InstanceMetadata(inst)
for (path, value) in inst_md.metadata_for_config_drive():
self.assertIsNotNone(path)
def test_InstanceMetadata_queries_network_API_when_needed(self):
network_info_from_api = []
self.mox.StubOutWithMock(netutils, "get_injected_network_template")
netutils.get_injected_network_template(
network_info_from_api).AndReturn(False)
self.mox.ReplayAll()
base.InstanceMetadata(fake_inst_obj(self.context))
def test_local_ipv4(self):
nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self.stubs, self.instance,
network_info=nw_info, address="fake")
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(expected_local, data['meta-data']['local-ipv4'])
def test_local_ipv4_from_nw_info(self):
nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self.stubs, self.instance,
network_info=nw_info)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
    def test_local_ipv4_from_address(self):
        """With empty network_info, local-ipv4 falls back to the address
        argument verbatim."""
        expected_local = "fake"
        md = fake_InstanceMetadata(self.stubs, self.instance,
                                   network_info=[], address="fake")
        data = md.get_ec2_metadata(version='2009-04-04')
        self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
class OpenStackMetadataTestCase(test.TestCase):
    """Tests for the /openstack/ metadata tree (JSON-format metadata):
    version listings, meta_data.json content, user_data, password and
    vendor_data handling."""
    def setUp(self):
        super(OpenStackMetadataTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        # use the local conductor so no RPC round-trips are attempted
        self.flags(use_local=True, group='conductor')
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
    def test_top_level_listing(self):
        # request for /openstack/<version>/ should show metadata.json
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        result = mdinst.lookup("/openstack")
        # trailing / should not affect anything
        self.assertEqual(result, mdinst.lookup("/openstack/"))
        # the 'content' should not show up in directory listing
        self.assertNotIn(base.CONTENT_DIR, result)
        self.assertIn('2012-08-10', result)
        self.assertIn('latest', result)
    def test_version_content_listing(self):
        # request for /openstack/<version>/ should show metadata.json
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        listing = mdinst.lookup("/openstack/2012-08-10")
        self.assertIn("meta_data.json", listing)
    def test_returns_apis_supported_in_havana_version(self):
        mdinst = fake_InstanceMetadata(self.stubs, self.instance)
        havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
        self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
                          base.VD_JSON_NAME], havana_supported_apis)
    def test_returns_apis_supported_in_folsom_version(self):
        mdinst = fake_InstanceMetadata(self.stubs, self.instance)
        folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
        self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
                         folsom_supported_apis)
    def test_returns_apis_supported_in_grizzly_version(self):
        mdinst = fake_InstanceMetadata(self.stubs, self.instance)
        grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
        self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
                         grizzly_supported_apis)
    def test_metadata_json(self):
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        content = [
            ('/etc/my.conf', "content of my.conf"),
            ('/root/hello', "content of /root/hello"),
        ]
        mdinst = fake_InstanceMetadata(self.stubs, inst,
            content=content)
        # both the dated and the 'latest' alias must resolve
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
        mddict = jsonutils.loads(mdjson)
        self.assertEqual(mddict['uuid'], self.instance['uuid'])
        self.assertIn('files', mddict)
        self.assertIn('public_keys', mddict)
        self.assertEqual(mddict['public_keys'][self.instance['key_name']],
            self.instance['key_data'])
        self.assertIn('launch_index', mddict)
        self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
        # verify that each of the things we put in content
        # resulted in an entry in 'files', that their content
        # there is as expected, and that /content lists them.
        for (path, content) in content:
            fent = [f for f in mddict['files'] if f['path'] == path]
            self.assertEqual(1, len(fent))
            fent = fent[0]
            found = mdinst.lookup("/openstack%s" % fent['content_path'])
            self.assertEqual(found, content)
    def test_x509_keypair(self):
        # check if the x509 content is set, if the keypair type is x509.
        fakes.stub_out_key_pair_funcs(self.stubs, type='x509')
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        mddict = jsonutils.loads(mdjson)
        # keypair is stubbed-out, so it's public_key is 'public_key'.
        expected = {'name': self.instance['key_name'],
                    'type': 'x509',
                    'data': 'public_key'}
        self.assertEqual([expected], mddict['keys'])
    def test_extra_md(self):
        # make sure extra_md makes it through to metadata
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        extra = {'foo': 'bar', 'mylist': [1, 2, 3],
                 'mydict': {"one": 1, "two": 2}}
        mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        mddict = jsonutils.loads(mdjson)
        for key, val in six.iteritems(extra):
            self.assertEqual(mddict[key], val)
    def test_password(self):
        # the password path resolves to the password handler callable
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        result = mdinst.lookup("/openstack/latest/password")
        self.assertEqual(result, password.handle_password)
    def test_userdata(self):
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
        self.assertEqual(USER_DATA_STRING, userdata_found)
        # since we had user-data in this instance, it should be in listing
        self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
        inst.user_data = None
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        # since this instance had no user-data it should not be there.
        self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
        self.assertRaises(base.InvalidMetadataPath,
            mdinst.lookup, "/openstack/2012-08-10/user_data")
    def test_random_seed(self):
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        # verify that 2013-04-04 has the 'random' field
        mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
        mddict = jsonutils.loads(mdjson)
        self.assertIn("random_seed", mddict)
        # the seed is base64-encoded and decodes to 512 raw bytes
        self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
        # verify that older version do not have it
        mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
        self.assertNotIn("random_seed", jsonutils.loads(mdjson))
    def test_no_dashes_in_metadata(self):
        # top level entries in meta_data should not contain '-' in their name
        fakes.stub_out_key_pair_funcs(self.stubs)
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        mdjson = jsonutils.loads(
            mdinst.lookup("/openstack/latest/meta_data.json"))
        self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
    def test_vendor_data_presence(self):
        inst = self.instance.obj_clone()
        mdinst = fake_InstanceMetadata(self.stubs, inst)
        # verify that 2013-10-17 has the vendor_data.json file
        result = mdinst.lookup("/openstack/2013-10-17")
        self.assertIn('vendor_data.json', result)
        # verify that older version do not have it
        result = mdinst.lookup("/openstack/2013-04-04")
        self.assertNotIn('vendor_data.json', result)
    def test_vendor_data_response(self):
        inst = self.instance.obj_clone()
        mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
        # a vendor-data driver that echoes mydata plus the instance uuid
        class myVdriver(base.VendorDataDriver):
            def __init__(self, *args, **kwargs):
                super(myVdriver, self).__init__(*args, **kwargs)
                data = mydata.copy()
                uuid = kwargs['instance']['uuid']
                data.update({'inst_uuid': uuid})
                self.data = data
            def get(self):
                return self.data
        mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
        # verify that 2013-10-17 has the vendor_data.json file
        vdpath = "/openstack/2013-10-17/vendor_data.json"
        vd = jsonutils.loads(mdinst.lookup(vdpath))
        # the instance should be passed through, and our class copies the
        # uuid through to 'inst_uuid'.
        self.assertEqual(vd['inst_uuid'], inst['uuid'])
        # check the other expected values
        for k, v in mydata.items():
            self.assertEqual(vd[k], v)
class MetadataHandlerTestCase(test.TestCase):
    """Test that metadata is returning proper values.

    Exercises the WSGI request handler: path routing, content types,
    the X-Forwarded-For path, the Neutron metadata-proxy signed-header
    path, and the metadata lookup cache.
    """
    def setUp(self):
        super(MetadataHandlerTestCase, self).setUp()
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        # use the local conductor so no RPC round-trips are attempted
        self.flags(use_local=True, group='conductor')
        self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
            address=None, sgroups=None)
    def test_callable(self):
        # a lookup() may return a callable which the handler invokes
        def verify(req, meta_data):
            self.assertIsInstance(meta_data, CallableMD)
            return "foo"
        class CallableMD(object):
            def lookup(self, path_info):
                return verify
        response = fake_request(self.stubs, CallableMD(), "/bar")
        self.assertEqual(response.status_int, 200)
        self.assertEqual(response.body, "foo")
    def test_root(self):
        expected = "\n".join(base.VERSIONS) + "\nlatest"
        response = fake_request(self.stubs, self.mdinst, "/")
        self.assertEqual(response.body, expected)
        # path normalization: '..' segments collapse back to the root
        response = fake_request(self.stubs, self.mdinst, "/foo/../")
        self.assertEqual(response.body, expected)
    def test_root_metadata_proxy_enabled(self):
        self.flags(service_metadata_proxy=True,
                   group='neutron')
        expected = "\n".join(base.VERSIONS) + "\nlatest"
        response = fake_request(self.stubs, self.mdinst, "/")
        self.assertEqual(response.body, expected)
        response = fake_request(self.stubs, self.mdinst, "/foo/../")
        self.assertEqual(response.body, expected)
    def test_version_root(self):
        response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
        response_ctype = response.headers['Content-Type']
        self.assertTrue(response_ctype.startswith("text/plain"))
        self.assertEqual(response.body, 'meta-data/\nuser-data')
        # unknown versions must 404
        response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
        self.assertEqual(response.status_int, 404)
    def test_json_data(self):
        # JSON endpoints must be served with an application/json type
        fakes.stub_out_key_pair_funcs(self.stubs)
        response = fake_request(self.stubs, self.mdinst,
                                "/openstack/latest/meta_data.json")
        response_ctype = response.headers['Content-Type']
        self.assertTrue(response_ctype.startswith("application/json"))
        response = fake_request(self.stubs, self.mdinst,
                                "/openstack/latest/vendor_data.json")
        response_ctype = response.headers['Content-Type']
        self.assertTrue(response_ctype.startswith("application/json"))
    def test_user_data_non_existing_fixed_address(self):
        self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
                       return_non_existing_address)
        response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
                               "127.1.1.1")
        self.assertEqual(response.status_int, 404)
    def test_fixed_address_none(self):
        # a request with no remote address cannot be mapped to an instance
        response = fake_request(None, self.mdinst,
                                relpath="/2009-04-04/user-data", address=None)
        self.assertEqual(response.status_int, 500)
    def test_invalid_path_is_404(self):
        response = fake_request(self.stubs, self.mdinst,
                                relpath="/2009-04-04/user-data-invalid")
        self.assertEqual(response.status_int, 404)
    def test_user_data_with_use_forwarded_header(self):
        expected_addr = "192.192.192.2"
        def fake_get_metadata(address):
            if address == expected_addr:
                return self.mdinst
            else:
                raise Exception("Expected addr of %s, got %s" %
                                (expected_addr, address))
        self.flags(use_forwarded_for=True)
        response = fake_request(self.stubs, self.mdinst,
                                relpath="/2009-04-04/user-data",
                                address="168.168.168.1",
                                fake_get_metadata=fake_get_metadata,
                                headers={'X-Forwarded-For': expected_addr})
        self.assertEqual(response.status_int, 200)
        response_ctype = response.headers['Content-Type']
        self.assertTrue(response_ctype.startswith("text/plain"))
        self.assertEqual(response.body,
                         base64.b64decode(self.instance['user_data']))
        # without the forwarded header the stub raises -> 500
        response = fake_request(self.stubs, self.mdinst,
                                relpath="/2009-04-04/user-data",
                                address="168.168.168.1",
                                fake_get_metadata=fake_get_metadata,
                                headers=None)
        self.assertEqual(response.status_int, 500)
    @mock.patch('nova.utils.constant_time_compare')
    def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
        # the signature check must go through constant_time_compare
        # (timing-attack resistance)
        mock_compare.side_effect = test.TestingException
        req = webob.Request.blank('/')
        hnd = handler.MetadataRequestHandler()
        req.headers['X-Instance-ID'] = 'fake-inst'
        req.headers['X-Instance-ID-Signature'] = 'fake-sig'
        req.headers['X-Tenant-ID'] = 'fake-proj'
        self.assertRaises(test.TestingException,
                          hnd._handle_instance_id_request, req)
        self.assertEqual(1, mock_compare.call_count)
    def test_user_data_with_neutron_instance_id(self):
        expected_instance_id = 'a-b-c-d'
        def fake_get_metadata(instance_id, remote_address):
            if remote_address is None:
                raise Exception('Expected X-Forwared-For header')
            elif instance_id == expected_instance_id:
                return self.mdinst
            else:
                # raise the exception to aid with 500 response code test
                raise Exception("Expected instance_id of %s, got %s" %
                                (expected_instance_id, instance_id))
        signed = hmac.new(
            CONF.neutron.metadata_proxy_shared_secret,
            expected_instance_id,
            hashlib.sha256).hexdigest()
        # try a request with service disabled
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            headers={'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 200)
        # now enable the service
        self.flags(service_metadata_proxy=True,
                   group='neutron')
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 200)
        response_ctype = response.headers['Content-Type']
        self.assertTrue(response_ctype.startswith("text/plain"))
        self.assertEqual(response.body,
                         base64.b64decode(self.instance['user_data']))
        # mismatched signature
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': ''})
        self.assertEqual(response.status_int, 403)
        # missing X-Tenant-ID from request
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'a-b-c-d',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 400)
        # mismatched X-Tenant-ID
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'FAKE',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 404)
        # without X-Forwarded-For
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 500)
        # unexpected Instance-ID
        signed = hmac.new(
            CONF.neutron.metadata_proxy_shared_secret,
            'z-z-z-z',
            hashlib.sha256).hexdigest()
        response = fake_request(
            self.stubs, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata_by_instance_id=fake_get_metadata,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'z-z-z-z',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(response.status_int, 500)
    def test_get_metadata(self):
        def _test_metadata_path(relpath):
            # recursively confirm a http 200 from all meta-data elements
            # available at relpath.
            response = fake_request(self.stubs, self.mdinst,
                                    relpath=relpath)
            for item in response.body.split('\n'):
                if 'public-keys' in relpath:
                    # meta-data/public-keys/0=keyname refers to
                    # meta-data/public-keys/0
                    item = item.split('=')[0]
                if item.endswith('/'):
                    path = relpath + '/' + item
                    _test_metadata_path(path)
                    continue
                path = relpath + '/' + item
                response = fake_request(self.stubs, self.mdinst, relpath=path)
                self.assertEqual(response.status_int, 200, message=path)
        _test_metadata_path('/2009-04-04/meta-data')
    def _metadata_handler_with_instance_id(self, hnd):
        # helper: issue one correctly-signed proxy request through `hnd`
        expected_instance_id = 'a-b-c-d'
        signed = hmac.new(
            CONF.neutron.metadata_proxy_shared_secret,
            expected_instance_id,
            hashlib.sha256).hexdigest()
        self.flags(service_metadata_proxy=True, group='neutron')
        response = fake_request(
            None, self.mdinst,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2",
            fake_get_metadata=False,
            app=hnd,
            headers={'X-Forwarded-For': '192.192.192.2',
                     'X-Instance-ID': 'a-b-c-d',
                     'X-Tenant-ID': 'test',
                     'X-Instance-ID-Signature': signed})
        self.assertEqual(200, response.status_int)
        self.assertEqual(base64.b64decode(self.instance['user_data']),
                         response.body)
    @mock.patch.object(base, 'get_metadata_by_instance_id')
    def test_metadata_handler_with_instance_id(self, get_by_uuid):
        # test twice to ensure that the cache works
        get_by_uuid.return_value = self.mdinst
        self.flags(metadata_cache_expiration=15)
        hnd = handler.MetadataRequestHandler()
        self._metadata_handler_with_instance_id(hnd)
        self._metadata_handler_with_instance_id(hnd)
        self.assertEqual(1, get_by_uuid.call_count)
    @mock.patch.object(base, 'get_metadata_by_instance_id')
    def test_metadata_handler_with_instance_id_no_cache(self, get_by_uuid):
        # test twice to ensure that disabling the cache works
        get_by_uuid.return_value = self.mdinst
        self.flags(metadata_cache_expiration=0)
        hnd = handler.MetadataRequestHandler()
        self._metadata_handler_with_instance_id(hnd)
        self._metadata_handler_with_instance_id(hnd)
        self.assertEqual(2, get_by_uuid.call_count)
    def _metadata_handler_with_remote_address(self, hnd):
        # helper: issue one plain (non-proxy) request through `hnd`
        response = fake_request(
            None, self.mdinst,
            fake_get_metadata=False,
            app=hnd,
            relpath="/2009-04-04/user-data",
            address="192.192.192.2")
        self.assertEqual(200, response.status_int)
        self.assertEqual(base64.b64decode(self.instance.user_data),
                         response.body)
    @mock.patch.object(base, 'get_metadata_by_address')
    def test_metadata_handler_with_remote_address(self, get_by_uuid):
        # test twice to ensure that the cache works
        get_by_uuid.return_value = self.mdinst
        self.flags(metadata_cache_expiration=15)
        hnd = handler.MetadataRequestHandler()
        self._metadata_handler_with_remote_address(hnd)
        self._metadata_handler_with_remote_address(hnd)
        self.assertEqual(1, get_by_uuid.call_count)
    @mock.patch.object(base, 'get_metadata_by_address')
    def test_metadata_handler_with_remote_address_no_cache(self, get_by_uuid):
        # test twice to ensure that disabling the cache works
        get_by_uuid.return_value = self.mdinst
        self.flags(metadata_cache_expiration=0)
        hnd = handler.MetadataRequestHandler()
        self._metadata_handler_with_remote_address(hnd)
        self._metadata_handler_with_remote_address(hnd)
        self.assertEqual(2, get_by_uuid.call_count)
class MetadataPasswordTestCase(test.TestCase):
    """Tests for the password metadata handler (GET/POST semantics,
    size limits and conflict behavior)."""
    def setUp(self):
        super(MetadataPasswordTestCase, self).setUp()
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_inst_obj(self.context)
        # use the local conductor so no RPC round-trips are attempted
        # (previously this flag was set twice; the duplicate is removed)
        self.flags(use_local=True, group='conductor')
        self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
                                            address=None, sgroups=None)
    def test_get_password(self):
        # GET returns the password currently held by the metadata object
        request = webob.Request.blank('')
        self.mdinst.password = 'foo'
        result = password.handle_password(request, self.mdinst)
        self.assertEqual(result, 'foo')
    def test_bad_method(self):
        # only GET and POST are accepted by the handler
        request = webob.Request.blank('')
        request.method = 'PUT'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          password.handle_password, request, self.mdinst)
    @mock.patch('nova.objects.Instance.get_by_uuid')
    def _try_set_password(self, get_by_uuid, val='bar'):
        # Helper: POST `val` as the new password and verify it is saved
        # into the instance's system_metadata.
        request = webob.Request.blank('')
        request.method = 'POST'
        request.body = val
        get_by_uuid.return_value = self.instance
        with mock.patch.object(self.instance, 'save') as save:
            password.handle_password(request, self.mdinst)
            save.assert_called_once_with()
        self.assertIn('password_0', self.instance.system_metadata)
    def test_set_password(self):
        self.mdinst.password = ''
        self._try_set_password()
    def test_conflict(self):
        # overwriting an already-set password must be rejected with 409
        self.mdinst.password = 'foo'
        self.assertRaises(webob.exc.HTTPConflict,
                          self._try_set_password)
    def test_too_large(self):
        # payloads beyond password.MAX_SIZE must be rejected with 400
        self.mdinst.password = ''
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._try_set_password,
                          val=('a' * (password.MAX_SIZE + 1)))
| apache-2.0 |
amallia/zulip | api/integrations/trac/zulip_trac.py | 114 | 5142 | # -*- coding: utf-8 -*-
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Zulip trac plugin -- sends zulips when tickets change.
#
# Install by copying this file and zulip_trac_config.py to the trac
# plugins/ subdirectory, customizing the constants in
# zulip_trac_config.py, and then adding "zulip_trac" to the
# components section of the conf/trac.ini file, like so:
#
# [components]
# zulip_trac = enabled
#
# You may then need to restart trac (or restart Apache) for the bot
# (or changes to the bot) to actually be loaded by trac.
from trac.core import Component, implements
from trac.ticket import ITicketChangeListener
import sys
import os.path
sys.path.insert(0, os.path.dirname(__file__))
import zulip_trac_config as config
VERSION = "0.9"
# Make the Zulip API importable when it is shipped alongside the plugin
# rather than installed system-wide.
if config.ZULIP_API_PATH is not None:
    sys.path.append(config.ZULIP_API_PATH)
import zulip
# Module-level client shared by send_update(); identifies itself to the
# server as this plugin/version.
client = zulip.Client(
    email=config.ZULIP_USER,
    site=config.ZULIP_SITE,
    api_key=config.ZULIP_API_KEY,
    client="ZulipTrac/" + VERSION)
def markdown_ticket_url(ticket, heading="ticket"):
    """Return a Markdown link "[<heading> #<id>](<base>/<id>)" for *ticket*."""
    base_url = config.TRAC_BASE_TICKET_URL
    return "[%s #%s](%s/%s)" % (heading, ticket.id, base_url, ticket.id)
def markdown_block(desc):
    """Render *desc* as a Markdown block quote, surrounded by blank lines.

    The first line is prefixed with ">" (no space) and every following
    line with "> ", matching the original formatting exactly.
    """
    quoted = "\n> ".join(desc.split("\n"))
    return "\n\n>{0}\n".format(quoted)
def truncate(string, length):
    """Return *string* clipped to at most *length* characters.

    Strings that are too long are cut to length-3 characters and
    suffixed with "...", so the result is exactly *length* long.
    """
    if len(string) > length:
        return "{0}...".format(string[:length - 3])
    return string
def trac_subject(ticket):
    """Build the Zulip subject "#<id>: <summary>", truncated to 60 chars."""
    summary = ticket.values.get("summary")
    return truncate("#%s: %s" % (ticket.id, summary), 60)
def send_update(ticket, content):
    """Send *content* to the configured notification stream, using the
    ticket-derived subject so all updates for one ticket thread together."""
    client.send_message({
        "type": "stream",
        "to": config.STREAM_FOR_NOTIFICATIONS,
        "content": content,
        "subject": trac_subject(ticket)
    })
class ZulipPlugin(Component):
    """Trac component that forwards ticket lifecycle events to Zulip."""
    implements(ITicketChangeListener)
    def ticket_created(self, ticket):
        """Called when a ticket is created."""
        content = "%s created %s in component **%s**, priority **%s**:\n" % \
            (ticket.values.get("reporter"), markdown_ticket_url(ticket),
             ticket.values.get("component"), ticket.values.get("priority"))
        # Include the full subject if it will be truncated
        if len(ticket.values.get("summary")) > 60:
            content += "**%s**\n" % (ticket.values.get("summary"),)
        if ticket.values.get("description") != "":
            content += "%s" % (markdown_block(ticket.values.get("description")),)
        send_update(ticket, content)
    def ticket_changed(self, ticket, comment, author, old_values):
        """Called when a ticket is modified.
        `old_values` is a dictionary containing the previous values of the
        fields that have changed.
        """
        # Skip updates that touch none of the configured notify fields
        # (a bare comment counts only if "comment" is a notify field).
        if not (set(old_values.keys()).intersection(set(config.TRAC_NOTIFY_FIELDS)) or
                (comment and "comment" in set(config.TRAC_NOTIFY_FIELDS))):
            return
        content = "%s updated %s" % (author, markdown_ticket_url(ticket))
        if comment:
            content += ' with comment: %s\n\n' % (markdown_block(comment),)
        else:
            content += ":\n\n"
        field_changes = []
        for key in old_values.keys():
            if key == "description":
                # NOTE(review): description diffs are appended to `content`
                # immediately, ahead of the joined field_changes below.
                content += '- Changed %s from %s to %s' % (key, markdown_block(old_values.get(key)),
                                                           markdown_block(ticket.values.get(key)))
            elif old_values.get(key) == "":
                field_changes.append('%s: => **%s**' % (key, ticket.values.get(key)))
            elif ticket.values.get(key) == "":
                field_changes.append('%s: **%s** => ""' % (key, old_values.get(key)))
            else:
                field_changes.append('%s: **%s** => **%s**' % (key, old_values.get(key),
                                                               ticket.values.get(key)))
        content += ", ".join(field_changes)
        send_update(ticket, content)
    def ticket_deleted(self, ticket):
        """Called when a ticket is deleted."""
        content = "%s was deleted." % markdown_ticket_url(ticket, heading="Ticket")
        send_update(ticket, content)
| apache-2.0 |
dougwig/a10-neutron-lbaas | a10_neutron_lbaas/tests/unit/v1/test_handler_pool.py | 1 | 4287 | # Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fake_objs
import test_base
class TestPools(test_base.UnitTestBase):
    """Tests for the v1 LBaaS pool handler against a mocked AXAPI client."""
    def test_create(self):
        # maps from neutron lb-method / protocol names to the client's
        # service-group constants; every combination is exercised below
        methods = {
            'ROUND_ROBIN':
                self.a.last_client.slb.service_group.ROUND_ROBIN,
            'LEAST_CONNECTIONS':
                self.a.last_client.slb.service_group.LEAST_CONNECTION,
            'SOURCE_IP':
                self.a.last_client.slb.service_group.WEIGHTED_LEAST_CONNECTION,
        }
        protocols = {
            'TCP': self.a.last_client.slb.service_group.TCP,
            'UDP': self.a.last_client.slb.service_group.UDP,
        }
        for p in protocols.keys():
            for m in methods.keys():
                self.a.reset_mocks()
                # NOTE(review): saw_exception is never set True anywhere,
                # so the guard below always runs — looks vestigial; confirm
                saw_exception = False
                pool = fake_objs.FakePool(p, m)
                self.a.pool.create(None, pool)
                self.print_mocks()
                (self.a.last_client.slb.service_group.create.
                    assert_called())
                if not saw_exception:
                    n = str(self.a.last_client.mock_calls).index(
                        'slb.service_group.create')
                    self.assertTrue(n >= 0)
    def test_update(self):
        # changing the lb method should trigger a service_group.update
        old_pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        pool = fake_objs.FakePool('TCP', 'ROUND_ROBIN')
        self.a.pool.update(None, old_pool, pool)
        self.print_mocks()
        self.a.last_client.slb.service_group.update.assert_called()
    def _test_delete(self, pool):
        # shared delete driver used by the delete tests below
        self.a.pool.delete(None, pool)
        self.print_mocks()
    def test_delete(self):
        pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        pool.members = [fake_objs.FakeMember()]
        pool.health_monitors_status = [{'monitor_id': 'hm1', "pools": [pool]}]
        self.a.pool.neutron.openstack_driver._pool_get_hm.return_value = fake_objs.FakeHM()
        self._test_delete(pool)
        (self.a.last_client.slb.service_group.delete.
            assert_called_with(pool['id']))
    def test_delete_with_hm_dissociates_hm(self):
        # a monitor shared with another pool must be dissociated, not deleted
        pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        hm = fake_objs.FakeHM("TCP")
        hm.pools.append(fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS'))
        pool.members = [fake_objs.FakeMember()]
        pool.health_monitors_status = [{'monitor_id': 'hm1', "pools": [pool]}]
        self.a.pool.neutron.openstack_driver._pool_get_hm.return_value = hm
        self._test_delete(pool)
        self.a.last_client.slb.service_group.update.assert_called_with(
            "id1",
            health_monitor="",
            health_check_disable=True)
    def test_delete_without_health_monitor(self):
        pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        pool.members = [fake_objs.FakePool()]
        pool.health_monitors_status = []
        self._test_delete(pool)
        (self.a.last_client.slb.service_group.delete.
            assert_called_with(pool.id))
    def test_delete_removes_monitor(self):
        pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        pool.members = [fake_objs.FakeMember()]
        pool.health_monitors_status = [{'monitor_id': "hm1"}]
        self.a.pool.delete(None, pool)
        self.a.last_client.slb.hm.delete.assert_called()
    def test_stats(self):
        pool = fake_objs.FakePool('TCP', 'LEAST_CONNECTIONS')
        z = self.a.pool
        # stub out the neutron lookups the stats path performs
        z.neutron.pool_get_tenant_id = lambda x, y: 'hello'
        z._get_vip_id = lambda x, y: '2.2.2.2'
        z.stats(None, pool.id)
        self.print_mocks()
        s = str(self.a.last_client.mock_calls)
        self.assertTrue(s.index('slb.virtual_server.stats') >= 0)
xwv/shadowsocks | shadowsocks/asyncdns.py | 655 | 17416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# DNS record/query type codes (RFC 1035; AAAA from RFC 3596)
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
# DNS record class: IN (Internet)
QCLASS_IN = 1
def build_address(address):
    """Encode hostname bytes into DNS wire format (RFC 1035 QNAME).

    Each dot-separated label is emitted as a length octet followed by
    the label bytes, terminated by a zero octet.  Returns None if any
    label exceeds the 63-octet limit.
    """
    encoded = []
    for label in address.strip(b'.').split(b'.'):
        label_len = len(label)
        if label_len > 63:
            # RFC 1035 caps a single label at 63 octets
            return None
        encoded.append(common.chr(label_len))
        encoded.append(label)
    encoded.append(b'\0')
    return b''.join(encoded)
def build_request(address, qtype):
    """Build a complete DNS query packet for *address* of type *qtype*."""
    # random 16-bit transaction ID
    request_id = os.urandom(2)
    # remaining header: flags byte 1 = RD (recursion desired),
    # flags byte 2 = 0, QDCOUNT = 1, AN/NS/ARCOUNT = 0
    header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
    question = build_address(address)
    tail = struct.pack('!HH', qtype, QCLASS_IN)
    return request_id + header + question + tail
def parse_ip(addrtype, data, length, offset):
    """Decode the RDATA of a resource record at data[offset:offset+length].

    A/AAAA records become dotted/colon-hex text addresses; CNAME/NS
    records are decoded as (possibly compressed) domain names; any other
    type is returned as the raw RDATA bytes.
    """
    rdata = data[offset:offset + length]
    if addrtype == QTYPE_A:
        return socket.inet_ntop(socket.AF_INET, rdata)
    if addrtype == QTYPE_AAAA:
        return socket.inet_ntop(socket.AF_INET6, rdata)
    if addrtype in (QTYPE_CNAME, QTYPE_NS):
        # RDATA holds a domain name, which may use compression pointers
        # into the full message, so parse from `data`, not the slice
        return parse_name(data, offset)[1]
    return rdata
def parse_name(data, offset):
    """Decode a (possibly compressed) domain name starting at `offset`.

    Returns (bytes consumed at `offset`, name as b'.'-joined labels).
    Compression pointers (RFC 1035 §4.1.4) are followed recursively but
    count only 2 bytes toward the consumed length.
    """
    p = offset
    labels = []
    l = common.ord(data[p])
    while l > 0:
        # top two bits set in the length octet mark a compression pointer
        if (l & (128 + 64)) == (128 + 64):
            # pointer
            pointer = struct.unpack('!H', data[p:p + 2])[0]
            pointer &= 0x3FFF
            r = parse_name(data, pointer)
            labels.append(r[1])
            p += 2
            # pointer is the end
            return p - offset, b'.'.join(labels)
        else:
            # plain label: length octet followed by `l` bytes
            labels.append(data[p + 1:p + 1 + l])
            p += 1 + l
            l = common.ord(data[p])
    # +1 accounts for the terminating zero octet
    return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
    """Decode one resource record (or question entry) at *offset*.

    Returns ``(bytes_consumed, record_tuple)``.
    NOTE(review): the answer branch returns a 5-tuple and the question branch
    a 6-tuple; callers in this file only index elements 0-3, so the extra
    trailing None is harmless but inconsistent — confirm before relying on
    tuple length.
    """
    nlen, name = parse_name(data, offset)
    if not question:
        # answer/authority/additional record: TYPE, CLASS, TTL, RDLENGTH
        # follow the name (RFC 1035 section 4.1.3)
        record_type, record_class, record_ttl, record_rdlength = struct.unpack(
            '!HHiH', data[offset + nlen:offset + nlen + 10]
        )
        ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
        return nlen + 10 + record_rdlength, \
            (name, ip, record_type, record_class, record_ttl)
    else:
        # question entry: only QTYPE and QCLASS follow the name
        record_type, record_class = struct.unpack(
            '!HH', data[offset + nlen:offset + nlen + 4]
        )
        return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
    """Decode the fixed 12-byte DNS header.

    Returns ``(id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount)``,
    or None when *data* is shorter than a full header.  The flag fields are
    returned as the raw masked bit values, not normalised to 0/1.
    """
    if len(data) < 12:
        return None
    (res_id, flags_hi, flags_lo, res_qdcount,
     res_ancount, res_nscount, res_arcount) = struct.unpack('!HBBHHHH',
                                                            data[:12])
    res_qr = flags_hi & 128      # query/response bit
    res_tc = flags_hi & 2        # truncation bit
    res_ra = flags_lo & 128      # recursion-available bit
    res_rcode = flags_lo & 15    # response code
    # assert res_tc == 0
    # assert res_rcode in [0, 3]
    return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
            res_ancount, res_nscount, res_arcount)
def parse_response(data):
    """Parse a raw DNS reply into a DNSResponse, or None on any error.

    Questions and answers are collected; authority (NS) and additional (AR)
    records are parsed only to advance the offset and are discarded.
    Any parsing exception is logged and swallowed, returning None.
    """
    try:
        if len(data) >= 12:
            header = parse_header(data)
            if not header:
                return None
            res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
                res_ancount, res_nscount, res_arcount = header
            qds = []
            ans = []
            # resource records start right after the 12-byte header
            offset = 12
            for i in range(0, res_qdcount):
                l, r = parse_record(data, offset, True)
                offset += l
                if r:
                    qds.append(r)
            for i in range(0, res_ancount):
                l, r = parse_record(data, offset)
                offset += l
                if r:
                    ans.append(r)
            # authority and additional sections: skipped, results unused
            for i in range(0, res_nscount):
                l, r = parse_record(data, offset)
                offset += l
            for i in range(0, res_arcount):
                l, r = parse_record(data, offset)
                offset += l
            response = DNSResponse()
            if qds:
                # hostname is taken from the first question entry
                response.hostname = qds[0][0]
            for an in qds:
                response.questions.append((an[1], an[2], an[3]))
            for an in ans:
                response.answers.append((an[1], an[2], an[3]))
            return response
        # implicit None for packets shorter than a DNS header
    except Exception as e:
        shell.print_exception(e)
        return None
def is_valid_hostname(hostname):
    """Return True when *hostname* (bytes) is a syntactically valid hostname.

    Checks the 255-byte overall limit, strips one trailing dot, and
    validates each label against the module-level VALID_HOSTNAME pattern.
    """
    if len(hostname) > 255:
        return False
    # Fix: `hostname[-1] == b'.'` never matched on Python 3, because indexing
    # bytes yields an int — so fully-qualified names with a trailing dot were
    # wrongly rejected (the empty last label fails the regex).  endswith()
    # behaves correctly on both Python 2 and 3.
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
    """Parsed DNS reply: the queried hostname plus question/answer tuples."""

    def __init__(self):
        self.hostname = None
        self.questions = []  # each: (addr, type, class)
        self.answers = []  # each: (addr, type, class)

    def __str__(self):
        # render as "<hostname>: <answer list>"
        return '{0}: {1}'.format(self.hostname, self.answers)
# Per-hostname resolution state: which query type we are currently waiting
# on.  An A query is sent first (STATUS_IPV4); if it yields no address the
# resolver falls back to an AAAA query (STATUS_IPV6).
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
    """Non-blocking UDP DNS client driven by the project's event loop.

    Resolution results are delivered through callbacks of the form
    ``callback((hostname, ip), error)``.  Lookups consult /etc/hosts and a
    300-second LRU cache before hitting the configured nameservers.
    """

    def __init__(self, server_list=None):
        """Create a resolver; use *server_list* or parse /etc/resolv.conf."""
        self._loop = None
        self._hosts = {}  # hostname -> ip, loaded from the hosts file
        self._hostname_status = {}  # hostname -> STATUS_IPV4/STATUS_IPV6
        self._hostname_to_cb = {}  # hostname -> [callback, ...]
        self._cb_to_hostname = {}  # reverse map for remove_callback()
        self._cache = lru_cache.LRUCache(timeout=300)
        self._sock = None
        if server_list is None:
            self._servers = None
            self._parse_resolv()
        else:
            self._servers = server_list
        self._parse_hosts()
        # TODO monitor hosts change and reload hosts
        # TODO parse /etc/gai.conf and follow its rules

    def _parse_resolv(self):
        """Load IPv4 nameservers from /etc/resolv.conf; default to Google DNS."""
        self._servers = []
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                content = f.readlines()
                for line in content:
                    line = line.strip()
                    if line:
                        if line.startswith(b'nameserver'):
                            parts = line.split()
                            if len(parts) >= 2:
                                server = parts[1]
                                # only IPv4 servers are supported (UDP socket
                                # below is AF_INET)
                                if common.is_ip(server) == socket.AF_INET:
                                    if type(server) != str:
                                        server = server.decode('utf8')
                                    self._servers.append(server)
        except IOError:
            pass
        if not self._servers:
            # fall back to Google public DNS when nothing was found
            self._servers = ['8.8.4.4', '8.8.8.8']

    def _parse_hosts(self):
        """Load the static hosts file (Windows path handled separately)."""
        etc_path = '/etc/hosts'
        if 'WINDIR' in os.environ:
            etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
        try:
            with open(etc_path, 'rb') as f:
                for line in f.readlines():
                    line = line.strip()
                    parts = line.split()
                    if len(parts) >= 2:
                        ip = parts[0]
                        if common.is_ip(ip):
                            # one line may map several hostnames to one ip
                            for i in range(1, len(parts)):
                                hostname = parts[i]
                                if hostname:
                                    self._hosts[hostname] = ip
        except IOError:
            self._hosts['localhost'] = '127.0.0.1'

    def add_to_loop(self, loop):
        """Register the resolver's UDP socket and periodic sweep on *loop*."""
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.SOL_UDP)
        self._sock.setblocking(False)
        loop.add(self._sock, eventloop.POLL_IN, self)
        loop.add_periodic(self.handle_periodic)

    def _call_callback(self, hostname, ip, error=None):
        """Fire every callback waiting on *hostname*, then forget the state."""
        callbacks = self._hostname_to_cb.get(hostname, [])
        for callback in callbacks:
            if callback in self._cb_to_hostname:
                del self._cb_to_hostname[callback]
            if ip or error:
                callback((hostname, ip), error)
            else:
                # no address and no explicit error: report failed lookup
                callback((hostname, None),
                         Exception('unknown hostname %s' % hostname))
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]

    def _handle_data(self, data):
        """Process one raw DNS reply: cache, fall back to AAAA, or report."""
        response = parse_response(data)
        if response and response.hostname:
            hostname = response.hostname
            ip = None
            # take the first A/AAAA answer in the Internet class
            for answer in response.answers:
                if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
                        answer[2] == QCLASS_IN:
                    ip = answer[0]
                    break
            if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
                    == STATUS_IPV4:
                # A query yielded nothing: retry once with an AAAA query
                self._hostname_status[hostname] = STATUS_IPV6
                self._send_req(hostname, QTYPE_AAAA)
            else:
                if ip:
                    self._cache[hostname] = ip
                    self._call_callback(hostname, ip)
                elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
                    # AAAA fallback also failed: report the miss
                    for question in response.questions:
                        if question[1] == QTYPE_AAAA:
                            self._call_callback(hostname, None)
                            break

    def handle_event(self, sock, fd, event):
        """Event-loop hook: recreate the socket on error, else read a reply."""
        if sock != self._sock:
            return
        if event & eventloop.POLL_ERR:
            logging.error('dns socket err')
            self._loop.remove(self._sock)
            self._sock.close()
            # TODO when dns server is IPv6
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                       socket.SOL_UDP)
            self._sock.setblocking(False)
            self._loop.add(self._sock, eventloop.POLL_IN, self)
        else:
            data, addr = sock.recvfrom(1024)
            # ignore datagrams that are not from one of our nameservers
            if addr[0] not in self._servers:
                logging.warn('received a packet other than our dns')
                return
            self._handle_data(data)

    def handle_periodic(self):
        """Periodic event-loop hook: expire stale cache entries."""
        self._cache.sweep()

    def remove_callback(self, callback):
        """Detach *callback* from any pending lookup (e.g. closed connection)."""
        hostname = self._cb_to_hostname.get(callback)
        if hostname:
            del self._cb_to_hostname[callback]
            arr = self._hostname_to_cb.get(hostname, None)
            if arr:
                arr.remove(callback)
                if not arr:
                    # last waiter gone: drop the whole lookup state
                    del self._hostname_to_cb[hostname]
                    if hostname in self._hostname_status:
                        del self._hostname_status[hostname]

    def _send_req(self, hostname, qtype):
        """Send one query for *hostname*/*qtype* to every configured server."""
        req = build_request(hostname, qtype)
        for server in self._servers:
            logging.debug('resolving %s with type %d using server %s',
                          hostname, qtype, server)
            self._sock.sendto(req, (server, 53))

    def resolve(self, hostname, callback):
        """Resolve *hostname* and invoke ``callback((hostname, ip), error)``.

        Fast paths (literal IP, hosts file, cache, invalid name) call back
        synchronously; otherwise an A query is sent and the callback is
        queued until a reply arrives.
        """
        if type(hostname) != bytes:
            hostname = hostname.encode('utf8')
        if not hostname:
            callback(None, Exception('empty hostname'))
        elif common.is_ip(hostname):
            # already an address literal; nothing to resolve
            callback((hostname, hostname), None)
        elif hostname in self._hosts:
            logging.debug('hit hosts: %s', hostname)
            ip = self._hosts[hostname]
            callback((hostname, ip), None)
        elif hostname in self._cache:
            logging.debug('hit cache: %s', hostname)
            ip = self._cache[hostname]
            callback((hostname, ip), None)
        else:
            if not is_valid_hostname(hostname):
                callback(None, Exception('invalid hostname: %s' % hostname))
                return
            arr = self._hostname_to_cb.get(hostname, None)
            if not arr:
                # first waiter: start with an A query
                self._hostname_status[hostname] = STATUS_IPV4
                self._send_req(hostname, QTYPE_A)
                self._hostname_to_cb[hostname] = [callback]
                self._cb_to_hostname[callback] = hostname
            else:
                arr.append(callback)
                # TODO send again only if waited too long
                self._send_req(hostname, QTYPE_A)

    def close(self):
        """Unregister from the event loop and close the UDP socket."""
        if self._sock:
            if self._loop:
                self._loop.remove_periodic(self.handle_periodic)
                self._loop.remove(self._sock)
            self._sock.close()
            self._sock = None
def test():
    """Ad-hoc smoke test: resolve nine hostnames, then stop the event loop."""
    dns_resolver = DNSResolver()
    loop = eventloop.EventLoop()
    dns_resolver.add_to_loop(loop)
    global counter
    counter = 0
    def make_callback():
        """Return a fresh callback; the ninth invocation shuts everything down."""
        global counter
        def callback(result, error):
            global counter
            # TODO: what can we assert?
            print(result, error)
            counter += 1
            # nine resolve() calls are made below, so the ninth callback
            # closes the resolver and stops the loop
            if counter == 9:
                dns_resolver.close()
                loop.stop()
        a_callback = callback
        return a_callback
    # each call must produce a distinct closure
    assert(make_callback() != make_callback())
    dns_resolver.resolve(b'google.com', make_callback())
    dns_resolver.resolve('google.com', make_callback())
    dns_resolver.resolve('example.com', make_callback())
    dns_resolver.resolve('ipv6.google.com', make_callback())
    dns_resolver.resolve('www.facebook.com', make_callback())
    dns_resolver.resolve('ns2.google.com', make_callback())
    dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
    # first label longer than 63 bytes: rejected by is_valid_hostname()
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    # overall name longer than 255 bytes: also rejected
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    loop.run()
if __name__ == '__main__':
    test()
| apache-2.0 |
lilida/teletraan | deploy-board/integ_test/auto_deploy_tests.py | 8 | 9393 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import unittest
import time
import commons
import sys
# Module-level API helpers shared by every test in this file.
builds_helper = commons.get_build_helper()
environs_helper = commons.get_environ_helper()
systems_helper = commons.get_system_helper()
deploys_helper = commons.get_deploy_helper()
# Names of the two deploy stages exercised by these tests.
CANARY = "canary"
PROD = "prod"
class TestAutoDeploy(unittest.TestCase):
    """Integration tests for automatic promotion between deploy stages.

    Fixes applied in this revision:
    - ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced
      with ``assertEqual``;
    - the Python-2-only ``print("."),`` progress-dot idiom (which on
      Python 3 prints a newline and builds a throwaway tuple) replaced
      with ``sys.stdout.write(".")``.
    """

    def setUp(self):
        """Create a fresh canary/prod env pair and publish one build."""
        self.host = "test-cd-host-" + commons.gen_random_num()
        self.envName = "test-cd-env-" + commons.gen_random_num()
        self.env_canary = commons.create_env(self.envName, CANARY)
        self.env_prod = commons.create_env(self.envName, PROD)
        self.commit = commons.gen_random_num(32)
        self.build = commons.publish_build(self.envName, commit=self.commit)

    def tearDown(self):
        """Delete both stages and the published build."""
        environs_helper.delete_env(commons.REQUEST, self.envName, CANARY)
        environs_helper.delete_env(commons.REQUEST, self.envName, PROD)
        builds_helper.delete_build(commons.REQUEST, self.build['id'])

    def _fail_deploy(self, deploy, stage=PROD):
        """Report a TOO_MANY_RETRY agent status so *deploy* is marked failed.

        NOTE: the *stage* parameter is currently unused (the report always
        targets the prod env); kept for interface compatibility.
        """
        pingRequest = {}
        pingRequest['hostId'] = self.host
        pingRequest['hostName'] = self.host
        pingRequest['hostIp'] = "8.8.8.8"
        report = {}
        report['envId'] = self.env_prod['id']
        report['deployId'] = deploy['id']
        report['deployStage'] = "RESTARTING"
        report['agentStatus'] = "TOO_MANY_RETRY"
        pingRequest['reports'] = [report]
        systems_helper.ping(commons.REQUEST, pingRequest)

    def _assertDeploy(self, stage, expect_commit):
        """Poll (up to 300s) until *stage* is deploying *expect_commit*."""
        count = 0
        while count < 300:
            env = environs_helper.get_env_by_stage(commons.REQUEST, self.envName, stage)
            deployId = env.get('deployId')
            if deployId:
                deploy = deploys_helper.get(commons.REQUEST, env['deployId'])
                build = builds_helper.get_build(commons.REQUEST, deploy['buildId'])
                if build['commit'] == expect_commit:
                    return
            # progress dot without newline (portable across py2/py3)
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(1)
            count = count + 1
        self.fail("Timed out when wait for deploy for %s to happen" % expect_commit)

    def testBuildToCanaryToProd(self):
        """A new build auto-promotes BUILD -> canary -> prod."""
        data = {}
        data["type"] = "AUTO"
        data["predStage"] = "BUILD"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName,
                                                   CANARY, data=data)
        data["predStage"] = CANARY
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName,
                                                   PROD, data=data)
        self._assertDeploy(PROD, self.commit)
        deployId = \
            environs_helper.get_env_by_stage(commons.REQUEST, self.envName, CANARY)['deployId']
        deploys_helper.delete(commons.REQUEST, deployId)
        deployId = environs_helper.get_env_by_stage(commons.REQUEST, self.envName, PROD)['deployId']
        deploys_helper.delete(commons.REQUEST, deployId)

    def testDisablePolicyAuto(self):
        """disablePolicy=AUTO flips promotion to MANUAL after a manual deploy."""
        data = {}
        data["type"] = "AUTO"
        data["disablePolicy"] = "AUTO"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName, PROD, data=data)
        deploy = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, self.build['id'])
        promote_config = environs_helper.get_env_promotes_config(commons.REQUEST, self.envName,
                                                                 PROD)
        self.assertEqual(promote_config['type'], "MANUAL")
        deploys_helper.delete(commons.REQUEST, deploy['id'])

    def testDisablePolicyManual(self):
        """disablePolicy=MANUAL keeps promotion AUTO after a manual deploy."""
        data = {}
        data["type"] = "AUTO"
        data["disablePolicy"] = "MANUAL"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName, PROD, data=data)
        deploy = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, self.build['id'])
        promote_config = environs_helper.get_env_promotes_config(commons.REQUEST, self.envName,
                                                                 PROD)
        self.assertEqual(promote_config['type'], "AUTO")
        deploys_helper.delete(commons.REQUEST, deploy['id'])

    def _assertState(self, stage, expect_state):
        """Poll (up to 150s) until *stage*'s promote type equals *expect_state*."""
        count = 0
        while count < 150:
            promote_config = environs_helper.get_env_promotes_config(commons.REQUEST, self.envName,
                                                                     stage)
            if promote_config['type'] == expect_state:
                return
            # progress dot without newline (portable across py2/py3)
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(1)
            count = count + 1
        self.fail("Timed out when wait for promote state to be %s" % expect_state)

    def testFailPolicyContinue(self):
        """failPolicy=CONTINUE leaves promotion AUTO after a failed deploy."""
        deploy = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, self.build['id'])
        environs_helper.update_env_capacity(commons.REQUEST, self.envName,
                                            PROD, capacity_type="HOST", data=[self.host])
        self._fail_deploy(deploy)
        # make deploy fail happens faster
        deploys_helper.update_progress(commons.REQUEST, self.envName, PROD)
        data = {}
        data["type"] = "AUTO"
        data["disablePolicy"] = "MANUAL"
        data["predStage"] = CANARY
        data["failPolicy"] = "CONTINUE"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName, PROD, data=data)
        promote_config = environs_helper.get_env_promotes_config(commons.REQUEST, self.envName,
                                                                 PROD)
        self.assertEqual(promote_config['type'], "AUTO")
        deploys_helper.delete(commons.REQUEST, deploy['id'])
        # This so that we can delete the env
        environs_helper.update_env_capacity(commons.REQUEST, self.envName, PROD,
                                            capacity_type="HOST", data=[])

    def testFailPolicyDisable(self):
        """failPolicy=DISABLE switches promotion to MANUAL after a failure."""
        environs_helper.update_env_capacity(commons.REQUEST, self.envName,
                                            PROD, capacity_type="HOST", data=[self.host])
        deploy = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, self.build['id'])
        self._fail_deploy(deploy)
        # make deploy fail happens faster
        deploys_helper.update_progress(commons.REQUEST, self.envName, PROD)
        data = {}
        data["type"] = "AUTO"
        data["disablePolicy"] = "MANUAL"
        data["predStage"] = CANARY
        data["failPolicy"] = "DISABLE"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName, PROD, data=data)
        self._assertState(PROD, "MANUAL")
        deploys_helper.delete(commons.REQUEST, deploy['id'])
        # This so that we can delete the env
        environs_helper.update_env_capacity(commons.REQUEST, self.envName, PROD,
                                            capacity_type="HOST", data=[])

    def testFailPolicyROLLBACK(self):
        """failPolicy=ROLLBACK rolls prod back to the previous good commit."""
        commit_0 = commons.gen_random_num(32)
        build_0 = commons.publish_build(self.envName, commit=commit_0)
        deploy_0 = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, build_0['id'])
        deploys_helper.update_progress(commons.REQUEST, self.envName, PROD)
        environs_helper.update_env_capacity(commons.REQUEST, self.envName,
                                            PROD, capacity_type="HOST", data=[self.host])
        deploy = deploys_helper.deploy(commons.REQUEST, self.envName, PROD, self.build['id'])
        self._fail_deploy(deploy)
        deploys_helper.update_progress(commons.REQUEST, self.envName, PROD)
        data = {}
        data["type"] = "AUTO"
        data["disablePolicy"] = "MANUAL"
        data["predStage"] = CANARY
        data["failPolicy"] = "ROLLBACK"
        environs_helper.update_env_promotes_config(commons.REQUEST, self.envName, PROD, data=data)
        self._assertState(PROD, "MANUAL")
        new_env = environs_helper.get_env_by_stage(commons.REQUEST, self.envName, PROD)
        new_deploy = deploys_helper.get(commons.REQUEST, new_env['deployId'])
        new_build = builds_helper.get_build(commons.REQUEST, new_deploy['buildId'])
        self.assertEqual(new_build['commit'], commit_0)
        builds_helper.delete_build(commons.REQUEST, build_0['id'])
        deploys_helper.delete(commons.REQUEST, deploy_0['id'])
        deploys_helper.delete(commons.REQUEST, deploy['id'])
        deploys_helper.delete(commons.REQUEST, new_deploy['id'])
        # This so that we can delete the env
        environs_helper.update_env_capacity(commons.REQUEST, self.envName, PROD,
                                            capacity_type="HOST", data=[])


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
sdelements/fabric | tests/test_project.py | 38 | 5250 | import unittest
import os
import fudge
from fudge.inspector import arg
from fabric.contrib import project
class UploadProjectTestCase(unittest.TestCase):
    """Test case for :func: `fabric.contrib.project.upload_project`.

    Fix applied in this revision: ``tearDown`` previously restored the
    ``run``/``local``/``put`` patches but never ``patched_mkdtemp``, leaking
    the fake ``mkdtemp`` into everything that runs after this test case.
    """

    # fixed name returned by the faked mkdtemp(), asserted in put() calls
    fake_tmp = "testtempfolder"

    def setUp(self):
        """Replace run/local/put/mkdtemp in the project module with fakes."""
        fudge.clear_expectations()
        # We need to mock out run, local, and put
        self.fake_run = fudge.Fake('project.run', callable=True)
        self.patched_run = fudge.patch_object(
            project,
            'run',
            self.fake_run
        )
        self.fake_local = fudge.Fake('local', callable=True)
        self.patched_local = fudge.patch_object(
            project,
            'local',
            self.fake_local
        )
        self.fake_put = fudge.Fake('put', callable=True)
        self.patched_put = fudge.patch_object(
            project,
            'put',
            self.fake_put
        )
        # We don't want to create temp folders
        self.fake_mkdtemp = fudge.Fake(
            'mkdtemp',
            expect_call=True
        ).returns(self.fake_tmp)
        self.patched_mkdtemp = fudge.patch_object(
            project,
            'mkdtemp',
            self.fake_mkdtemp
        )

    def tearDown(self):
        """Restore every patched attribute and reset fudge's expectations."""
        self.patched_run.restore()
        self.patched_local.restore()
        self.patched_put.restore()
        # Fix: previously missing, so project.mkdtemp stayed faked after
        # each test finished.
        self.patched_mkdtemp.restore()
        fudge.clear_expectations()

    @fudge.with_fakes
    def test_temp_folder_is_used(self):
        """A unique temp folder is used for creating the archive to upload."""
        # Exercise
        project.upload_project()

    @fudge.with_fakes
    def test_project_is_archived_locally(self):
        """The project should be archived locally before being uploaded."""
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_local.with_args(arg.startswith("tar -czf")).next_call()
        # Exercise
        project.upload_project()

    @fudge.with_fakes
    def test_current_directory_is_uploaded_by_default(self):
        """By default the project uploaded is the current working directory."""
        cwd_path, cwd_name = os.path.split(os.getcwd())
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_local.with_args(
            arg.endswith("-C %s %s" % (cwd_path, cwd_name))
        ).next_call()
        # Exercise
        project.upload_project()

    @fudge.with_fakes
    def test_path_to_local_project_can_be_specified(self):
        """It should be possible to specify which local folder to upload."""
        project_path = "path/to/my/project"
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_local.with_args(
            arg.endswith("-C %s %s" % os.path.split(project_path))
        ).next_call()
        # Exercise
        project.upload_project(local_dir=project_path)

    @fudge.with_fakes
    def test_path_to_local_project_can_end_in_separator(self):
        """A local path ending in a separator should be handled correctly."""
        project_path = "path/to/my"
        base = "project"
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_local.with_args(
            arg.endswith("-C %s %s" % (project_path, base))
        ).next_call()
        # Exercise
        project.upload_project(local_dir="%s/%s/" % (project_path, base))

    @fudge.with_fakes
    def test_default_remote_folder_is_home(self):
        """Project is uploaded to remote home by default."""
        local_dir = "folder"
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_put.with_args(
            "%s/folder.tar.gz" % self.fake_tmp, "folder.tar.gz", use_sudo=False
        ).next_call()
        # Exercise
        project.upload_project(local_dir=local_dir)

    @fudge.with_fakes
    def test_path_to_remote_folder_can_be_specified(self):
        """It should be possible to specify which local folder to upload to."""
        local_dir = "folder"
        remote_path = "path/to/remote/folder"
        # local() is called more than once so we need an extra next_call()
        # otherwise fudge compares the args to the last call to local()
        self.fake_put.with_args(
            "%s/folder.tar.gz" % self.fake_tmp, "%s/folder.tar.gz" % remote_path, use_sudo=False
        ).next_call()
        # Exercise
        project.upload_project(local_dir=local_dir, remote_dir=remote_path)
| bsd-2-clause |
bmbouter/kombu | kombu/tests/test_log.py | 8 | 5035 | from __future__ import absolute_import
import logging
import sys
from kombu.log import (
NullHandler,
get_logger,
get_loglevel,
safeify_format,
Log,
LogMixin,
setup_logging,
)
from .case import Case, Mock, patch
class test_NullHandler(Case):
    """NullHandler must accept records without doing anything."""

    def test_emit(self):
        # emit() is a no-op and must not raise for an arbitrary record
        NullHandler().emit('record')
class test_get_logger(Case):
    """Behaviour of get_logger()/get_loglevel() helpers."""

    def test_when_string(self):
        # a string argument resolves to the stdlib logger of that name,
        # with a NullHandler attached when it had no handlers
        logger = get_logger('foo')
        self.assertIs(logger, logging.getLogger('foo'))
        handler = logger.handlers[0]
        self.assertIsInstance(handler, NullHandler)

    def test_when_logger(self):
        # passing an existing logger object works the same way
        logger = get_logger(logging.getLogger('foo'))
        handler = logger.handlers[0]
        self.assertIsInstance(handler, NullHandler)

    def test_with_custom_handler(self):
        # a logger that already has a handler is left untouched
        logger = logging.getLogger('bar')
        custom_handler = NullHandler()
        logger.addHandler(custom_handler)
        logger = get_logger('bar')
        self.assertIs(logger.handlers[0], custom_handler)

    def test_get_loglevel(self):
        # names map to stdlib level constants; ints pass through unchanged
        self.assertEqual(get_loglevel('DEBUG'), logging.DEBUG)
        self.assertEqual(get_loglevel('ERROR'), logging.ERROR)
        self.assertEqual(get_loglevel(logging.INFO), logging.INFO)
class test_safe_format(Case):
    """safeify_format() should pre-render args for %-interpolation."""

    def test_formatting(self):
        template = 'The %r jumped %x over the %s'
        values = ['frog', 'foo', 'elephant']
        # %r argument is repr()'d; the others pass through unchanged
        self.assertListEqual(
            list(safeify_format(template, values)),
            ["'frog'", 'foo', 'elephant'],
        )
class test_LogMixin(Case):
    """Log/LogMixin delegate to a logging.Logger with a 'Name - ' prefix."""

    def setup(self):
        # Log wraps a mocked logger so every .log() call can be asserted
        self.log = Log('Log', Mock())
        self.logger = self.log.logger

    def test_debug(self):
        self.log.debug('debug')
        self.logger.log.assert_called_with(logging.DEBUG, 'Log - debug')

    def test_info(self):
        self.log.info('info')
        self.logger.log.assert_called_with(logging.INFO, 'Log - info')

    def test_warning(self):
        self.log.warn('warning')
        self.logger.log.assert_called_with(logging.WARN, 'Log - warning')

    def test_error(self):
        # exc_info is forwarded untouched
        self.log.error('error', exc_info='exc')
        self.logger.log.assert_called_with(
            logging.ERROR, 'Log - error', exc_info='exc',
        )

    def test_critical(self):
        self.log.critical('crit', exc_info='exc')
        self.logger.log.assert_called_with(
            logging.CRITICAL, 'Log - crit', exc_info='exc',
        )

    def test_error_when_DISABLE_TRACEBACKS(self):
        # with tracebacks disabled, error() must not pass exc_info through
        from kombu import log
        log.DISABLE_TRACEBACKS = True
        try:
            self.log.error('error')
            self.logger.log.assert_called_with(logging.ERROR, 'Log - error')
        finally:
            log.DISABLE_TRACEBACKS = False

    def test_get_loglevel(self):
        self.assertEqual(self.log.get_loglevel('DEBUG'), logging.DEBUG)
        self.assertEqual(self.log.get_loglevel('ERROR'), logging.ERROR)
        self.assertEqual(self.log.get_loglevel(logging.INFO), logging.INFO)

    def test_is_enabled_for(self):
        self.logger.isEnabledFor.return_value = True
        self.assertTrue(self.log.is_enabled_for('DEBUG'))
        self.logger.isEnabledFor.assert_called_with(logging.DEBUG)

    def test_LogMixin_get_logger(self):
        # default logger name is the class name
        self.assertIs(LogMixin().get_logger(),
                      logging.getLogger('LogMixin'))

    def test_Log_get_logger(self):
        self.assertIs(Log('test_Log').get_logger(),
                      logging.getLogger('test_Log'))

    def test_log_when_not_enabled(self):
        # no .log() call at all when the level is disabled
        self.logger.isEnabledFor.return_value = False
        self.log.debug('debug')
        self.assertFalse(self.logger.log.called)

    def test_log_with_format(self):
        # %r placeholders are rewritten to %s with pre-repr'd arguments
        self.log.debug('Host %r removed', 'example.com')
        self.logger.log.assert_called_with(
            logging.DEBUG, 'Log - Host %s removed', "'example.com'",
        )
class test_setup_logging(Case):
    """setup_logging() configures the root logger unless already set up."""

    @patch('logging.getLogger')
    def test_set_up_default_values(self, getLogger):
        # with no handlers present, defaults are ERROR level + stderr stream
        logger = logging.getLogger.return_value = Mock()
        logger.handlers = []
        setup_logging()
        logger.setLevel.assert_called_with(logging.ERROR)
        self.assertTrue(logger.addHandler.called)
        ah_args, _ = logger.addHandler.call_args
        handler = ah_args[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertIs(handler.stream, sys.__stderr__)

    @patch('logging.getLogger')
    @patch('kombu.log.WatchedFileHandler')
    def test_setup_custom_values(self, getLogger, WatchedFileHandler):
        # NOTE(review): @patch decorators inject mocks bottom-up, so the
        # parameter named `getLogger` actually receives the WatchedFileHandler
        # mock and vice versa.  The assertions still hold because the body
        # reaches the patched objects via logging.getLogger directly and the
        # swapped `WatchedFileHandler` mock is also called — confirm and
        # consider renaming the parameters.
        logger = logging.getLogger.return_value = Mock()
        logger.handlers = []
        setup_logging(loglevel=logging.DEBUG, logfile='/var/logfile')
        logger.setLevel.assert_called_with(logging.DEBUG)
        self.assertTrue(logger.addHandler.called)
        self.assertTrue(WatchedFileHandler.called)

    @patch('logging.getLogger')
    def test_logger_already_setup(self, getLogger):
        # an existing handler means setup_logging() must not reconfigure
        logger = logging.getLogger.return_value = Mock()
        logger.handlers = [Mock()]
        setup_logging()
        self.assertFalse(logger.setLevel.called)
| bsd-3-clause |
jlowdermilk/test-infra | gubernator/pull_request_test.py | 16 | 2645 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pull_request
def make(number, version, result, start_time=1000):
    """Build a (number, started, finished) build fixture tuple.

    *started* is None when *version* is None; *finished* is falsy
    (passed through) when *result* is falsy.
    """
    if version is None:
        started = None
    else:
        started = {'timestamp': start_time, 'version': version}
    finished = result and {'result': result}
    return (number, started, finished)
def makePodutil(number, revision, result, start_time=1000):
    """Build a podutils-style (number, started, finished) fixture tuple.

    Unlike make(), *started* never carries a version and *finished*
    records the revision instead.
    """
    finished = {'result': result, 'revision': revision} if result else result
    return (number, {'timestamp': start_time}, finished)
class TableTest(unittest.TestCase):
    """Tests for pull_request.builds_to_table()."""

    def test_builds_to_table(self):
        """Jobs with started['version'] group into version column headings."""
        jobs = {'J1': [make(4, 'v2', 'A', 9), make(3, 'v2', 'B', 10)],
                'J2': [make(5, 'v1', 'C', 7), make(4, 'v1', 'D', 6)]}
        max_builds, headings, rows = pull_request.builds_to_table(jobs)
        self.assertEqual(max_builds, 4)
        self.assertEqual(headings, [('v2', 2, 9), ('v1', 2, 6)])
        # J2's builds are padded with None under the v2 columns
        self.assertEqual(rows, [('J1', [(4, 'A'), (3, 'B')]),
                                ('J2', [None, None, (5, 'C'), (4, 'D')])])

    def test_builds_to_table_no_header(self):
        """Builds without a version produce no headings at all."""
        jobs = {'J': [make(5, None, 'A', 3), make(4, '', 'B', 2)]}
        self.assertEqual(pull_request.builds_to_table(jobs),
                         (0, [], [('J', [(5, 'A'), (4, 'B')])]))

    def test_pull_ref_commit(self):
        """A 'pull' ref uses the PR commit hash as the heading version."""
        jobs = {'J1': [make(4, 'v2', 'A', 9)]}
        jobs['J1'][0][1]['pull'] = 'master:1234,35:abcd'
        _, headings, _ = pull_request.builds_to_table(jobs)
        self.assertEqual(headings, [('abcd', 1, 9)])

    def test_builds_to_table_podutils(self):
        """Podutils-style builds (revision in finished) group the same way."""
        jobs = {'J1': [makePodutil(4, 'v2', 'A', 9), makePodutil(3, 'v2', 'B', 10)],
                'J2': [makePodutil(5, 'v1', 'C', 7), makePodutil(4, 'v1', 'D', 6)]}
        max_builds, headings, rows = pull_request.builds_to_table(jobs)
        self.assertEqual(max_builds, 4)
        self.assertEqual(headings, [('v2', 2, 9), ('v1', 2, 6)])
        self.assertEqual(rows, [('J1', [(4, 'A'), (3, 'B')]),
                                ('J2', [None, None, (5, 'C'), (4, 'D')])])
| apache-2.0 |
GunoH/intellij-community | python/helpers/pydev/pydev_tests_mainloop/gui-glut.py | 100 | 1564 | #!/usr/bin/env python
"""Simple GLUT example to manually test event loop integration.
To run this:
1) Enable the PyDev GUI event loop integration for glut
2) do an execfile on this script
3) ensure you have a working GUI simultaneously with an
interactive console
4) run: gl.glClearColor(1,1,1,1)
"""
if __name__ == '__main__':
    #!/usr/bin/env python
    import sys
    import OpenGL.GL as gl
    import OpenGL.GLUT as glut
    def close():
        """Destroy the current GLUT window (used as the WM close handler)."""
        glut.glutDestroyWindow(glut.glutGetWindow())
    def display():
        """Clear the buffers and swap; the minimal GLUT display callback."""
        gl.glClear (gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        glut.glutSwapBuffers()
    def resize(width,height):
        """Reset viewport and orthographic projection on window resize."""
        # NOTE(review): the +4 on height looks like a platform workaround —
        # confirm whether it is still needed.
        gl.glViewport(0, 0, width, height+4)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(0, width, 0, height+4, -1, 1)
        gl.glMatrixMode(gl.GL_MODELVIEW)
    # If a window already exists, GLUT was initialized by the interactive
    # event-loop integration; otherwise initialize it ourselves and run
    # the blocking main loop at the end.
    if glut.glutGetWindow() > 0:
        interactive = True
        glut.glutInit(sys.argv)
        glut.glutInitDisplayMode(glut.GLUT_DOUBLE |
                                 glut.GLUT_RGBA |
                                 glut.GLUT_DEPTH)
    else:
        interactive = False
    glut.glutCreateWindow('gui-glut')
    glut.glutDisplayFunc(display)
    glut.glutReshapeFunc(resize)
    # This is necessary on osx to be able to close the window
    # (else the close button is disabled)
    if sys.platform == 'darwin' and not bool(glut.HAVE_FREEGLUT):
        glut.glutWMCloseFunc(close)
    gl.glClearColor(0,0,0,1)
    if not interactive:
        glut.glutMainLoop()
| apache-2.0 |
bohdan-shramko/learning-python | source/sublime-packages/Packages/python-markdown/st3/markdown/__init__.py | 24 | 20784 | """
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <https://pythonhosted.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2013 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .__version__ import version, version_info # noqa
import codecs
import sys
import logging
import warnings
import importlib
from . import util
from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser
from .treeprocessors import build_treeprocessors
from .inlinepatterns import build_inlinepatterns
from .postprocessors import build_postprocessors
from .extensions import Extension
from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown(object):
"""Convert Markdown to HTML."""
doc_tag = "div" # Element used to wrap document - later removed
option_defaults = {
'html_replacement_text': '[HTML_REMOVED]',
'tab_length': 4,
'enable_attributes': True,
'smart_emphasis': True,
'lazy_ol': True,
}
output_formats = {
'html': to_html_string,
'html4': to_html_string,
'html5': to_html_string,
'xhtml': to_xhtml_string,
'xhtml1': to_xhtml_string,
'xhtml5': to_xhtml_string,
}
ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
def __init__(self, *args, **kwargs):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
* extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml5": Outputs XHTML style tags of HTML 5
* "xhtml": Outputs latest supported version of XHTML
(currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html5": Outputs HTML style tags of HTML 5
* "html": Outputs latest supported version of HTML
(currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
* safe_mode: Deprecated! Disallow raw html. One of "remove", "replace"
or "escape".
* html_replacement_text: Deprecated! Text used when safe_mode is set
to "replace".
* tab_length: Length of tabs in the source. Default: 4
* enable_attributes: Enable the conversion of attributes. Default: True
* smart_emphasis: Treat `_connected_words_` intelligently Default: True
* lazy_ol: Ignore number of first item of ordered lists. Default: True
"""
# For backward compatibility, loop through old positional args
pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
for c, arg in enumerate(args):
if pos[c] not in kwargs:
kwargs[pos[c]] = arg
if c+1 == len(pos): # pragma: no cover
# ignore any additional args
break
if len(args):
warnings.warn('Positional arguments are deprecated in Markdown. '
'Use keyword arguments only.',
DeprecationWarning)
# Loop through kwargs and assign defaults
for option, default in self.option_defaults.items():
setattr(self, option, kwargs.get(option, default))
self.safeMode = kwargs.get('safe_mode', False)
if self.safeMode and 'enable_attributes' not in kwargs:
# Disable attributes in safeMode when not explicitly set
self.enable_attributes = False
if 'safe_mode' in kwargs:
warnings.warn('"safe_mode" is deprecated in Python-Markdown. '
'Use an HTML sanitizer (like '
'Bleach https://bleach.readthedocs.io/) '
'if you are parsing untrusted markdown text. '
'See the 2.6 release notes for more info',
DeprecationWarning)
if 'html_replacement_text' in kwargs:
warnings.warn('The "html_replacement_text" keyword is '
'deprecated along with "safe_mode".',
DeprecationWarning)
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml1'))
self.reset()
def build_parser(self):
""" Build the parser from the various parts. """
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, util.string_type):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext.extendMarkdown(self, globals())
logger.debug(
'Successfully loaded extension "%s.%s".'
% (ext.__class__.__module__, ext.__class__.__name__)
)
elif ext is not None:
raise TypeError(
'Extension "%s.%s" must be of type: "markdown.Extension"'
% (ext.__class__.__module__, ext.__class__.__name__))
return self
def build_extension(self, ext_name, configs):
"""Build extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
configs = dict(configs)
# Parse extensions config params (ignore the order)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
warnings.warn('Setting configs in the Named Extension string is '
'deprecated. It is recommended that you '
'pass an instance of the extension class to '
'Markdown or use the "extension_configs" keyword. '
'The current behavior will raise an error in version 2.7. '
'See the Release Notes for Python-Markdown version '
'2.6 for more info.', DeprecationWarning)
# Get class name (if provided): `path.to.module:ClassName`
ext_name, class_name = ext_name.split(':', 1) \
if ':' in ext_name else (ext_name, '')
# Try loading the extension first from one place, then another
try:
# Assume string uses dot syntax (`path.to.some.module`)
module = importlib.import_module(ext_name)
logger.debug(
'Successfuly imported extension module "%s".' % ext_name
)
# For backward compat (until deprecation)
# check that this is an extension.
if ('.' not in ext_name and not (hasattr(module, 'makeExtension') or
(class_name and hasattr(module, class_name)))):
# We have a name conflict
# eg: extensions=['tables'] and PyTables is installed
raise ImportError
except ImportError:
# Preppend `markdown.extensions.` to name
module_name = '.'.join(['markdown.extensions', ext_name])
try:
module = importlib.import_module(module_name)
logger.debug(
'Successfuly imported extension module "%s".' %
module_name
)
warnings.warn('Using short names for Markdown\'s builtin '
'extensions is deprecated. Use the '
'full path to the extension with Python\'s dot '
'notation (eg: "%s" instead of "%s"). The '
'current behavior will raise an error in version '
'2.7. See the Release Notes for '
'Python-Markdown version 2.6 for more info.' %
(module_name, ext_name),
DeprecationWarning)
except ImportError:
# Preppend `mdx_` to name
module_name_old_style = '_'.join(['mdx', ext_name])
try:
module = importlib.import_module(module_name_old_style)
logger.debug(
'Successfuly imported extension module "%s".' %
module_name_old_style)
warnings.warn('Markdown\'s behavior of prepending "mdx_" '
'to an extension name is deprecated. '
'Use the full path to the '
'extension with Python\'s dot notation '
'(eg: "%s" instead of "%s"). The current '
'behavior will raise an error in version 2.7. '
'See the Release Notes for Python-Markdown '
'version 2.6 for more info.' %
(module_name_old_style, ext_name),
DeprecationWarning)
except ImportError as e:
message = "Failed loading extension '%s' from '%s', '%s' " \
"or '%s'" % (ext_name, ext_name, module_name,
module_name_old_style)
e.args = (message,) + e.args[1:]
raise
if class_name:
# Load given class name from module.
return getattr(module, class_name)(**configs)
else:
# Expect makeExtension() function to return a class.
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:]
raise
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
return self
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self
def set_output_format(self, format):
""" Set the output format for the class instance. """
self.output_format = format.lower()
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = 'Invalid Output Format: "%s". Use one of %s.' \
% (self.output_format,
'"' + '", "'.join(valid_formats) + '"')
e.args = (message,) + e.args[1:]
raise
return self
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.
"""
# Fixup the source text
if not source.strip():
return '' # a blank unicode string
try:
source = util.text_type(source)
except UnicodeDecodeError as e:
# Customise error message while maintaining original trackback
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot is not None:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = output.index(
'<%s>' % self.doc_tag) + len(self.doc_tag) + 2
end = output.rindex('</%s>' % self.doc_tag)
output = output[start:end].strip()
except ValueError: # pragma: no cover
if output.strip().endswith('<%s />' % self.doc_tag):
# We have an empty document
output = ''
else:
# We have a serious problem
raise ValueError('Markdown failed to strip top-level '
'tags. Document=%r' % output.strip())
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a Markdown file and returns the HTML as a Unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of Unicode
takes place in Python-Markdown. (All other code is Unicode-in /
Unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
if input:
if isinstance(input, util.string_type):
input_file = codecs.open(input, mode="r", encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if not isinstance(text, util.text_type):
text = text.decode(encoding)
text = text.lstrip('\ufeff') # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if output:
if isinstance(output, util.string_type):
output_file = codecs.open(output, "w",
encoding=encoding,
errors="xmlcharrefreplace")
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors="xmlcharrefreplace")
output_file.write(html)
# Don't close here. User may want to write more.
else:
# Encode manually and write bytes to stdout.
html = html.encode(encoding, "xmlcharrefreplace")
try:
# Write bytes directly to buffer (Python 3).
sys.stdout.buffer.write(html)
except AttributeError:
# Probably Python 2, which works with bytes by default.
sys.stdout.write(html)
return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
"""Convert a Markdown string to HTML and return HTML as a Unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string.
"""
md = Markdown(*args, **kwargs)
return md.convert(text)
def markdownFromFile(*args, **kwargs):
"""Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class.
"""
# For backward compatibility loop through positional args
pos = ['input', 'output', 'extensions', 'encoding']
c = 0
for arg in args:
if pos[c] not in kwargs:
kwargs[pos[c]] = arg
c += 1
if c == len(pos):
break
if len(args):
warnings.warn('Positional arguments are depreacted in '
'Markdown and will raise an error in version 2.7. '
'Use keyword arguments only.',
DeprecationWarning)
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None),
kwargs.get('output', None),
kwargs.get('encoding', None))
| mit |
wunderlins/learning | python/zodb/lib/osx/ZODB/tests/testblob.py | 2 | 25257 | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from ZODB.blob import Blob
from ZODB.blob import BushyLayout
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.tests.testConfig import ConfigTestBase
from ZODB._compat import Pickler, Unpickler, _protocol
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
from zope.testing import doctest
else:
import doctest
import os
import random
import re
import struct
import sys
import time
import transaction
import unittest
import ZConfig
import ZODB.blob
import ZODB.interfaces
import ZODB.tests.IteratorStorage
import ZODB.tests.StorageTestBase
import ZODB.tests.util
import zope.testing.renormalizing
try:
from StringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import BytesIO
try:
file_type = file
except NameError:
# Py3: Python 3 does not have a file type.
import io
file_type = io.BufferedReader
def new_time():
"""Create a _new_ time stamp.
This method also makes sure that after retrieving a timestamp that was
*before* a transaction was committed, that at least one second passes so
the packing time actually is before the commit time.
"""
now = new_time = time.time()
while new_time <= now:
new_time = time.time()
time.sleep(1)
return new_time
class ZODBBlobConfigTest(ConfigTestBase):
def test_map_config1(self):
self._test(
"""
<zodb>
<blobstorage>
blob-dir blobs
<mappingstorage/>
</blobstorage>
</zodb>
""")
def test_file_config1(self):
self._test(
"""
<zodb>
<blobstorage>
blob-dir blobs
<filestorage>
path Data.fs
</filestorage>
</blobstorage>
</zodb>
""")
def test_blob_dir_needed(self):
self.assertRaises(ZConfig.ConfigurationSyntaxError,
self._test,
"""
<zodb>
<blobstorage>
<mappingstorage/>
</blobstorage>
</zodb>
""")
class BlobCloneTests(ZODB.tests.util.TestCase):
def testDeepCopyCanInvalidate(self):
"""
Tests regression for invalidation problems related to missing
readers and writers values in cloned objects (see
http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
"""
import ZODB.MappingStorage
database = DB(ZODB.blob.BlobStorage(
'blobs', ZODB.MappingStorage.MappingStorage()))
connection = database.open()
root = connection.root()
transaction.begin()
root['blob'] = Blob()
transaction.commit()
stream = BytesIO()
p = Pickler(stream, _protocol)
p.dump(root['blob'])
u = Unpickler(stream)
stream.seek(0)
clone = u.load()
clone._p_invalidate()
# it should also be possible to open the cloned blob
# (even though it won't contain the original data)
clone.open().close()
# tearDown
database.close()
class BushyLayoutTests(ZODB.tests.util.TestCase):
def testBushyLayoutOIDToPathUnicode(self):
"OID-to-path should produce valid results given non-ASCII byte strings"
non_ascii_oid = b'>\xf1<0\xe9Q\x99\xf0'
# The argument should already be bytes;
# os.path.sep is native string type under both 2 and 3
# binascii.hexlify takes bytes and produces bytes under both py2 and py3
# the result should be the native string type
oid_as_path = BushyLayout().oid_to_path(non_ascii_oid)
self.assertEqual(
oid_as_path,
os.path.sep.join(
'0x3e/0xf1/0x3c/0x30/0xe9/0x51/0x99/0xf0'.split('/')))
# the reverse holds true as well
path_as_oid = BushyLayout().path_to_oid(oid_as_path)
self.assertEqual(
path_as_oid,
non_ascii_oid )
class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
def setUp(self):
ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
self._storage = self.create_storage()
class BlobUndoTests(BlobTestBase):
def testUndoWithoutPreviousVersion(self):
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
root['blob'] = Blob()
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
# the blob footprint object should exist no longer
self.assertRaises(KeyError, root.__getitem__, 'blob')
database.close()
def testUndo(self):
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
blob = Blob()
with blob.open('w') as file:
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
with blob.open('w') as file:
file.write(b'this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
def testUndoAfterConsumption(self):
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
with open('consume1', 'wb') as file:
file.write(b'this is state 1')
blob = Blob()
blob.consumeFile('consume1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
with open('consume2', 'wb') as file:
file.write(b'this is state 2')
blob.consumeFile('consume2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
def testRedo(self):
database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
transaction.begin()
with blob.open('w') as file:
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
with blob.open('w') as file:
file.write(b'this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 2')
database.close()
def testRedoOfCreation(self):
database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
transaction.begin()
with blob.open('w') as file:
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertRaises(KeyError, root.__getitem__, 'blob')
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
self.assertEqual(file.read(), b'this is state 1')
database.close()
class RecoveryBlobStorage(BlobTestBase,
ZODB.tests.IteratorStorage.IteratorDeepCompare):
def setUp(self):
BlobTestBase.setUp(self)
self._dst = self.create_storage('dest')
def tearDown(self):
self._dst.close()
BlobTestBase.tearDown(self)
# Requires a setUp() that creates a self._dst destination storage
def testSimpleBlobRecovery(self):
self.assertTrue(
ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage)
)
db = DB(self._storage)
conn = db.open()
conn.root()[1] = ZODB.blob.Blob()
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
with conn.root()[2].open('w') as file:
file.write(b'some data')
transaction.commit()
conn.root()[3] = ZODB.blob.Blob()
with conn.root()[3].open('w') as file:
file.write(
(b''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
for i in range(random.randint(10000,20000)))
)[:-random.randint(1,4)]
)
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
with conn.root()[2].open('w') as file:
file.write(b'some other data')
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def gc_blob_removes_uncommitted_data():
"""
>>> blob = Blob()
>>> with blob.open('w') as file:
... _ = file.write(b'x')
>>> fname = blob._p_blob_uncommitted
>>> os.path.exists(fname)
True
>>> file = blob = None
PyPy not being reference counted actually needs GC to be
explicitly requested. In experiments, it finds the weakref
on the first collection, but only does the cleanup on the second
collection:
>>> import gc
>>> _ = gc.collect()
>>> _ = gc.collect()
Now the file is gone on all platforms:
>>> os.path.exists(fname)
False
"""
def commit_from_wrong_partition():
"""
It should be possible to commit changes even when a blob is on a
different partition.
We can simulare this by temporarily breaking os.rename. :)
>>> def fail(*args):
... raise OSError
>>> os_rename = os.rename
>>> os.rename = fail
>>> import logging
>>> logger = logging.getLogger('ZODB.blob.copied')
>>> handler = logging.StreamHandler(sys.stdout)
>>> logger.propagate = False
>>> logger.setLevel(logging.DEBUG)
>>> logger.addHandler(handler)
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> with root['blob'].open('w') as file:
... _ = file.write(b'test')
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
>>> with root['blob'].open() as fp: fp.read()
'test'
Works with savepoints too:
>>> root['blob2'] = Blob()
>>> with root['blob2'].open('w') as file:
... _ = file.write(b'test2')
>>> _ = transaction.savepoint() # doctest: +ELLIPSIS
Copied blob file ...
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
>>> with root['blob2'].open() as fp: fp.read()
'test2'
>>> os.rename = os_rename
>>> logger.propagate = True
>>> logger.setLevel(0)
>>> logger.removeHandler(handler)
>>> handler.close()
>>> database.close()
"""
def packing_with_uncommitted_data_non_undoing():
"""
This covers regression for bug #130459.
When uncommitted data exists it formerly was written to the root of the
blob_directory and confused our packing strategy. We now use a separate
temporary directory that is ignored while packing.
>>> import transaction
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... _ = file.write(b'test')
>>> blob_storage.pack(new_time(), referencesf)
Clean up:
>>> database.close()
"""
def packing_with_uncommitted_data_undoing():
"""
This covers regression for bug #130459.
When uncommitted data exists it formerly was written to the root of the
blob_directory and confused our packing strategy. We now use a separate
temporary directory that is ignored while packing.
>>> from ZODB.serialize import referencesf
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... _ = file.write(b'test')
>>> blob_storage.pack(new_time(), referencesf)
Clean up:
>>> database.close()
"""
def secure_blob_directory():
"""
This is a test for secure creation and verification of secure settings of
blob directories.
>>> blob_storage = create_storage(blob_dir='blobs')
Two directories are created:
>>> os.path.isdir('blobs')
True
>>> tmp_dir = os.path.join('blobs', 'tmp')
>>> os.path.isdir(tmp_dir)
True
They are only accessible by the owner:
>>> oct(os.stat('blobs').st_mode)[-5:]
'40700'
>>> oct(os.stat(tmp_dir).st_mode)[-5:]
'40700'
These settings are recognized as secure:
>>> blob_storage.fshelper.isSecure('blobs')
True
>>> blob_storage.fshelper.isSecure(tmp_dir)
True
After making the permissions of tmp_dir more liberal, the directory is
recognized as insecure:
>>> os.chmod(tmp_dir, 0o40711)
>>> blob_storage.fshelper.isSecure(tmp_dir)
False
Clean up:
>>> blob_storage.close()
"""
# On windows, we can't create secure blob directories, at least not
# with APIs in the standard library, so there's no point in testing
# this.
if sys.platform == 'win32':
del secure_blob_directory
def loadblob_tmpstore():
"""
This is a test for assuring that the TmpStore's loadBlob implementation
falls back correctly to loadBlob on the backend.
First, let's setup a regular database and store a blob:
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... _ = file.write(b'test')
>>> import transaction
>>> transaction.commit()
>>> blob_oid = root['blob']._p_oid
>>> tid = connection._storage.lastTransaction()
Now we open a database with a TmpStore in front:
>>> database.close()
>>> from ZODB.Connection import TmpStore
>>> tmpstore = TmpStore(blob_storage)
We can access the blob correctly:
>>> tmpstore.loadBlob(blob_oid,tid) == blob_storage.loadBlob(blob_oid,tid)
True
Clean up:
>>> tmpstore.close()
>>> database.close()
"""
def is_blob_record():
r"""
>>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> transaction.commit()
>>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(0), '')[0])
False
>>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(1), '')[0])
True
An invalid pickle yields a false value:
>>> ZODB.blob.is_blob_record(b"Hello world!")
False
>>> ZODB.blob.is_blob_record(b'c__main__\nC\nq\x01.')
False
>>> ZODB.blob.is_blob_record(b'cWaaaa\nC\nq\x01.')
False
As does None, which may occur in delete records:
>>> ZODB.blob.is_blob_record(None)
False
>>> db.close()
"""
def do_not_depend_on_cwd():
"""
>>> bs = create_storage()
>>> here = os.getcwd()
>>> os.mkdir('evil')
>>> os.chdir('evil')
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'data')
>>> transaction.commit()
>>> os.chdir(here)
>>> with conn.root()['blob'].open() as fp: fp.read()
'data'
>>> bs.close()
"""
def savepoint_isolation():
"""Make sure savepoint data is distinct accross transactions
>>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> transaction.commit()
>>> with conn.root.b.open('w') as file:
... _ = file.write(b'1')
>>> _ = transaction.savepoint()
>>> tm = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm)
>>> with conn2.root.b.open('w') as file:
... _ = file.write(b'2')
>>> _ = tm.savepoint()
>>> with conn.root.b.open() as fp: fp.read()
'1'
>>> with conn2.root.b.open() as fp: fp.read()
'2'
>>> transaction.abort()
>>> tm.commit()
>>> conn.sync()
>>> with conn.root.b.open() as fp: fp.read()
'2'
>>> db.close()
"""
def savepoint_commits_without_invalidations_out_of_order():
"""Make sure transactions with blobs can be commited without the
invalidations out of order error (LP #509801)
>>> bs = create_storage()
>>> db = DB(bs)
>>> tm1 = transaction.TransactionManager()
>>> conn1 = db.open(transaction_manager=tm1)
>>> conn1.root.b = ZODB.blob.Blob(b'initial')
>>> tm1.commit()
>>> with conn1.root.b.open('w') as file:
... _ = file.write(b'1')
>>> _ = tm1.savepoint()
>>> tm2 = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm2)
>>> with conn2.root.b.open('w') as file:
... _ = file.write(b'2')
>>> _ = tm1.savepoint()
>>> with conn1.root.b.open() as fp: fp.read()
'1'
>>> with conn2.root.b.open() as fp: fp.read()
'2'
>>> tm2.commit()
>>> tm1.commit() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ConflictError: database conflict error...
>>> tm1.abort()
>>> db.close()
"""
def savepoint_cleanup():
"""Make sure savepoint data gets cleaned up.
>>> bs = create_storage()
>>> tdir = bs.temporaryDirectory()
>>> os.listdir(tdir)
[]
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> _ = transaction.savepoint()
>>> len(os.listdir(tdir))
1
>>> transaction.abort()
>>> os.listdir(tdir)
[]
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> transaction.commit()
>>> with conn.root.b.open('w') as file:
... _ = file.write(b'1')
>>> _ = transaction.savepoint()
>>> transaction.abort()
>>> os.listdir(tdir)
[]
>>> db.close()
"""
def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
r"""
>>> conn = ZODB.connection('data.fs', blob_dir='blobs')
>>> blob = ZODB.blob.Blob(b'blah')
>>> conn.add(blob)
>>> transaction.commit()
>>> old_serial = blob._p_serial
>>> blob._p_changed = True
>>> transaction.commit()
>>> with blob.open() as fp: fp.read()
'blah'
>>> old_serial == blob._p_serial
True
>>> conn.close()
"""
def setUp(test):
ZODB.tests.util.setUp(test)
test.globs['rmtree'] = zope.testing.setupstack.rmtree
def setUpBlobAdaptedFileStorage(test):
setUp(test)
def create_storage(name='data', blob_dir=None):
if blob_dir is None:
blob_dir = '%s.bobs' % name
return ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
test.globs['create_storage'] = create_storage
def storage_reusable_suite(prefix, factory,
                           test_blob_storage_recovery=False,
                           test_packing=False,
                           test_undo=True,
                           ):
    """Return a test suite for a generic IBlobStorage.
    Pass a factory taking a name and a blob directory name.
    """
    # Per-test setUp: after the common setUp(), inject a create_storage()
    # helper (and the blob file type) into the doctest globals so the .txt
    # fixtures can build storages from the given factory.
    def setup(test):
        setUp(test)
        def create_storage(name='data', blob_dir=None):
            if blob_dir is None:
                blob_dir = '%s.bobs' % name
            return factory(name, blob_dir)
        test.globs['create_storage'] = create_storage
        test.globs['file_type'] = file_type
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocFileSuite(
        "blob_connection.txt",
        "blob_importexport.txt",
        "blob_transaction.txt",
        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
        checker=zope.testing.renormalizing.RENormalizing([
            # Py3k renders bytes where Python2 used native strings...
            (re.compile(r"^b'"), "'"),
            (re.compile(r'^b"'), '"'),
            # ...and native strings where Python2 used unicode.
            (re.compile("^POSKeyError: u'No blob file"),
             "POSKeyError: 'No blob file"),
            # Py3k repr's exceptions with dotted names
            (re.compile("^ZODB.interfaces.BlobError:"), "BlobError:"),
            (re.compile("^ZODB.POSException.ConflictError:"), "ConflictError:"),
            (re.compile("^ZODB.POSException.POSKeyError:"), "POSKeyError:"),
            (re.compile("^ZODB.POSException.Unsupported:"), "Unsupported:"),
            # Normalize out blobfile paths for sake of Windows
            (re.compile(
                r'([a-zA-Z]:)?\%(sep)s.*\%(sep)s(server-)?blobs\%(sep)s.*\.blob'
                % dict(sep=os.path.sep)), '<BLOB STORAGE PATH>')
            ]),
        optionflags=doctest.ELLIPSIS,
        ))
    if test_packing:
        suite.addTest(doctest.DocFileSuite(
            "blob_packing.txt",
            setUp=setup, tearDown=zope.testing.setupstack.tearDown,
            ))
    suite.addTest(doctest.DocTestSuite(
        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
        checker = ZODB.tests.util.checker + \
            zope.testing.renormalizing.RENormalizing([
                (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
                (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
                ]),
        ))
    # NOTE: unlike the closure above, this create_storage takes ``self`` --
    # it is injected as a *method* of the dynamically created test classes
    # built in add_test_based_on_test_class() below.
    def create_storage(self, name='data', blob_dir=None):
        if blob_dir is None:
            blob_dir = '%s.bobs' % name
        return factory(name, blob_dir)
    # Clone a base test class under a prefixed name, equipping the clone with
    # the factory-specific create_storage method, and add it to the suite.
    def add_test_based_on_test_class(class_):
        new_class = class_.__class__(
            prefix+class_.__name__, (class_, ),
            dict(create_storage=create_storage),
            )
        suite.addTest(unittest.makeSuite(new_class))
    if test_blob_storage_recovery:
        add_test_based_on_test_class(RecoveryBlobStorage)
    if test_undo:
        add_test_based_on_test_class(BlobUndoTests)
    # MininalTestLayer [sic] is the actual (historically misspelled) name in
    # ZODB.tests.util -- do not "fix" it here.
    suite.layer = ZODB.tests.util.MininalTestLayer(prefix+'BlobTests')
    return suite
def test_suite():
    """Assemble the module's blob test suite: the unit-test classes, the
    doctest files (with output renormalized across Python versions and
    platforms), and the reusable storage suite run against a
    BlobStorage-wrapped FileStorage."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
    suite.addTest(unittest.makeSuite(BlobCloneTests))
    suite.addTest(unittest.makeSuite(BushyLayoutTests))
    suite.addTest(doctest.DocFileSuite(
        "blob_basic.txt",
        "blob_consume.txt",
        "blob_tempdir.txt",
        "blobstorage_packing.txt",
        setUp=setUp,
        tearDown=zope.testing.setupstack.tearDown,
        optionflags=doctest.ELLIPSIS,
        checker=ZODB.tests.util.checker,
        ))
    suite.addTest(doctest.DocFileSuite(
        "blob_layout.txt",
        optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
        setUp=setUp,
        tearDown=zope.testing.setupstack.tearDown,
        # Normalize path separators, layout directory prefixes and u'' reprs
        # so the expected doctest output matches on every platform/version.
        checker=ZODB.tests.util.checker +
        zope.testing.renormalizing.RENormalizing([
            (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
            (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
            (re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
            (re.compile(r"u('[^']*')"), r"\1"),
            ]),
        ))
    suite.addTest(storage_reusable_suite(
        'BlobAdaptedFileStorage',
        lambda name, blob_dir:
        ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name)),
        test_blob_storage_recovery=True,
        test_packing=True,
        ))
    return suite
# Allow running this test module directly; collect everything via test_suite().
if __name__ == '__main__':
    unittest.main(defaultTest = 'test_suite')
| gpl-2.0 |
wainersm/buildbot | master/buildbot/test/unit/test_scripts_upgrade_master.py | 10 | 8417 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import mock
from twisted.internet import defer
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from buildbot import config as config_module
from buildbot.db import connector
from buildbot.db import masters
from buildbot.db import model
from buildbot.scripts import base
from buildbot.scripts import upgrade_master
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util import www
def mkconfig(**kwargs):
    """Build a minimal upgrade-master config dict.

    Starts from the defaults the command-line parser would produce
    (quiet/replace off, basedir 'test'); any keyword argument overrides or
    extends those defaults.
    """
    cfg = {'quiet': False, 'replace': False, 'basedir': 'test'}
    cfg.update(kwargs)
    return cfg
class TestUpgradeMaster(dirs.DirsMixin, misc.StdoutAssertionsMixin,
                        unittest.TestCase):
    """Control-flow tests for upgrade_master.upgradeMaster().

    Every collaborator (checkBasedir, loadConfig, upgradeFiles,
    upgradeDatabase) is patched out; only the sequence of calls and the
    returned exit code are checked.
    """
    def setUp(self):
        # createMaster is decorated with @in_reactor, so strip that decoration
        # since the master is already running
        self.patch(upgrade_master, 'upgradeMaster',
                   upgrade_master.upgradeMaster._orig)
        self.setUpDirs('test')
        self.setUpStdoutAssertions()
    def patchFunctions(self, basedirOk=True, configOk=True):
        # Replace each helper with a stub that records its name in self.calls;
        # basedirOk/configOk control the stubbed outcome of the two
        # validation steps.
        self.calls = []
        def checkBasedir(config):
            self.calls.append('checkBasedir')
            return basedirOk
        self.patch(base, 'checkBasedir', checkBasedir)
        def loadConfig(config, configFileName='master.cfg'):
            self.calls.append('loadConfig')
            return config_module.MasterConfig() if configOk else False
        self.patch(base, 'loadConfig', loadConfig)
        def upgradeFiles(config):
            self.calls.append('upgradeFiles')
        self.patch(upgrade_master, 'upgradeFiles', upgradeFiles)
        def upgradeDatabase(config, master_cfg):
            self.assertIsInstance(master_cfg, config_module.MasterConfig)
            self.calls.append('upgradeDatabase')
        self.patch(upgrade_master, 'upgradeDatabase', upgradeDatabase)
    # tests
    def test_upgradeMaster_success(self):
        # happy path: exit code 0 and a completion message on stdout
        self.patchFunctions()
        d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
        @d.addCallback
        def check(rv):
            self.assertEqual(rv, 0)
            self.assertInStdout('upgrade complete')
        return d
    def test_upgradeMaster_quiet(self):
        # quiet=True must suppress all stdout output
        self.patchFunctions()
        d = upgrade_master.upgradeMaster(mkconfig(quiet=True), _noMonkey=True)
        @d.addCallback
        def check(rv):
            self.assertEqual(rv, 0)
            self.assertWasQuiet()
        return d
    def test_upgradeMaster_bad_basedir(self):
        # a failing basedir check yields exit code 1
        self.patchFunctions(basedirOk=False)
        d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
        @d.addCallback
        def check(rv):
            self.assertEqual(rv, 1)
        return d
    def test_upgradeMaster_bad_config(self):
        # a failing config load also yields exit code 1
        self.patchFunctions(configOk=False)
        d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
        @d.addCallback
        def check(rv):
            self.assertEqual(rv, 1)
        return d
class TestUpgradeMasterFunctions(www.WwwTestMixin, dirs.DirsMixin,
                                 misc.StdoutAssertionsMixin, unittest.TestCase):
    """Tests for the worker functions behind upgrade-master:
    installFile(), upgradeFiles() and upgradeDatabase().
    """
    def setUp(self):
        self.setUpDirs('test')
        self.basedir = os.path.abspath(os.path.join('test', 'basedir'))
        self.setUpStdoutAssertions()
    def tearDown(self):
        self.tearDownDirs()
    def writeFile(self, path, contents):
        # helper: write a text file in the scratch directory
        with open(path, 'wt') as f:
            f.write(contents)
    def readFile(self, path):
        # helper: read back a text file from the scratch directory
        with open(path, 'rt') as f:
            return f.read()
    # tests
    def test_installFile(self):
        # a missing destination is created from the source
        self.writeFile('test/srcfile', 'source data')
        upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
        self.assertEqual(self.readFile('test/destfile'), 'source data')
        self.assertInStdout('creating test/destfile')
    def test_installFile_existing_differing(self):
        # an existing, differing destination is kept; new content goes to .new
        self.writeFile('test/srcfile', 'source data')
        self.writeFile('test/destfile', 'dest data')
        upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
        self.assertEqual(self.readFile('test/destfile'), 'dest data')
        self.assertEqual(self.readFile('test/destfile.new'), 'source data')
        self.assertInStdout('writing new contents to')
    def test_installFile_existing_differing_overwrite(self):
        # overwrite=True replaces the destination and leaves no .new file
        self.writeFile('test/srcfile', 'source data')
        self.writeFile('test/destfile', 'dest data')
        upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile',
                                   overwrite=True)
        self.assertEqual(self.readFile('test/destfile'), 'source data')
        self.assertFalse(os.path.exists('test/destfile.new'))
        self.assertInStdout('overwriting')
    def test_installFile_existing_same(self):
        # identical content: nothing to do and nothing printed
        self.writeFile('test/srcfile', 'source data')
        self.writeFile('test/destfile', 'source data')
        upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
        self.assertEqual(self.readFile('test/destfile'), 'source data')
        self.assertFalse(os.path.exists('test/destfile.new'))
        self.assertWasQuiet()
    def test_installFile_quiet(self):
        # quiet=True suppresses the progress message
        self.writeFile('test/srcfile', 'source data')
        upgrade_master.installFile(mkconfig(quiet=True), 'test/destfile',
                                   'test/srcfile')
        self.assertWasQuiet()
    def test_upgradeFiles(self):
        upgrade_master.upgradeFiles(mkconfig())
        for f in [
            'test/master.cfg.sample',
        ]:
            self.assertTrue(os.path.exists(f), "%s not found" % f)
        self.assertInStdout('upgrading basedir')
    def test_upgradeFiles_notice_about_unused_public_html(self):
        os.mkdir('test/public_html')
        self.writeFile('test/public_html/index.html', 'INDEX')
        upgrade_master.upgradeFiles(mkconfig())
        self.assertInStdout('public_html is not used')
    @defer.inlineCallbacks
    def test_upgradeDatabase(self):
        setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
        self.patch(connector.DBConnector, 'setup', setup)
        upgrade = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
        self.patch(model.Model, 'upgrade', upgrade)
        setAllMastersActiveLongTimeAgo = mock.Mock(
            side_effect=lambda **kwargs: defer.succeed(None))
        self.patch(masters.MastersConnectorComponent,
                   'setAllMastersActiveLongTimeAgo', setAllMastersActiveLongTimeAgo)
        yield upgrade_master.upgradeDatabase(
            mkconfig(basedir='test', quiet=True),
            config_module.MasterConfig())
        # BUG FIX: this previously read `setup.asset_called_with(...)` -- a
        # typo.  Mock returns a child mock for *any* attribute access, so the
        # misspelled call asserted nothing at all.  Spell the Mock assertion
        # API correctly so the check actually runs.
        setup.assert_called_with(check_version=False, verbose=False)
        upgrade.assert_called_with()
        self.assertWasQuiet()
    @defer.inlineCallbacks
    def test_upgradeDatabaseFail(self):
        # a failing model upgrade is reported on stderr and returns 1
        setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
        self.patch(connector.DBConnector, 'setup', setup)
        self.patch(sys, 'stderr', NativeStringIO())
        upgrade = mock.Mock(
            side_effect=lambda **kwargs: defer.fail(Exception("o noz")))
        self.patch(model.Model, 'upgrade', upgrade)
        ret = yield upgrade_master._upgradeMaster(
            mkconfig(basedir='test', quiet=True),
            config_module.MasterConfig())
        self.assertEqual(ret, 1)
        self.assertIn("problem while upgrading!:\nTraceback (most recent call last):\n",
                      sys.stderr.getvalue())
        self.assertIn("o noz", sys.stderr.getvalue())
| gpl-2.0 |
Dapid/GPy | GPy/core/mapping.py | 11 | 1166 | # Copyright (c) 2013,2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
from .parameterization import Parameterized
import numpy as np
class Mapping(Parameterized):
    """
    Base model for shared mapping behaviours
    """
    def __init__(self, input_dim, output_dim, name='mapping'):
        # Dimensionality of the input (X) and output (f) spaces.  NOTE: the
        # attributes are set *before* the Parameterized initializer runs --
        # Parameterized customizes attribute handling, so keep this order.
        self.input_dim = input_dim
        self.output_dim = output_dim
        super(Mapping, self).__init__(name=name)
    def f(self, X):
        """Evaluate the mapping at the inputs X.  Subclasses must override."""
        raise NotImplementedError
    def gradients_X(self, dL_dF, X):
        """Gradients of the objective w.r.t. X given dL_dF.  Subclasses must override."""
        raise NotImplementedError
    def update_gradients(self, dL_dF, X):
        """Update the mapping's parameter gradients given dL_dF.  Subclasses must override."""
        raise NotImplementedError
class Bijective_mapping(Mapping):
    """
    This is a mapping that is bijective, i.e. you can go from X to f and
    also back from f to X. The inverse mapping is called g().
    """
    def __init__(self, input_dim, output_dim, name='bijective_mapping'):
        # BUG FIX: input_dim and output_dim were previously dropped on the
        # floor, but Mapping.__init__ requires them positionally -- every
        # instantiation raised TypeError.  Forward them to the base class.
        super(Bijective_mapping, self).__init__(input_dim, output_dim, name=name)
    def g(self, f):
        """Inverse mapping from output domain of the function to the inputs."""
        raise NotImplementedError
| bsd-3-clause |
WriterOfAlicrow/servo | tests/wpt/update/github.py | 197 | 5152 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urlparse import urljoin
requests = None
class GitHubError(Exception):
    """Raised when the GitHub API answers a request with a non-2xx status.

    ``status`` holds the HTTP status code, ``data`` the decoded JSON body.
    """
    def __init__(self, status, data):
        self.status, self.data = status, data
class GitHub(object):
    """Thin authenticated wrapper around the GitHub v3 REST API."""
    url_base = "https://api.github.com"
    def __init__(self, token):
        # Defer the import of requests since it isn't installed by default
        global requests
        if requests is None:
            import requests
        self.headers = {"Accept": "application/vnd.github.v3+json"}
        self.auth = (token, "x-oauth-basic")
    def get(self, path):
        """GET ``path`` and return the decoded JSON response."""
        return self._request("GET", path)
    def post(self, path, data):
        """POST ``data`` (JSON-encoded) to ``path``."""
        return self._request("POST", path, data=data)
    def put(self, path, data):
        """PUT ``data`` (JSON-encoded) to ``path``."""
        return self._request("PUT", path, data=data)
    def _request(self, method, path, data=None):
        """Perform one HTTP request; return JSON on 2xx, raise GitHubError otherwise."""
        url = urljoin(self.url_base, path)
        kwargs = dict(headers=self.headers, auth=self.auth)
        if data is not None:
            kwargs["data"] = json.dumps(data)
        response = requests.request(method, url, **kwargs)
        if not (200 <= response.status_code < 300):
            raise GitHubError(response.status_code, response.json())
        return response.json()
    def repo(self, owner, name):
        """GitHubRepo for a particular repository.
        :param owner: String repository owner
        :param name: String repository name
        """
        return GitHubRepo.from_name(self, owner, name)
class GitHubRepo(object):
    """Object representing a GitHub repository."""
    def __init__(self, github, data):
        """Wrap the decoded JSON repository resource ``data``.
        :param github: GitHub client used for further API calls
        :param data: decoded JSON repository resource
        """
        self._data = data
        self.gh = github
        self.owner = data["owner"]
        self.name = data["name"]
        self.url = data["ssh_url"]
    @classmethod
    def from_name(cls, github, owner, name):
        """Look a repository up by owner and name via the API."""
        return cls(github, github.get("/repos/%s/%s" % (owner, name)))
    @property
    def url_base(self):
        """API path prefix for this repository."""
        return "/repos/%s/" % (self._data["full_name"],)
    def create_pr(self, title, head, base, body):
        """Create a Pull Request in the repository
        :param title: Pull Request title
        :param head: ref to the HEAD of the PR branch.
        :param base: ref to the base branch for the Pull Request
        :param body: Description of the PR
        """
        return PullRequest.create(self, title, head, base, body)
    def load_pr(self, number):
        """Load an existing Pull Request by number.
        :param number: Pull Request number
        """
        return PullRequest.from_number(self, number)
    def path(self, suffix):
        """Join ``suffix`` onto this repository's API path prefix."""
        return urljoin(self.url_base, suffix)
class PullRequest(object):
    def __init__(self, repo, data):
        """Object representing a Pull Request
        :param repo: GitHubRepo the PR belongs to
        :param data: decoded JSON pull-request resource
        """
        self.repo = repo
        self._data = data
        self.number = data["number"]
        self.title = data["title"]
        self.base = data["base"]["ref"]
        # BUG FIX: this second assignment previously rebound self.base,
        # clobbering the base branch ref with the head ref.  Store the head
        # ref under its own attribute instead.
        self.head = data["head"]["ref"]
        self._issue = None
    @classmethod
    def from_number(cls, repo, number):
        """Load an existing Pull Request by number."""
        data = repo.gh.get(repo.path("pulls/%i" % number))
        return cls(repo, data)
    @classmethod
    def create(cls, repo, title, head, base, body):
        """Create a new Pull Request and return its wrapper object."""
        data = repo.gh.post(repo.path("pulls"),
                            {"title": title,
                             "head": head,
                             "base": base,
                             "body": body})
        return cls(repo, data)
    def path(self, suffix):
        """Join ``suffix`` onto this Pull Request's API path."""
        return urljoin(self.repo.path("pulls/%i/" % self.number), suffix)
    @property
    def issue(self):
        """Issue related to the Pull Request"""
        if self._issue is None:
            self._issue = Issue.from_number(self.repo, self.number)
        return self._issue
    def merge(self, commit_message=None):
        """Merge the Pull Request into its base branch.
        :param commit_message: Message to use for the merge commit. If None a default
                               message is used instead
        """
        if commit_message is None:
            # GitHub's conventional default names the *head* branch.  Before
            # the fix above, self.base accidentally held the head ref, so
            # using self.head here keeps the emitted message unchanged.
            commit_message = "Merge pull request #%i from %s" % (self.number, self.head)
        self.repo.gh.put(self.path("merge"),
                         {"commit_message": commit_message})
class Issue(object):
    """Object representing a GitHub Issue."""
    def __init__(self, repo, data):
        # Keep the raw JSON resource alongside the fields we use directly.
        self.number = data["number"]
        self.repo = repo
        self._data = data
    @classmethod
    def from_number(cls, repo, number):
        """Fetch issue ``number`` from the API and wrap it."""
        return cls(repo, repo.gh.get(repo.path("issues/%i" % number)))
    def path(self, suffix):
        """Join ``suffix`` onto this issue's API path."""
        return urljoin(self.repo.path("issues/%i/" % self.number), suffix)
    def add_comment(self, message):
        """Add a comment to the issue
        :param message: The text of the comment
        """
        payload = {"body": message}
        self.repo.gh.post(self.path("comments"), payload)
| mpl-2.0 |
gxx/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/forms/localflavor/fi.py | 51 | 16281 | from django.contrib.localflavor.fi.forms import (FIZipCodeField,
FISocialSecurityNumber, FIMunicipalitySelect)
from utils import LocalFlavorTestCase
class FILocalFlavorTests(LocalFlavorTestCase):
    def test_FIMunicipalitySelect(self):
        # Render the complete Finnish municipality <select> widget and compare
        # it against the expected markup; the 'turku' option must come out
        # with selected="selected".  The expected string is an exact fixture:
        # keep it byte-for-byte in sync with the localflavor data.
        f = FIMunicipalitySelect()
        out = u'''<select name="municipalities">
<option value="akaa">Akaa</option>
<option value="alajarvi">Alaj\xe4rvi</option>
<option value="alavieska">Alavieska</option>
<option value="alavus">Alavus</option>
<option value="artjarvi">Artj\xe4rvi</option>
<option value="asikkala">Asikkala</option>
<option value="askola">Askola</option>
<option value="aura">Aura</option>
<option value="brando">Br\xe4nd\xf6</option>
<option value="eckero">Ecker\xf6</option>
<option value="enonkoski">Enonkoski</option>
<option value="enontekio">Enonteki\xf6</option>
<option value="espoo">Espoo</option>
<option value="eura">Eura</option>
<option value="eurajoki">Eurajoki</option>
<option value="evijarvi">Evij\xe4rvi</option>
<option value="finstrom">Finstr\xf6m</option>
<option value="forssa">Forssa</option>
<option value="foglo">F\xf6gl\xf6</option>
<option value="geta">Geta</option>
<option value="haapajarvi">Haapaj\xe4rvi</option>
<option value="haapavesi">Haapavesi</option>
<option value="hailuoto">Hailuoto</option>
<option value="halsua">Halsua</option>
<option value="hamina">Hamina</option>
<option value="hammarland">Hammarland</option>
<option value="hankasalmi">Hankasalmi</option>
<option value="hanko">Hanko</option>
<option value="harjavalta">Harjavalta</option>
<option value="hartola">Hartola</option>
<option value="hattula">Hattula</option>
<option value="haukipudas">Haukipudas</option>
<option value="hausjarvi">Hausj\xe4rvi</option>
<option value="heinola">Heinola</option>
<option value="heinavesi">Hein\xe4vesi</option>
<option value="helsinki">Helsinki</option>
<option value="hirvensalmi">Hirvensalmi</option>
<option value="hollola">Hollola</option>
<option value="honkajoki">Honkajoki</option>
<option value="huittinen">Huittinen</option>
<option value="humppila">Humppila</option>
<option value="hyrynsalmi">Hyrynsalmi</option>
<option value="hyvinkaa">Hyvink\xe4\xe4</option>
<option value="hameenkoski">H\xe4meenkoski</option>
<option value="hameenkyro">H\xe4meenkyr\xf6</option>
<option value="hameenlinna">H\xe4meenlinna</option>
<option value="ii">Ii</option>
<option value="iisalmi">Iisalmi</option>
<option value="iitti">Iitti</option>
<option value="ikaalinen">Ikaalinen</option>
<option value="ilmajoki">Ilmajoki</option>
<option value="ilomantsi">Ilomantsi</option>
<option value="imatra">Imatra</option>
<option value="inari">Inari</option>
<option value="inkoo">Inkoo</option>
<option value="isojoki">Isojoki</option>
<option value="isokyro">Isokyr\xf6</option>
<option value="jalasjarvi">Jalasj\xe4rvi</option>
<option value="janakkala">Janakkala</option>
<option value="joensuu">Joensuu</option>
<option value="jokioinen">Jokioinen</option>
<option value="jomala">Jomala</option>
<option value="joroinen">Joroinen</option>
<option value="joutsa">Joutsa</option>
<option value="juankoski">Juankoski</option>
<option value="juuka">Juuka</option>
<option value="juupajoki">Juupajoki</option>
<option value="juva">Juva</option>
<option value="jyvaskyla">Jyv\xe4skyl\xe4</option>
<option value="jamijarvi">J\xe4mij\xe4rvi</option>
<option value="jamsa">J\xe4ms\xe4</option>
<option value="jarvenpaa">J\xe4rvenp\xe4\xe4</option>
<option value="kaarina">Kaarina</option>
<option value="kaavi">Kaavi</option>
<option value="kajaani">Kajaani</option>
<option value="kalajoki">Kalajoki</option>
<option value="kangasala">Kangasala</option>
<option value="kangasniemi">Kangasniemi</option>
<option value="kankaanpaa">Kankaanp\xe4\xe4</option>
<option value="kannonkoski">Kannonkoski</option>
<option value="kannus">Kannus</option>
<option value="karijoki">Karijoki</option>
<option value="karjalohja">Karjalohja</option>
<option value="karkkila">Karkkila</option>
<option value="karstula">Karstula</option>
<option value="karttula">Karttula</option>
<option value="karvia">Karvia</option>
<option value="kaskinen">Kaskinen</option>
<option value="kauhajoki">Kauhajoki</option>
<option value="kauhava">Kauhava</option>
<option value="kauniainen">Kauniainen</option>
<option value="kaustinen">Kaustinen</option>
<option value="keitele">Keitele</option>
<option value="kemi">Kemi</option>
<option value="kemijarvi">Kemij\xe4rvi</option>
<option value="keminmaa">Keminmaa</option>
<option value="kemionsaari">Kemi\xf6nsaari</option>
<option value="kempele">Kempele</option>
<option value="kerava">Kerava</option>
<option value="kerimaki">Kerim\xe4ki</option>
<option value="kesalahti">Kes\xe4lahti</option>
<option value="keuruu">Keuruu</option>
<option value="kihnio">Kihni\xf6</option>
<option value="kiikoinen">Kiikoinen</option>
<option value="kiiminki">Kiiminki</option>
<option value="kinnula">Kinnula</option>
<option value="kirkkonummi">Kirkkonummi</option>
<option value="kitee">Kitee</option>
<option value="kittila">Kittil\xe4</option>
<option value="kiuruvesi">Kiuruvesi</option>
<option value="kivijarvi">Kivij\xe4rvi</option>
<option value="kokemaki">Kokem\xe4ki</option>
<option value="kokkola">Kokkola</option>
<option value="kolari">Kolari</option>
<option value="konnevesi">Konnevesi</option>
<option value="kontiolahti">Kontiolahti</option>
<option value="korsnas">Korsn\xe4s</option>
<option value="koskitl">Koski Tl</option>
<option value="kotka">Kotka</option>
<option value="kouvola">Kouvola</option>
<option value="kristiinankaupunki">Kristiinankaupunki</option>
<option value="kruunupyy">Kruunupyy</option>
<option value="kuhmalahti">Kuhmalahti</option>
<option value="kuhmo">Kuhmo</option>
<option value="kuhmoinen">Kuhmoinen</option>
<option value="kumlinge">Kumlinge</option>
<option value="kuopio">Kuopio</option>
<option value="kuortane">Kuortane</option>
<option value="kurikka">Kurikka</option>
<option value="kustavi">Kustavi</option>
<option value="kuusamo">Kuusamo</option>
<option value="kylmakoski">Kylm\xe4koski</option>
<option value="kyyjarvi">Kyyj\xe4rvi</option>
<option value="karkola">K\xe4rk\xf6l\xe4</option>
<option value="karsamaki">K\xe4rs\xe4m\xe4ki</option>
<option value="kokar">K\xf6kar</option>
<option value="koylio">K\xf6yli\xf6</option>
<option value="lahti">Lahti</option>
<option value="laihia">Laihia</option>
<option value="laitila">Laitila</option>
<option value="lapinjarvi">Lapinj\xe4rvi</option>
<option value="lapinlahti">Lapinlahti</option>
<option value="lappajarvi">Lappaj\xe4rvi</option>
<option value="lappeenranta">Lappeenranta</option>
<option value="lapua">Lapua</option>
<option value="laukaa">Laukaa</option>
<option value="lavia">Lavia</option>
<option value="lemi">Lemi</option>
<option value="lemland">Lemland</option>
<option value="lempaala">Lemp\xe4\xe4l\xe4</option>
<option value="leppavirta">Lepp\xe4virta</option>
<option value="lestijarvi">Lestij\xe4rvi</option>
<option value="lieksa">Lieksa</option>
<option value="lieto">Lieto</option>
<option value="liminka">Liminka</option>
<option value="liperi">Liperi</option>
<option value="lohja">Lohja</option>
<option value="loimaa">Loimaa</option>
<option value="loppi">Loppi</option>
<option value="loviisa">Loviisa</option>
<option value="luhanka">Luhanka</option>
<option value="lumijoki">Lumijoki</option>
<option value="lumparland">Lumparland</option>
<option value="luoto">Luoto</option>
<option value="luumaki">Luum\xe4ki</option>
<option value="luvia">Luvia</option>
<option value="lansi-turunmaa">L\xe4nsi-Turunmaa</option>
<option value="maalahti">Maalahti</option>
<option value="maaninka">Maaninka</option>
<option value="maarianhamina">Maarianhamina</option>
<option value="marttila">Marttila</option>
<option value="masku">Masku</option>
<option value="merijarvi">Merij\xe4rvi</option>
<option value="merikarvia">Merikarvia</option>
<option value="miehikkala">Miehikk\xe4l\xe4</option>
<option value="mikkeli">Mikkeli</option>
<option value="muhos">Muhos</option>
<option value="multia">Multia</option>
<option value="muonio">Muonio</option>
<option value="mustasaari">Mustasaari</option>
<option value="muurame">Muurame</option>
<option value="mynamaki">Myn\xe4m\xe4ki</option>
<option value="myrskyla">Myrskyl\xe4</option>
<option value="mantsala">M\xe4nts\xe4l\xe4</option>
<option value="mantta-vilppula">M\xe4ntt\xe4-Vilppula</option>
<option value="mantyharju">M\xe4ntyharju</option>
<option value="naantali">Naantali</option>
<option value="nakkila">Nakkila</option>
<option value="nastola">Nastola</option>
<option value="nilsia">Nilsi\xe4</option>
<option value="nivala">Nivala</option>
<option value="nokia">Nokia</option>
<option value="nousiainen">Nousiainen</option>
<option value="nummi-pusula">Nummi-Pusula</option>
<option value="nurmes">Nurmes</option>
<option value="nurmijarvi">Nurmij\xe4rvi</option>
<option value="narpio">N\xe4rpi\xf6</option>
<option value="oravainen">Oravainen</option>
<option value="orimattila">Orimattila</option>
<option value="oripaa">Orip\xe4\xe4</option>
<option value="orivesi">Orivesi</option>
<option value="oulainen">Oulainen</option>
<option value="oulu">Oulu</option>
<option value="oulunsalo">Oulunsalo</option>
<option value="outokumpu">Outokumpu</option>
<option value="padasjoki">Padasjoki</option>
<option value="paimio">Paimio</option>
<option value="paltamo">Paltamo</option>
<option value="parikkala">Parikkala</option>
<option value="parkano">Parkano</option>
<option value="pedersore">Peders\xf6re</option>
<option value="pelkosenniemi">Pelkosenniemi</option>
<option value="pello">Pello</option>
<option value="perho">Perho</option>
<option value="pertunmaa">Pertunmaa</option>
<option value="petajavesi">Pet\xe4j\xe4vesi</option>
<option value="pieksamaki">Pieks\xe4m\xe4ki</option>
<option value="pielavesi">Pielavesi</option>
<option value="pietarsaari">Pietarsaari</option>
<option value="pihtipudas">Pihtipudas</option>
<option value="pirkkala">Pirkkala</option>
<option value="polvijarvi">Polvij\xe4rvi</option>
<option value="pomarkku">Pomarkku</option>
<option value="pori">Pori</option>
<option value="pornainen">Pornainen</option>
<option value="porvoo">Porvoo</option>
<option value="posio">Posio</option>
<option value="pudasjarvi">Pudasj\xe4rvi</option>
<option value="pukkila">Pukkila</option>
<option value="punkaharju">Punkaharju</option>
<option value="punkalaidun">Punkalaidun</option>
<option value="puolanka">Puolanka</option>
<option value="puumala">Puumala</option>
<option value="pyhtaa">Pyht\xe4\xe4</option>
<option value="pyhajoki">Pyh\xe4joki</option>
<option value="pyhajarvi">Pyh\xe4j\xe4rvi</option>
<option value="pyhanta">Pyh\xe4nt\xe4</option>
<option value="pyharanta">Pyh\xe4ranta</option>
<option value="palkane">P\xe4lk\xe4ne</option>
<option value="poytya">P\xf6yty\xe4</option>
<option value="raahe">Raahe</option>
<option value="raasepori">Raasepori</option>
<option value="raisio">Raisio</option>
<option value="rantasalmi">Rantasalmi</option>
<option value="ranua">Ranua</option>
<option value="rauma">Rauma</option>
<option value="rautalampi">Rautalampi</option>
<option value="rautavaara">Rautavaara</option>
<option value="rautjarvi">Rautj\xe4rvi</option>
<option value="reisjarvi">Reisj\xe4rvi</option>
<option value="riihimaki">Riihim\xe4ki</option>
<option value="ristiina">Ristiina</option>
<option value="ristijarvi">Ristij\xe4rvi</option>
<option value="rovaniemi">Rovaniemi</option>
<option value="ruokolahti">Ruokolahti</option>
<option value="ruovesi">Ruovesi</option>
<option value="rusko">Rusko</option>
<option value="raakkyla">R\xe4\xe4kkyl\xe4</option>
<option value="saarijarvi">Saarij\xe4rvi</option>
<option value="salla">Salla</option>
<option value="salo">Salo</option>
<option value="saltvik">Saltvik</option>
<option value="sastamala">Sastamala</option>
<option value="sauvo">Sauvo</option>
<option value="savitaipale">Savitaipale</option>
<option value="savonlinna">Savonlinna</option>
<option value="savukoski">Savukoski</option>
<option value="seinajoki">Sein\xe4joki</option>
<option value="sievi">Sievi</option>
<option value="siikainen">Siikainen</option>
<option value="siikajoki">Siikajoki</option>
<option value="siikalatva">Siikalatva</option>
<option value="siilinjarvi">Siilinj\xe4rvi</option>
<option value="simo">Simo</option>
<option value="sipoo">Sipoo</option>
<option value="siuntio">Siuntio</option>
<option value="sodankyla">Sodankyl\xe4</option>
<option value="soini">Soini</option>
<option value="somero">Somero</option>
<option value="sonkajarvi">Sonkaj\xe4rvi</option>
<option value="sotkamo">Sotkamo</option>
<option value="sottunga">Sottunga</option>
<option value="sulkava">Sulkava</option>
<option value="sund">Sund</option>
<option value="suomenniemi">Suomenniemi</option>
<option value="suomussalmi">Suomussalmi</option>
<option value="suonenjoki">Suonenjoki</option>
<option value="sysma">Sysm\xe4</option>
<option value="sakyla">S\xe4kyl\xe4</option>
<option value="taipalsaari">Taipalsaari</option>
<option value="taivalkoski">Taivalkoski</option>
<option value="taivassalo">Taivassalo</option>
<option value="tammela">Tammela</option>
<option value="tampere">Tampere</option>
<option value="tarvasjoki">Tarvasjoki</option>
<option value="tervo">Tervo</option>
<option value="tervola">Tervola</option>
<option value="teuva">Teuva</option>
<option value="tohmajarvi">Tohmaj\xe4rvi</option>
<option value="toholampi">Toholampi</option>
<option value="toivakka">Toivakka</option>
<option value="tornio">Tornio</option>
<option value="turku" selected="selected">Turku</option>
<option value="tuusniemi">Tuusniemi</option>
<option value="tuusula">Tuusula</option>
<option value="tyrnava">Tyrn\xe4v\xe4</option>
<option value="toysa">T\xf6ys\xe4</option>
<option value="ulvila">Ulvila</option>
<option value="urjala">Urjala</option>
<option value="utajarvi">Utaj\xe4rvi</option>
<option value="utsjoki">Utsjoki</option>
<option value="uurainen">Uurainen</option>
<option value="uusikaarlepyy">Uusikaarlepyy</option>
<option value="uusikaupunki">Uusikaupunki</option>
<option value="vaala">Vaala</option>
<option value="vaasa">Vaasa</option>
<option value="valkeakoski">Valkeakoski</option>
<option value="valtimo">Valtimo</option>
<option value="vantaa">Vantaa</option>
<option value="varkaus">Varkaus</option>
<option value="varpaisjarvi">Varpaisj\xe4rvi</option>
<option value="vehmaa">Vehmaa</option>
<option value="vesanto">Vesanto</option>
<option value="vesilahti">Vesilahti</option>
<option value="veteli">Veteli</option>
<option value="vierema">Vierem\xe4</option>
<option value="vihanti">Vihanti</option>
<option value="vihti">Vihti</option>
<option value="viitasaari">Viitasaari</option>
<option value="vimpeli">Vimpeli</option>
<option value="virolahti">Virolahti</option>
<option value="virrat">Virrat</option>
<option value="vardo">V\xe5rd\xf6</option>
<option value="vahakyro">V\xe4h\xe4kyr\xf6</option>
<option value="voyri-maksamaa">V\xf6yri-Maksamaa</option>
<option value="yli-ii">Yli-Ii</option>
<option value="ylitornio">Ylitornio</option>
<option value="ylivieska">Ylivieska</option>
<option value="ylojarvi">Yl\xf6j\xe4rvi</option>
<option value="ypaja">Yp\xe4j\xe4</option>
<option value="ahtari">\xc4ht\xe4ri</option>
<option value="aanekoski">\xc4\xe4nekoski</option>
</select>'''
        self.assertEqual(f.render('municipalities', 'turku'), out)
def test_FIZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXXX.']
valid = {
'20540': '20540',
'20101': '20101',
}
invalid = {
'20s40': error_format,
'205401': error_format
}
self.assertFieldOutput(FIZipCodeField, valid, invalid)
def test_FISocialSecurityNumber(self):
error_invalid = [u'Enter a valid Finnish social security number.']
valid = {
'010101-0101': '010101-0101',
'010101+0101': '010101+0101',
'010101A0101': '010101A0101',
}
invalid = {
'101010-0102': error_invalid,
'10a010-0101': error_invalid,
'101010-0\xe401': error_invalid,
'101010b0101': error_invalid,
}
self.assertFieldOutput(FISocialSecurityNumber, valid, invalid)
| gpl-3.0 |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/hmac.py | 142 | 5063 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
# Translation tables XOR-ing every byte value with the RFC 2104 opad (0x5C)
# and ipad (0x36) constants, so key padding is one C-level translate() call.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    blocksize = 64 # 512-bit HMAC; can be changed in subclasses.

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key: key for the keyed hash object.
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A hash name suitable for hashlib.new().
                   Defaults to hashlib.md5.
                   Implicit default to hashlib.md5 is deprecated and will be
                   removed in Python 3.6.

        Note: key and msg must be a bytes or bytearray objects.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
        if digestmod is None:
            _warnings.warn("HMAC() without an explicit digestmod argument "
                           "is deprecated.", PendingDeprecationWarning, 2)
            digestmod = _hashlib.md5
        # Normalize digestmod to a constructor callable taking an optional
        # initial-data argument.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.new(d)
        # Two independent hash states per RFC 2104: inner hashes
        # (key^ipad || msg); outer hashes (key^opad || inner digest).
        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size
        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize
        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize
        # RFC 2104: keys longer than one block are first hashed down;
        # every key is then zero-padded to exactly one block.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()
        key = key + bytes(blocksize - len(key))
        self.outer.update(key.translate(trans_5C))
        self.inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        # Mirrors hashlib's .name convention, e.g. "hmac-md5".
        return "hmac-" + self.inner.name

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        # Copying first keeps this object reusable after digest().
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg = None, digestmod = None):
    """Create and return a new HMAC hashing object.

    key: bytes or bytearray secret used to key the hash.
    msg: optional initial data hashed into the object's starting state.
    digestmod: module, hashlib constructor or hash name selecting the
        underlying digest (see HMAC.__init__).

    The returned object supports update(), digest(), hexdigest() and
    copy(), so arbitrary data can be fed in incrementally and the hash
    value requested at any time.
    """
    return HMAC(key, msg=msg, digestmod=digestmod)
| apache-2.0 |
kaksmet/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py | 658 | 7598 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Message related utilities.
Note: request.connection.write/read are used in this module, even though
mod_python document says that they should be used only in connection
handlers. Unfortunately, we have no other options. For example,
request.write/read are not suitable because they don't allow direct raw
bytes writing/reading.
"""
import Queue
import threading
# Export Exception symbols from msgutil for backward compatibility
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import UnsupportedFrameException
# An API for handler to send/receive WebSocket messages.
def close_connection(request):
    """Close the WebSocket connection associated with *request*.

    Args:
        request: mod_python request.
    """
    stream = request.ws_stream
    stream.close_connection()
def send_message(request, payload_data, end=True, binary=False):
    """Send a message (or part of a message) over the request's stream.

    Args:
        request: mod_python request.
        payload_data: unicode text or str binary to send.
        end: True to terminate a message.
             False to send payload_data as part of a message that is to be
             terminated by next or later send_message call with end=True.
        binary: send payload_data as binary frame(s).

    Raises:
        BadOperationException: when server already terminated.
    """
    stream = request.ws_stream
    stream.send_message(payload_data, end, binary)
def receive_message(request):
    """Receive one WebSocket frame; return its payload.

    Text frames come back as unicode, binary frames as str.

    Args:
        request: mod_python request.

    Raises:
        InvalidFrameException: when client send invalid frame.
        UnsupportedFrameException: when client send unsupported frame e.g. some
            of reserved bit is set but no extension can recognize it.
        InvalidUTF8Exception: when client send a text frame containing any
            invalid UTF-8 string.
        ConnectionTerminatedException: when the connection is closed
            unexpectedly.
        BadOperationException: when client already terminated.
    """
    stream = request.ws_stream
    return stream.receive_message()
def send_ping(request, body=''):
    """Send a ping frame carrying *body* over the request's stream."""
    stream = request.ws_stream
    stream.send_ping(body)
class MessageReceiver(threading.Thread):
    """This class receives messages from the client.

    This class provides three ways to receive messages: blocking,
    non-blocking, and via callback. Callback has the highest precedence.

    Note: This class should not be used with the standalone server for wss
    because pyOpenSSL used by the server raises a fatal error if the socket
    is accessed from multiple threads.
    """

    def __init__(self, request, onmessage=None):
        """Construct an instance.

        Args:
            request: mod_python request.
            onmessage: a function to be called when a message is received.
                       May be None. If not None, the function is called on
                       another thread. In that case, MessageReceiver.receive
                       and MessageReceiver.receive_nowait are useless
                       because they will never return any messages.
        """
        threading.Thread.__init__(self)
        self._request = request
        self._queue = Queue.Queue()
        self._onmessage = onmessage
        self._stop_requested = False
        # Daemonize so a receive blocked on the socket cannot keep the
        # process alive; the reader loop starts immediately.
        self.setDaemon(True)
        self.start()

    def run(self):
        # Thread body: pump incoming messages either to the callback or to
        # the internal queue until stop() is requested or receive_message
        # raises; always close the connection on the way out.
        try:
            while not self._stop_requested:
                message = receive_message(self._request)
                if self._onmessage:
                    self._onmessage(message)
                else:
                    self._queue.put(message)
        finally:
            close_connection(self._request)

    def receive(self):
        """ Receive a message from the channel, blocking.

        Returns:
            message as a unicode string.
        """
        return self._queue.get()

    def receive_nowait(self):
        """ Receive a message from the channel, non-blocking.

        Returns:
            message as a unicode string if available. None otherwise.
        """
        try:
            message = self._queue.get_nowait()
        except Queue.Empty:
            message = None
        return message

    def stop(self):
        """Request to stop this instance.

        The instance will be stopped after receiving the next message.
        This method may not be very useful, but there is no clean way
        in Python to forcefully stop a running thread.
        """
        # Flag only; run() checks it before the next blocking receive.
        self._stop_requested = True
class MessageSender(threading.Thread):
    """This class sends messages to the client.

    This class provides both synchronous and asynchronous ways to send
    messages.

    Note: This class should not be used with the standalone server for wss
    because pyOpenSSL used by the server raises a fatal error if the socket
    is accessed from multiple threads.
    """

    def __init__(self, request):
        """Construct an instance.

        Args:
            request: mod_python request.
        """
        threading.Thread.__init__(self)
        self._request = request
        self._queue = Queue.Queue()
        self.setDaemon(True)
        self.start()

    def run(self):
        # Worker loop: all writes happen on this one thread so callers
        # never touch the socket concurrently.
        while True:
            message, condition = self._queue.get()
            condition.acquire()
            send_message(self._request, message)
            condition.notify()
            condition.release()

    def send(self, message):
        """Send a message, blocking."""
        # Acquire the condition BEFORE queueing so the worker cannot
        # notify() before we are waiting; wait() releases the lock
        # atomically while blocked.
        condition = threading.Condition()
        condition.acquire()
        self._queue.put((message, condition))
        condition.wait()

    def send_nowait(self, message):
        """Send a message, non-blocking."""
        # A throwaway Condition keeps the queue item shape uniform; nobody
        # waits on it, so the worker's notify() is a harmless no-op.
        self._queue.put((message, threading.Condition()))
# vi:sts=4 sw=4 et
| mpl-2.0 |
ningchi/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
# All supervised cluster metrics sharing the (labels_true, labels_pred)
# call signature; the generic tests below iterate over this list.
score_funcs = [
    adjusted_rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    # Every supervised metric must reject mismatched lengths and
    # non-1D label arrays with a descriptive ValueError.
    bad_inputs = [
        ('labels_true and labels_pred must have same size, got 2 and 3',
         [0, 1], [1, 1, 1]),
        ('labels_true must be 1D: shape is (2',
         [[0, 1], [1, 0]], [1, 1, 1]),
        ('labels_pred must be 1D: shape is (2',
         [0, 1, 0], [[1, 1], [0, 0]]),
    ]
    for score_func in score_funcs:
        for message, labels_true, labels_pred in bad_inputs:
            assert_raise_message(ValueError, message, score_func,
                                 labels_true, labels_pred)
def test_perfect_matches():
    # Labelings that are identical up to a renaming of the label values
    # must score exactly 1.0 under every metric.
    perfect_pairs = [
        ([], []),
        ([0], [1]),
        ([0, 0, 0], [0, 0, 0]),
        ([0, 1, 0], [42, 7, 42]),
        ([0., 1., 0.], [42., 7., 42.]),
        ([0., 1., 2.], [42., 7., 2.]),
        ([0, 1, 2], [42, 7, 2]),
    ]
    for score_func in score_funcs:
        for labels_a, labels_b in perfect_pairs:
            assert_equal(score_func(labels_a, labels_b), 1.0)
def test_homogeneous_but_not_complete_labeling():
    # Splitting one true class across clusters keeps homogeneity at 1.0
    # but lowers completeness (and therefore the v-measure).
    hom, com, v_m = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(hom, 1.00, 2)
    assert_almost_equal(com, 0.69, 2)
    assert_almost_equal(v_m, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    # Merging distinct true classes into one cluster keeps completeness
    # at 1.0 but lowers homogeneity (and therefore the v-measure).
    hom, com, v_m = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(hom, 0.58, 2)
    assert_almost_equal(com, 1.00, 2)
    assert_almost_equal(v_m, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    # A labeling that is neither homogeneous nor complete, but not so bad
    # either: all three scores land strictly between 0 and 1.
    hom, com, v_m = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(hom, 0.67, 2)
    assert_almost_equal(com, 0.42, 2)
    assert_almost_equal(v_m, 0.52, 2)
def test_non_consicutive_labels():
    # Regression test: metrics must tolerate label values with gaps.
    # (The function name keeps its historical typo so that external test
    # selection by name keeps working.)
    gapped_cases = [
        ([0, 0, 0, 2, 2, 2], [0, 1, 0, 1, 2, 2]),
        ([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]),
    ]
    for labels_true, labels_pred in gapped_cases:
        hom, com, v_m = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
        assert_almost_equal(hom, 0.67, 2)
        assert_almost_equal(com, 0.42, 2)
        assert_almost_equal(v_m, 0.52, 2)
    assert_almost_equal(
        adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2]), 0.24, 2)
    assert_almost_equal(
        adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]), 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    """Score pairs of random uniform cluster labelings.

    Returns a (len(k_range), n_runs) array where row i holds the scores of
    n_runs random labeling pairs drawn with k_range[i] distinct labels.
    """
    # NOTE(review): RandomState.random_integers is deprecated in NumPy in
    # favor of randint; kept here because switching would change the draws
    # and hence the expected values asserted elsewhere in this file.
    random_labels = np.random.RandomState(seed).random_integers
    scores = np.zeros((len(k_range), n_runs))
    for row, n_clusters in enumerate(k_range):
        for col in range(n_runs):
            labels_a = random_labels(low=0, high=n_clusters - 1,
                                     size=n_samples)
            labels_b = random_labels(low=0, high=n_clusters - 1,
                                     size=n_samples)
            scores[row, col] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    """Adjusted scores should be almost zero on random labelings."""
    n_clusters_range = [2, 10, 50, 90]
    scores = uniform_labelings_scores(
        adjusted_rand_score, 100, n_clusters_range, 10)
    # The worst score over the runs stays tiny for every cluster count.
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    """Compare AMI and its ingredients against precomputed known values."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Plain mutual information.
    assert_almost_equal(mutual_info_score(labels_a, labels_b), 0.41022, 5)
    # Expected mutual information.
    contingency = contingency_matrix(labels_a, labels_b)
    total = np.sum(contingency)
    assert_almost_equal(expected_mutual_information(contingency, total),
                        0.15042, 5)
    # Adjusted mutual information.
    assert_almost_equal(adjusted_mutual_info_score(labels_a, labels_b),
                        0.27502, 5)
    assert_equal(adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3]), 1.0)
    # A much larger pair of arrays exercises numerical stability; the
    # result is not accurate to more than 2 places.
    big_a = np.array([list(labels_a) * 110]).flatten()
    big_b = np.array([list(labels_b) * 110]).flatten()
    assert_almost_equal(adjusted_mutual_info_score(big_a, big_b), 0.37, 2)
def test_entropy():
    # Known value for a small label vector, and the degenerate empty case.
    assert_almost_equal(entropy([0, 0, 42.]), 0.6365141, 5)
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    observed = contingency_matrix(labels_a, labels_b)
    # np.histogram2d over the same bin edges is an independent reference.
    edges = np.arange(1, 5)
    reference = np.histogram2d(labels_a, labels_b, bins=(edges, edges))[0]
    assert_array_almost_equal(observed, reference)
    # eps adds a constant smoothing term to every cell.
    assert_array_almost_equal(contingency_matrix(labels_a, labels_b, eps=.1),
                              reference + .1)
def test_exactly_zero_info_score():
    """Check numerical stability when information is exactly zero."""
    # Fix: np.int was only an alias for the builtin int and was removed in
    # NumPy 1.24; use int directly (identical semantics).
    for size in np.logspace(1, 4, 4).astype(int):
        # One labeling is constant, the other all-distinct: zero shared
        # information, so every normalized/adjusted score must be exactly 0.
        labels_a = np.ones(size, dtype=int)
        labels_b = np.arange(size, dtype=int)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
    """Check relation between v_measure, entropy and mutual information."""
    # Fix: np.int was only an alias for the builtin int and was removed in
    # NumPy 1.24; use int directly (identical semantics).
    for size in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = random_state.random_integers(0, 10, size),\
                             random_state.random_integers(0, 10, size)
        # v = 2 * MI / (H(a) + H(b)), checked to 0 decimal places.
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
uw-it-aca/uw-restclients | restclients/models/myplan.py | 1 | 2722 | from restclients_core import models
class MyPlan(models.Model):
    """Top-level MyPlan container holding a list of terms."""

    def __init__(self):
        self.terms = []

    def json_data(self):
        """Return this plan as a JSON-serializable dict."""
        return {
            "terms": [term.json_data() for term in self.terms],
        }
class MyPlanTerm(models.Model):
    """One academic term in a MyPlan profile, with its planned courses."""

    SPRING = 'spring'
    SUMMER = 'summer'
    AUTUMN = 'autumn'
    WINTER = 'winter'
    QUARTERNAME_CHOICES = (
        (SPRING, 'Spring'),
        (SUMMER, 'Summer'),
        (AUTUMN, 'Autumn'),
        (WINTER, 'Winter'),
    )

    # Field declarations grouped together for readability.
    quarter = models.CharField(max_length=6,
                               choices=QUARTERNAME_CHOICES)
    year = models.PositiveSmallIntegerField()
    course_search_href = models.CharField(max_length=512)
    degree_audit_href = models.CharField(max_length=512)
    myplan_href = models.CharField(max_length=512)
    registration_href = models.CharField(max_length=512)
    registered_courses_count = models.SmallIntegerField(
        max_length=3, default=0)
    registered_sections_count = models.SmallIntegerField(
        max_length=3, default=0)

    def __init__(self):
        self.courses = []

    def json_data(self):
        """Return this term as a JSON-serializable dict."""
        return {
            "year": self.year,
            "quarter": self.quarter,
            "course_search_href": self.course_search_href,
            "degree_audit_href": self.degree_audit_href,
            "myplan_href": self.myplan_href,
            "registration_href": self.registration_href,
            "registered_courses_count": self.registered_courses_count,
            "registered_sections_count": self.registered_sections_count,
            "courses": [course.json_data() for course in self.courses],
        }
class MyPlanCourse(models.Model):
    """A course within a MyPlan term, with its planned sections."""

    curriculum_abbr = models.CharField(max_length=6,
                                       db_index=True)
    course_number = models.PositiveSmallIntegerField(db_index=True)
    registrations_available = models.BooleanField()

    def __init__(self):
        self.sections = []

    def json_data(self):
        """Return this course as a JSON-serializable dict."""
        return {
            'curriculum_abbr': self.curriculum_abbr,
            'course_number': self.course_number,
            'registrations_available': self.registrations_available,
            'sections': [section.json_data() for section in self.sections],
        }
class MyPlanCourseSection(models.Model):
    """A single section of a course in a MyPlan term."""

    section_id = models.CharField(max_length=2,
                                  db_index=True)

    def json_data(self):
        """Return this section as a JSON-serializable dict."""
        return {
            "section_id": self.section_id
        }
| apache-2.0 |
logicrime/google-diff-match-patch | python2/diff_match_patch_test.py | 319 | 41744 | #!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
# (reload() is a builtin in Python 2 only; this test file targets Python 2.4+.)
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):

  def setUp(self):
    "Shared fixture: a fresh diff_match_patch instance per test."
    self.dmp = dmp_module.diff_match_patch()

  def diff_rebuildtexts(self, diffs):
    # Reconstruct the two source texts that would produce this diff:
    # text1 skips insertions, text2 skips deletions.
    text1 = ""
    text2 = ""
    for (op, data) in diffs:
      if op != dmp_module.diff_match_patch.DIFF_INSERT:
        text1 += data
      if op != dmp_module.diff_match_patch.DIFF_DELETE:
        text2 += data
    return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
  def testDiffHalfMatch(self):
    # Detect a halfmatch: None when there is no sufficiently large shared
    # substring, otherwise a 5-tuple whose last element is the common middle.
    self.dmp.Diff_Timeout = 1
    # No match.
    self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
    self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
    # Single Match.
    self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
    self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
    self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
    self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
    # Multiple Matches.
    self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
    self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
    self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
    # Non-optimal halfmatch.
    # Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
    self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
    # Optimal no halfmatch: with the timeout disabled the speedup heuristic
    # is skipped entirely.
    self.dmp.Diff_Timeout = 0
    self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
  def testDiffLinesToChars(self):
    # Convert lines down to characters: each distinct line maps to a
    # one-character token; the third return value is the line table
    # (index 0 reserved as "").
    self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
    self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
    self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
    # More than 256 to reveal any 8-bit limitations.
    n = 300
    lineList = []
    charList = []
    for x in range(1, n + 1):
      lineList.append(str(x) + "\n")
      charList.append(unichr(x))
    self.assertEquals(n, len(lineList))
    lines = "".join(lineList)
    chars = "".join(charList)
    self.assertEquals(n, len(chars))
    lineList.insert(0, "")
    self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
  def testDiffCharsToLines(self):
    # Convert chars up to lines: the inverse of diff_linesToChars, rewriting
    # each diff's token string back into full line text in place.
    diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
    self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
    self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
    # More than 256 to reveal any 8-bit limitations.
    n = 300
    lineList = []
    charList = []
    for x in range(1, n + 1):
      lineList.append(str(x) + "\n")
      charList.append(unichr(x))
    self.assertEquals(n, len(lineList))
    lines = "".join(lineList)
    chars = "".join(charList)
    self.assertEquals(n, len(chars))
    lineList.insert(0, "")
    diffs = [(self.dmp.DIFF_DELETE, chars)]
    self.dmp.diff_charsToLines(diffs, lineList)
    self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
  def testDiffCleanupMerge(self):
    # Cleanup a messy diff: merge adjacent same-type operations, factor out
    # common prefixes/suffixes, and slide edits to align with equalities.
    # Operates on the diffs list in place.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([], diffs)
    # No change case.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
    # Merge equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
    # Merge deletions.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
    # Merge insertions.
    diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
    # Merge interweave.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
    # Prefix and suffix detection.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
    # Prefix and suffix detection with equalities.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
    # Slide edit left.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
    # Slide edit right.
    diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
    # Slide edit left recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
    # Slide edit right recursive.
    diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupMerge(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
  def testDiffCleanupSemanticLossless(self):
    # Slide diffs to match logical boundaries (line, word, sentence) without
    # changing the texts the diff represents. Operates in place.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([], diffs)
    # Blank lines.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
    # Line boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # Alphanumeric boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # Hitting the start.
    diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
    # Hitting the end.
    diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
    # Sentence boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
    self.dmp.diff_cleanupSemanticLossless(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
  def testDiffCleanupSemantic(self):
    # Cleanup semantically trivial equalities: short equalities sandwiched
    # between edits get folded into the edits; overlapping delete/insert
    # pairs are factored into a shared equality. Operates in place.
    # Null case.
    diffs = []
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([], diffs)
    # No elimination #1.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
    # No elimination #2.
    diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
    # Simple elimination.
    diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
    # Backpass elimination.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
    # Multiple eliminations.
    diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
    # Word boundaries.
    diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
    # No overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
    # Overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
    # Reverse overlap elimination.
    diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
    # Two overlap eliminations.
    diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
    self.dmp.diff_cleanupSemantic(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
    """Verify diff_cleanupEfficiency: short equalities whose surrounding
    edits make them operationally expensive are folded into the
    neighbouring delete/insert pair, governed by Diff_EditCost."""
    # Cleanup operationally trivial equalities.
    self.dmp.Diff_EditCost = 4
    # Null case.
    diffs = []
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([], diffs)
    # No elimination: the 4-char equality meets the edit cost, so it stays.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
    # Four-edit elimination: the 3-char equality is cheaper to merge away.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
    # Three-edit elimination.
    diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
    # Backpass elimination: a later merge re-enables an earlier one.
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
    # High cost elimination: raising Diff_EditCost makes even a 4-char
    # equality worth merging.
    self.dmp.Diff_EditCost = 5
    diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
    self.dmp.diff_cleanupEfficiency(diffs)
    self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
    # Restore the default so later tests are unaffected.
    self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
    """diff_prettyHtml renders a diff as span/del/ins HTML elements."""
    # Pretty print.
    diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
    # NOTE(review): upstream diff_prettyHtml escapes HTML metacharacters
    # (&para;, &lt;B&gt;, &amp;); the expected literal below appears
    # entity-decoded -- confirm against the diff_prettyHtml implementation.
    self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
    """diff_text1/diff_text2 rebuild source and destination texts from a
    diff: equalities plus deletions give text1, equalities plus
    insertions give text2."""
    # Compute the source and destination texts.
    ops = [(self.dmp.DIFF_EQUAL, "jump"),
           (self.dmp.DIFF_DELETE, "s"),
           (self.dmp.DIFF_INSERT, "ed"),
           (self.dmp.DIFF_EQUAL, " over "),
           (self.dmp.DIFF_DELETE, "the"),
           (self.dmp.DIFF_INSERT, "a"),
           (self.dmp.DIFF_EQUAL, " lazy")]
    self.assertEquals("jumps over the lazy", self.dmp.diff_text1(ops))
    self.assertEquals("jumped over a lazy", self.dmp.diff_text2(ops))
def testDiffDelta(self):
    """Round-trip diffs through the compact delta encoding
    (diff_toDelta / diff_fromDelta), including malformed-delta error
    cases and characters that require %-encoding."""
    # Convert a diff into delta string.
    diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
    text1 = self.dmp.diff_text1(diffs)
    self.assertEquals("jumps over the lazy", text1)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
    # Convert delta string into a diff.
    self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
    # Generates error (19 != 20): base text longer than the delta expects.
    try:
        self.dmp.diff_fromDelta(text1 + "x", delta)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
    # Generates error (19 != 18): base text shorter than the delta expects.
    try:
        self.dmp.diff_fromDelta(text1[1:], delta)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
    # Generates error (%c3%xy invalid Unicode).
    try:
        self.dmp.diff_fromDelta("", "+%c3xy")
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
    # Test deltas with special characters (non-ASCII, control chars,
    # characters that must be %-escaped in the delta format).
    diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
    text1 = self.dmp.diff_text1(diffs)
    self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
    # Convert delta string into a diff.
    self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
    # Verify pool of unchanged characters: these pass through unescaped.
    diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
    text2 = self.dmp.diff_text2(diffs)
    self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
    delta = self.dmp.diff_toDelta(diffs)
    self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
    # Convert delta string into a diff.
    self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
    """diff_xIndex maps a character offset in text1 to the corresponding
    offset in text2."""
    # Translate a location in text1 to text2.
    ops = [(self.dmp.DIFF_DELETE, "a"),
           (self.dmp.DIFF_INSERT, "1234"),
           (self.dmp.DIFF_EQUAL, "xyz")]
    self.assertEquals(5, self.dmp.diff_xIndex(ops, 2))
    # Translation on deletion: the offset collapses onto the deletion start.
    ops = [(self.dmp.DIFF_EQUAL, "a"),
           (self.dmp.DIFF_DELETE, "1234"),
           (self.dmp.DIFF_EQUAL, "xyz")]
    self.assertEquals(1, self.dmp.diff_xIndex(ops, 3))
def testDiffLevenshtein(self):
    """diff_levenshtein counts edited characters; equalities contribute
    nothing, and overlapping delete/insert runs count as max, not sum."""
    # Levenshtein with trailing equality.
    ops = [(self.dmp.DIFF_DELETE, "abc"),
           (self.dmp.DIFF_INSERT, "1234"),
           (self.dmp.DIFF_EQUAL, "xyz")]
    self.assertEquals(4, self.dmp.diff_levenshtein(ops))
    # Levenshtein with leading equality.
    ops = [(self.dmp.DIFF_EQUAL, "xyz"),
           (self.dmp.DIFF_DELETE, "abc"),
           (self.dmp.DIFF_INSERT, "1234")]
    self.assertEquals(4, self.dmp.diff_levenshtein(ops))
    # Levenshtein with middle equality: runs are separated, so they add up.
    ops = [(self.dmp.DIFF_DELETE, "abc"),
           (self.dmp.DIFF_EQUAL, "xyz"),
           (self.dmp.DIFF_INSERT, "1234")]
    self.assertEquals(7, self.dmp.diff_levenshtein(ops))
def testDiffBisect(self):
    """Exercise the bisection algorithm directly, with a far-future
    deadline and with an already-expired one."""
    # Normal.
    a = "cat"
    b = "map"
    # Since the resulting diff hasn't been normalized, it would be ok if
    # the insertion and deletion pairs are swapped.
    # If the order changes, tweak this test as required.
    # sys.maxint (Python 2 only) serves as an effectively infinite deadline.
    self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
    # Timeout: deadline 0 is already expired, so the whole texts are
    # returned as one delete plus one insert.
    self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
    """End-to-end tests for diff_main: trivial diffs, real diffs,
    the timeout mechanism, the line-mode speedup, and null inputs."""
    # Perform a trivial diff.
    # Null case.
    self.assertEquals([], self.dmp.diff_main("", "", False))
    # Equality.
    self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
    # Simple insertion.
    self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
    # Simple deletion.
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
    # Two insertions.
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
    # Two deletions.
    self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
    # Perform a real diff.
    # Switch off the timeout.
    self.dmp.Diff_Timeout = 0
    # Simple cases.
    self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
    self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
    self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
    # Overlaps.
    self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
    self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
    self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
    # Large equality.
    self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
    # Timeout.
    self.dmp.Diff_Timeout = 0.1  # 100ms
    a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
    b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
    # Increase the text lengths by 1024 times to ensure a timeout.
    for x in range(10):
        a = a + a
        b = b + b
    startTime = time.time()
    self.dmp.diff_main(a, b)
    endTime = time.time()
    # Test that we took at least the timeout period.
    self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
    # Test that we didn't take forever (be forgiving).
    # Theoretically this test could fail very occasionally if the
    # OS task swaps or locks up for a second at the wrong moment.
    self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
    self.dmp.Diff_Timeout = 0
    # Test the linemode speedup.
    # Must be long to pass the 100 char cutoff.
    # Simple line-mode: line-mode and char-mode must agree.
    a = "1234567890\n" * 13
    b = "abcdefghij\n" * 13
    self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
    # Single line-mode.
    a = "1234567890" * 13
    b = "abcdefghij" * 13
    self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
    # Overlap line-mode: compare rebuilt texts rather than exact diffs,
    # since the two modes may produce different (but valid) diffs.
    a = "1234567890\n" * 13
    b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
    texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
    texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
    self.assertEquals(texts_textmode, texts_linemode)
    # Test null inputs.
    try:
        self.dmp.diff_main(None, None)
        self.assertFalse(True)
    except ValueError:
        # Exception expected.
        pass
class MatchTest(DiffMatchPatchTest):
    """MATCH TEST FUNCTIONS"""

    def testMatchAlphabet(self):
        """match_alphabet builds the per-character bitmasks used by Bitap."""
        # Initialise the bitmasks for Bitap.
        # Unique characters: one bit each.
        self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
        # Duplicate characters OR their position bits together.
        self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))

    def testMatchBitap(self):
        """Exercise the Bitap fuzzy matcher across exact, fuzzy, overflow,
        threshold, and distance scenarios (-1 means no match found)."""
        self.dmp.Match_Distance = 100
        self.dmp.Match_Threshold = 0.5
        # Exact matches.
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
        self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
        # Fuzzy matches.
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
        self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
        # Overflow.
        self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
        self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
        self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
        # Threshold test: lower thresholds demand closer matches.
        self.dmp.Match_Threshold = 0.4
        self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.3
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
        self.dmp.Match_Threshold = 0.0
        self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
        self.dmp.Match_Threshold = 0.5
        # Multiple select: the candidate nearer the expected location wins.
        self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
        self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
        # Distance test.
        self.dmp.Match_Distance = 10  # Strict location.
        self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
        self.dmp.Match_Distance = 1000  # Loose location.
        self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))

    def testMatchMain(self):
        """match_main combines exact-match shortcuts with the Bitap fallback."""
        # Full match.
        # Shortcut matches.
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
        self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
        self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
        self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
        self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
        # Complex match.
        self.dmp.Match_Threshold = 0.7
        self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
        self.dmp.Match_Threshold = 0.5
        # Test null inputs.
        try:
            self.dmp.match_main(None, None, 0)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass
class PatchTest(DiffMatchPatchTest):
    """PATCH TEST FUNCTIONS"""

    def testPatchObj(self):
        """patch_obj fields render via __str__ in the unidiff-like format;
        coordinates print 1-based, hence the +1 offsets in the header."""
        # Patch Object.
        p = dmp_module.patch_obj()
        p.start1 = 20
        p.start2 = 21
        p.length1 = 18
        p.length2 = 17
        p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
        strp = str(p)
        self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)

    def testPatchFromText(self):
        """patch_fromText parses textual patches; malformed input raises
        ValueError."""
        self.assertEquals([], self.dmp.patch_fromText(""))
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
        self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
        self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
        self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
        self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
        # Generates error.
        try:
            self.dmp.patch_fromText("Bad\nPatch\n")
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchToText(self):
        """patch_toText round-trips with patch_fromText."""
        strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEquals(strp, self.dmp.patch_toText(p))
        strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
        p = self.dmp.patch_fromText(strp)
        self.assertEquals(strp, self.dmp.patch_toText(p))

    def testPatchAddContext(self):
        """patch_addContext grows a patch's context from the source text,
        honouring Patch_Margin and expanding further when ambiguous."""
        self.dmp.Patch_Margin = 4
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
        # Same, but not enough trailing context.
        p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
        # Same, but not enough leading context.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps.")
        self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
        # Same, but with ambiguity.
        p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
        self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
        self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))

    def testPatchMake(self):
        """patch_make accepts text pairs, a diff, or text+diff, and must
        emit identical patch text for every input form."""
        # Null case.
        patches = self.dmp.patch_make("", "")
        self.assertEquals("", self.dmp.patch_toText(patches))
        text1 = "The quick brown fox jumps over the lazy dog."
        text2 = "That quick brown fox jumped over a lazy dog."
        # Text2+Text1 inputs.
        expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
        # The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
        patches = self.dmp.patch_make(text2, text1)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2 inputs.
        expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Diff input.
        diffs = self.dmp.diff_main(text1, text2, False)
        patches = self.dmp.patch_make(diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Diff inputs.
        patches = self.dmp.patch_make(text1, diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Text1+Text2+Diff inputs (deprecated).
        patches = self.dmp.patch_make(text1, text2, diffs)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Character encoding.
        patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
        self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
        # Character decoding.
        diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
        self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
        # Long string with repeats.
        text1 = ""
        for x in range(100):
            text1 += "abcdef"
        text2 = text1 + "123"
        expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
        patches = self.dmp.patch_make(text1, text2)
        self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
        # Test null inputs.
        try:
            self.dmp.patch_make(None, None)
            self.assertFalse(True)
        except ValueError:
            # Exception expected.
            pass

    def testPatchSplitMax(self):
        """patch_splitMax splits patches whose context would exceed the
        Bitap bit width."""
        # Assumes that Match_MaxBits is 32.
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
        # A patch that fits is left untouched.
        patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
        oldToText = self.dmp.patch_toText(patches)
        self.dmp.patch_splitMax(patches)
        self.assertEquals(oldToText, self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
        patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
        self.dmp.patch_splitMax(patches)
        self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))

    def testPatchAddPadding(self):
        """patch_addPadding pads patches that touch the edges of the text
        with \\x01..\\x04 sentinel context."""
        # Both edges full.
        patches = self.dmp.patch_make("", "test")
        self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
        # Both edges partial.
        patches = self.dmp.patch_make("XY", "XtestY")
        self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
        # Both edges none.
        patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
        self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
        self.dmp.patch_addPadding(patches)
        self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))

    def testPatchApply(self):
        """patch_apply returns (new_text, results) where results flags
        which patches applied; exercises fuzzy matching, delete
        thresholds, and failure compensation."""
        self.dmp.Match_Distance = 1000
        self.dmp.Match_Threshold = 0.5
        self.dmp.Patch_DeleteThreshold = 0.5
        # Null case.
        patches = self.dmp.patch_make("", "")
        results = self.dmp.patch_apply(patches, "Hello world.")
        self.assertEquals(("Hello world.", []), results)
        # Exact match.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
        results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
        # Partial match.
        results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
        self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
        # Failed match.
        results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
        self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
        # Big delete, small change.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
        self.assertEquals(("xabcy", [True, True]), results)
        # Big delete, big change 1.
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
        # Big delete, big change 2.
        self.dmp.Patch_DeleteThreshold = 0.6
        patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
        results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
        self.assertEquals(("xabcy", [True, True]), results)
        self.dmp.Patch_DeleteThreshold = 0.5
        # Compensate for failed patch.
        self.dmp.Match_Threshold = 0.0
        self.dmp.Match_Distance = 0
        patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
        results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
        self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
        self.dmp.Match_Threshold = 0.5
        self.dmp.Match_Distance = 1000
        # No side effects.
        patches = self.dmp.patch_make("", "test")
        patchstr = self.dmp.patch_toText(patches)
        results = self.dmp.patch_apply(patches, "")
        self.assertEquals(patchstr, self.dmp.patch_toText(patches))
        # No side effects with major delete.
        patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
        patchstr = self.dmp.patch_toText(patches)
        self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
        self.assertEquals(patchstr, self.dmp.patch_toText(patches))
        # Edge exact match.
        patches = self.dmp.patch_make("", "test")
        # BUG FIX: the return value was not captured here, so the assertion
        # below silently re-checked the stale 'results' from the earlier
        # "No side effects" case instead of this patch application.
        results = self.dmp.patch_apply(patches, "")
        self.assertEquals(("test", [True]), results)
        # Near edge exact match.
        patches = self.dmp.patch_make("XY", "XtestY")
        results = self.dmp.patch_apply(patches, "XY")
        self.assertEquals(("XtestY", [True]), results)
        # Edge partial match.
        patches = self.dmp.patch_make("y", "y123")
        results = self.dmp.patch_apply(patches, "x")
        self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| apache-2.0 |
Hurence/log-island | logisland-components/logisland-processors/logisland-processor-scripting/src/main/resources/nltk/tokenize/regexp.py | 7 | 7932 | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# Trevor Cohn <tacohn@csse.unimelb.edu.au>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
r"""
Regular-Expression Tokenizers
A ``RegexpTokenizer`` splits a string into substrings using a regular expression.
For example, the following tokenizer forms tokens out of alphabetic sequences,
money expressions, and any other non-whitespace sequences:
>>> from nltk.tokenize import RegexpTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> tokenizer = RegexpTokenizer('\w+|\$[\d\.]+|\S+')
>>> tokenizer.tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
A ``RegexpTokenizer`` can use its regexp to match delimiters instead:
>>> tokenizer = RegexpTokenizer('\s+', gaps=True)
>>> tokenizer.tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
Note that empty tokens are not returned when the delimiter appears at
the start or end of the string.
The material between the tokens is discarded. For example,
the following tokenizer selects just the capitalized words:
>>> capword_tokenizer = RegexpTokenizer('[A-Z]\w+')
>>> capword_tokenizer.tokenize(s)
['Good', 'New', 'York', 'Please', 'Thanks']
This module contains several subclasses of ``RegexpTokenizer``
that use pre-defined regular expressions.
>>> from nltk.tokenize import BlanklineTokenizer
>>> # Uses '\s*\n\s*\n\s*':
>>> BlanklineTokenizer().tokenize(s)
['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.',
'Thanks.']
All of the regular expression tokenizers are also available as functions:
>>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize
>>> regexp_tokenize(s, pattern='\w+|\$[\d\.]+|\S+')
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
'.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
>>> blankline_tokenize(s)
['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.']
Caution: The function ``regexp_tokenize()`` takes the text as its
first argument, and the regular expression pattern as its second
argument. This differs from the conventions used by Python's
``re`` functions, where the pattern is always the first argument.
(This is for consistency with the other NLTK tokenizers.)
"""
from __future__ import unicode_literals
import re
from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import regexp_span_tokenize
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class RegexpTokenizer(TokenizerI):
    r"""
    A tokenizer that splits a string using a regular expression. The
    pattern describes either the tokens themselves or the separators
    (gaps) between them.

        >>> tokenizer = RegexpTokenizer('\w+|\$[\d\.]+|\S+')

    :type pattern: str
    :param pattern: The pattern used to build this tokenizer.  It must
        not contain capturing parentheses; use non-capturing groups,
        e.g. ``(?:...)``, instead.
    :type gaps: bool
    :param gaps: If True, the pattern matches separators between tokens;
        if False, it matches the tokens themselves.
    :type discard_empty: bool
    :param discard_empty: If True, empty tokens ``''`` produced by the
        tokenizer are dropped.  Empty tokens can only arise when
        ``gaps`` is True.
    :type flags: int
    :param flags: The regexp flags used to compile this tokenizer's
        pattern.  Defaults to ``re.UNICODE | re.MULTILINE | re.DOTALL``.
    """

    def __init__(self, pattern, gaps=False, discard_empty=True,
                 flags=re.UNICODE | re.MULTILINE | re.DOTALL):
        # Accept either a pattern string or a pre-compiled regexp object;
        # in the latter case keep its source pattern.
        self._pattern = getattr(pattern, 'pattern', pattern)
        self._gaps = gaps
        self._discard_empty = discard_empty
        self._flags = flags
        # Compiled lazily on first use (see _check_regexp).
        self._regexp = None

    def _check_regexp(self):
        # Defer compilation so constructing a tokenizer stays cheap.
        if self._regexp is None:
            self._regexp = re.compile(self._pattern, self._flags)

    def tokenize(self, text):
        self._check_regexp()
        if not self._gaps:
            # The pattern matches the tokens themselves.
            return self._regexp.findall(text)
        # The pattern matches separators: split on it.
        pieces = self._regexp.split(text)
        if self._discard_empty:
            pieces = [piece for piece in pieces if piece]
        return pieces

    def span_tokenize(self, text):
        self._check_regexp()
        if self._gaps:
            for start, stop in regexp_span_tokenize(text, self._regexp):
                if not (self._discard_empty and start == stop):
                    yield start, stop
        else:
            for match in re.finditer(self._regexp, text):
                yield match.span()

    def __repr__(self):
        return ('%s(pattern=%r, gaps=%r, discard_empty=%r, flags=%r)' %
                (self.__class__.__name__, self._pattern, self._gaps,
                 self._discard_empty, self._flags))
class WhitespaceTokenizer(RegexpTokenizer):
    r"""
    Tokenize a string on runs of whitespace (space, tab, newline).
    In general, users should prefer the string ``split()`` method.

        >>> from nltk.tokenize import WhitespaceTokenizer
        >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
        >>> WhitespaceTokenizer().tokenize(s)
        ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
        'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
    """

    def __init__(self):
        # Whitespace runs are gaps between tokens, not tokens themselves.
        RegexpTokenizer.__init__(self, pattern=r'\s+', gaps=True)
class BlanklineTokenizer(RegexpTokenizer):
    r"""
    Tokenize a string, treating any sequence of blank lines as the
    delimiter.  A blank line is a line containing nothing but spaces
    or tabs.
    """

    def __init__(self):
        # One-or-more blank lines (with surrounding whitespace) form a gap.
        RegexpTokenizer.__init__(self, pattern=r'\s*\n\s*\n\s*', gaps=True)
class WordPunctTokenizer(RegexpTokenizer):
    r"""
    Tokenize a text into alternating alphanumeric and non-alphanumeric
    (punctuation) runs, using the regexp ``\w+|[^\w\s]+``.

        >>> from nltk.tokenize import WordPunctTokenizer
        >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
        >>> WordPunctTokenizer().tokenize(s)
        ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
        '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
    """

    def __init__(self):
        # The pattern matches the tokens themselves (gaps defaults to False).
        RegexpTokenizer.__init__(self, pattern=r'\w+|[^\w\s]+')
######################################################################
#{ Tokenization Functions
######################################################################
def regexp_tokenize(text, pattern, gaps=False, discard_empty=True,
                    flags=re.UNICODE | re.MULTILINE | re.DOTALL):
    """
    Return a tokenized copy of *text*. See :class:`.RegexpTokenizer`
    for descriptions of the arguments.
    """
    # One-shot convenience wrapper: build a throwaway tokenizer and use it.
    return RegexpTokenizer(pattern, gaps, discard_empty, flags).tokenize(text)
# Module-level convenience functions bound to default tokenizer instances.
blankline_tokenize = BlanklineTokenizer().tokenize
wordpunct_tokenize = WordPunctTokenizer().tokenize
| apache-2.0 |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py | 122 | 2323 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class AbstractSequencedCommand(Command):
    """Base class for tool commands that run a fixed sequence of steps.

    Subclasses set ``steps``; the StepSequence built from it supplies the
    command's option list.
    """
    steps = None

    def __init__(self):
        self._sequence = StepSequence(self.steps)
        Command.__init__(self, self._sequence.options())

    def _prepare_state(self, options, args, tool):
        """Hook for subclasses: build the state object handed to the steps."""
        return None

    def execute(self, options, args, tool):
        try:
            state = self._prepare_state(options, args, tool)
        # 'except ... as e' replaces the Python-2-only 'except ..., e' form;
        # it is accepted by Python 2.6+ and required by Python 3.
        except ScriptError as e:
            _log.error(e.message_with_output())
            self._exit(e.exit_code or 2)
        self._sequence.run_and_handle_errors(tool, options, state)
| bsd-3-clause |
tosanai/wbai_hackathon_2017 | agent/ml/vae.py | 1 | 5878 | import datetime
from threading import Thread, Lock
from keras import backend as K
from keras.models import clone_model, Model
from keras.layers import Input, Dense, Lambda
from keras.callbacks import TensorBoard
import tensorflow as tf
from config.model import TENSORBOARD_LOG_DIR
from config.model import VAE_MODEL
LOCK = Lock()
latent_dim = 3
epochs = 1
class VAE:
    """Variational autoencoder with a dedicated TF graph and input queue.

    Builds three Keras models over one TensorFlow graph: ``_model`` (the
    queue-fed training model), ``model`` (a clone used for prediction so
    training does not mutate it), and separate ``encoder``/``decoder``
    models whose weights are copied from the trained model after each
    ``learn()`` call.
    """
    def __init__(self, x_shape, save_interval=100):
        """
        Initialize VAE setting
        :param x_shape: X shape(not x(i) shape)
        """
        # NOTE(review): save_interval is accepted but never used below.
        m, n = x_shape
        # Hidden layer is a quarter of the input width.
        hidden_unit_size = n >> 2
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Input pipeline: a FIFO queue fed via `enqueue`; the training
            # model's input tensor is wired to queue.dequeue().
            self.example = tf.placeholder(shape=(None, n), dtype=tf.float32)
            self.queue = tf.FIFOQueue(capacity=20, dtypes=[tf.float32])
            self.enqueue = self.queue.enqueue((self.example, ))
            self.qr = tf.train.QueueRunner(self.queue, [self.enqueue] * 4)
            self.coord = tf.train.Coordinator()
            # x = Input(shape=(n, ), name='x')
            x = Input(shape=(n, ), dtype=tf.float32, tensor=self.queue.dequeue(), name='x')
            h1 = Dense(hidden_unit_size, activation='relu', dtype=tf.float32, name='h1')(x)
            mean = Dense(latent_dim, name='mean')(h1)
            var = Dense(latent_dim, name='var')(h1)
            def sampling(args):
                # Reparameterization trick: z = mean + var * eps.
                # NOTE(review): 'var' here is used directly as a scale, not
                # as log-variance (see the commented alternative).
                z_mean, z_var = args
                epsilon = K.random_normal(shape=K.shape(z_var))
                return z_mean + z_var * epsilon
                # return z_mean + K.exp(z_var / 2) * epsilon
            z = Lambda(sampling, name='z')([mean, var])
            decoder_h1 = Dense(hidden_unit_size, activation='relu', name='decoder_h1')(z)
            y = Dense(n, activation='sigmoid', name='y')(decoder_h1)
            def loss(y_true, y_pred):
                # KL divergence term plus reconstruction cross-entropy.
                kld = (-1 / 2) * (K.sum(1 + K.log(K.square(var)) - K.square(mean) - K.square(var), axis=1))
                # kld = (-1 / 2) * K.sum(1 + var - K.square(mean) - K.exp(var))
                re = K.mean(K.sum(K.binary_crossentropy(y_true, y_pred), axis=1))
                return K.mean(kld + re)
            model = Model(inputs=x, outputs=y)
            model.compile(optimizer='adam', loss=loss)
            # using learn
            self._model = model
            # using predict without being affected by learning
            self.model = clone_model(self._model)
            self.y = y
            # Stand-alone encoder: same architecture, weights synced in learn().
            e_x = Input(shape=(n, ), name='e_x')
            e_h1 = Dense(hidden_unit_size, activation='relu', name='e_h1')(e_x)
            e_mean = Dense(latent_dim, name='e_mean')(e_h1)
            e_var = Dense(latent_dim, name='e_var')(e_h1)
            e_z = Lambda(sampling, name='e_z')([e_mean, e_var])
            self.encoder = Model(inputs=e_x, outputs=e_z)
            # Stand-alone decoder, fed directly with latent vectors.
            z_input = Input(shape=(latent_dim,))
            d_h1 = Dense(hidden_unit_size, activation='relu', name='d_h1')(z_input)
            d_y = Dense(n, activation='sigmoid', name='d_y')(d_h1)
            self.decoder = Model(inputs=z_input, outputs=d_y)
            # self.a = tf.placeholder(dtype=tf.float32, shape=(None, 2))
            # self.b = tf.placeholder(dtype=tf.float32, shape=(None, 2))
            # self.ab = self.a + self.b
        self.session = tf.Session(graph=self.graph)
        K.set_session(self.session)

    def learn(self, x_train, x_test=None):
        """Feed x_train through the queue, then sync weights to the
        prediction/encoder/decoder models and save a timestamped snapshot."""
        if x_test is not None:
            validation_data = (x_test, x_test)
        else:
            validation_data = None
        # NOTE(review): validation_data is unused while the fit() call below
        # stays commented out.
        enqueue_threads = self.qr.create_threads(self.session, coord=self.coord, start=True)
        with LOCK:
            for i in range(1):
                self.session.run(self.enqueue, feed_dict={self.example: x_train})
        self.coord.join(enqueue_threads)
        # with tf.Session(graph=K.get_session().graph):
        #     self._model.fit(x=x_train, y=x_train, epochs=epochs, validation_data=validation_data,
        #                     callbacks=[TensorBoard(log_dir=TENSORBOARD_LOG_DIR, histogram_freq=1)])
        with LOCK:
            w = self._model.get_weights()
            self.model.set_weights(w)
            # Encoder takes everything but the decoder's final two layers
            # (two weight arrays each); decoder takes those last four.
            self.encoder.set_weights(w[0:len(w) - 4])
            self.decoder.set_weights(w[-4:])
            self.model.save(VAE_MODEL + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '.h5')

    def predict(self, x):
        # Round-trip: encode to latent space, then decode back.
        return self.decoder.predict(self.encoder.predict(x))

    def encode(self, x):
        # with K.get_session() as sess:
        return self.encoder.predict(x)

    def decode(self, z):
        # with K.get_session() as sess:
        return self.decoder.predict(z)

    def _show_predict_image(self, x):
        """Debug helper: show two originals and their reconstructions
        (assumes 28x28 MNIST-shaped rows — TODO confirm for other data)."""
        import matplotlib.pyplot as plt
        import numpy as np
        pred = self.predict(x)
        plt.imshow(np.reshape(x[0], (28, 28)), cmap='Greys_r')
        plt.show()
        plt.imshow(np.reshape(pred[0], (28, 28)), cmap='Greys_r')
        plt.show()
        plt.imshow(np.reshape(x[5000], (28, 28)), cmap='Greys_r')
        plt.show()
        plt.imshow(np.reshape(pred[5000], (28, 28)), cmap='Greys_r')
        plt.show()
def _main(args):
    """Train a VAE on (x_train, x_test), running learn() on two threads."""
    x_train, x_test = args
    vae = VAE(x_shape=x_train.shape)
    # Exercise the LOCK-guarded learn() path concurrently; threads are not
    # joined, so the process may exit before learning finishes.
    for _ in range(2):
        thread = Thread(target=vae.learn, kwargs={'x_train': x_train, 'x_test': x_test})
        thread.start()
    # vae.learn(x_train, x_test)
    # vae.learn(x_train, x_test)
    # print(thread.is_alive())
    # thread.join()
    # print(thread.is_alive())
    # vae._show_predict_image(x_test)
if __name__ == '__main__':
    # Smoke-test entry point: train on MNIST flattened to 784-dim rows,
    # scaled to [0, 1] for the sigmoid/binary-crossentropy output.
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    _main((x_train, x_test))
| apache-2.0 |
quantum5/UnicodeBrowser | UnicodeBrowser/wsgi.py | 1 | 1443 | """
WSGI config for UnicodeBrowser project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "UnicodeBrowser.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "UnicodeBrowser.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable; must be created after the settings module is set.
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 |
luther07/dak | tools/logs.py | 9 | 4984 | #!/usr/bin/python
# (c) 2008 Thomas Viehmann
# Free software licensed under the GPL version 2 or later
import os,re,datetime, sys
import tempfile, time

# Number of most-recent dinstall runs to plot.
ITEMS_TO_KEEP = 20
CACHE_FILE = '/srv/ftp-master.debian.org/misc/dinstall_time_cache'
GRAPH_DIR = '/srv/ftp.debian.org/web/stat'

# Matches one "Archive maintenance timestamp" line from a (possibly bz2'd)
# dinstall log: groups 1-6 are the run's date/time from the filename,
# group 7 the stage name, groups 8-10 the stage's wall-clock h:m:s.
LINE = re.compile(r'(?:|.*/)dinstall_(\d{4})\.(\d{2})\.(\d{2})-(\d{2}):(\d{2}):(\d{2})\.log(?:\.bz2)?:'+
    r'Archive maintenance timestamp \(([^\)]*)\): (\d{2}):(\d{2}):(\d{2})$')
# Characters NOT allowed in command-line arguments (they are interpolated
# into a shell command below).
UNSAFE = re.compile(r'[^a-zA-Z/\._:0-9\- ]')

# Per-graph configuration: which stage keys to plot, and whether the
# remaining stages are summed into an "other" bar.
graphs = {"dinstall1": {"keystolist":["pg_dump1", "i18n 1", "accepted", "dominate", "generate-filelist", "apt-ftparchive",
                                     "pdiff", "release files", "w-b", "i18n 2", "apt-ftparchive cleanup"],
                        "showothers":True},
          "dinstall2": {"keystolist":['External Updates', 'p-u-new', 'o-p-u-new', 'cruft', 'import-keyring', 'overrides', 'cleanup', 'scripts', 'mirror hardlinks', 'stats', 'compress', "pkg-file-mapping" ],
                        "showothers":False},
          "totals":{"keystolist":["apt-ftparchive", "apt-ftparchive cleanup"],"showothers":True}}
#'mirror hardlinks', 'apt-ftparchive', 'logremove', 'startup', 'import-keyring', 'release files', 'accepted', 'stats', 'o-p-u-new', 'i18n 2', 'locked part finished', 'i18n 1', 'cruft', 'pdiff', 'init', 'cleanup', , 'p-u-new', 'run-parts', 'compress', 'scripts', 'expire_dumps', 'removed', 'make-suite-file-list', 'pg_dump1', 'pg_dump2', 'overrides', 'reports', 'merkel projectb push', 'buildd', 'apt-ftparchive cleanup', 'w-b'

# Union of all stage keys any graph wants.
wantkeys = set()
for tmp in graphs.values():
    wantkeys |= set(tmp["keystolist"])

# d: run timestamp -> {stage key: minutes}; kl preserves first-seen key
# order, ks is the same keys as a set for fast membership tests.
d = {}
kl = []
ks = set()
if os.path.exists(CACHE_FILE):
    # Cache format: one run per line, "timestamp\tkey:minutes\tkey:minutes...".
    for l in open(CACHE_FILE):
        dt, l = l.split('\t',1)
        l = map(lambda x: (lambda y: (y[0],float(y[1])))(x.split(':',1)), l.split('\t'))
        newk = [x[0] for x in l if x[0] not in ks]
        kl += newk
        ks |= set(newk)
        d[dt] = dict(l)

olddt = None
args = sys.argv[1:]
# Refuse shell metacharacters: args are spliced into an os.popen command.
m = UNSAFE.search(' '.join(args))
if m:
    raise Exception("I don't like command line arguments including char '%s'"%m.group(0))
if args:
    for l in os.popen('bzgrep -H "^Archive maintenance timestamp" "'+'" "'.join(args)+'"'):
        m = LINE.match(l)
        if not m:
            raise Exception("woops '%s'"%l)
        g = map(lambda x: (not x.isdigit() and x) or int(x), m.groups())
        dt = datetime.datetime(*g[:6])
        if olddt != dt:
            # New run: stage durations are deltas from the previous stamp.
            oldsecs = 0
            olddt = dt
        dt2 = datetime.datetime(*(g[:3]+g[-3:]))
        secs = (dt2-dt).seconds
        assert secs >= 0 # should add 24*60*60
        k = g[6]
        # Store this stage's duration in minutes.
        d.setdefault(str(dt),{})[k] = (secs-oldsecs)/60.0
        oldsecs = secs
        if k not in ks:
            ks.add(k)
            kl.append(k)

if (wantkeys-ks):
    print >> sys.stderr, "warning, requested keys not found in any log: "+' '.join(wantkeys-ks)

datakeys = d.keys()
datakeys.sort()
# Rewrite the cache atomically (write temp file, then rename over it).
f = open(CACHE_FILE+".tmp","w")
for dk in datakeys:
    print >> f, dk+'\t'+'\t'.join(
        ["%s:%s"%(k,str(d[dk][k])) for k in kl if k in d[dk]])
f.close()
os.rename(CACHE_FILE+".tmp", CACHE_FILE)
# Only plot the most recent runs.
datakeys = datakeys[-ITEMS_TO_KEEP:]
def dump_file(outfn,keystolist, showothers):
    """Render one stacked-bar PNG of stage durations via an R subprocess.

    outfn: output PNG path (NOT shell-escaped - see comment below);
    keystolist: stage keys plotted individually; showothers: if true,
    remaining stages are summed into an extra 'other' column.
    """
    showothers = (showothers and 1) or 0
    # careful, outfn is NOT ESCAPED
    # Write a tab-separated table for R, one row per recent run.
    f = tempfile.NamedTemporaryFile()
    otherkeys = ks-set(keystolist)
    print >>f, '\t'.join(keystolist+showothers*['other'])
    for k in datakeys:
        v = d[k]
        others = sum(map(lambda x: v.get(x,0),otherkeys))
        print >>f, k+'\t'+'\t'.join(map(lambda x: str(v.get(x,0)), keystolist)+showothers*[str(others)])
    f.flush()
    n = f.name
    # Drive R through a pipe; the heredoc below is an R script template.
    p = os.popen("R --vanilla --slave > /dev/null","w")
    p.write("""
d = read.table("%(datafile)s", sep = "\t")
#d[["ts"]] <- as.POSIXct(d[["timestamp"]])
k = setdiff(names(d),c("ts","timestamp"))
#palette(rainbow(max(length(k),2)))
palette(c("midnightblue", "gold", "turquoise", "plum4", "palegreen1", "OrangeRed", "green4", "blue",
"magenta", "darkgoldenrod3", "tomato4", "violetred2","thistle4", "steelblue2", "springgreen4", "salmon","gray"))
#plot(d[["runtime"]],d[["compress"]],type="l",col="blue")
#lines(d[["runtime"]],d[["logremove"]],type="l",col="red")
#legend(as.POSIXct("2008-12-05"),9500,"logremove",col="red",lty=1)
bitmap(file = "%(outfile)s", type="png16m",width=16.9,height=11.8)
#plot(d[["ts"]],d[["compress"]],type="l",col="blue")
#lines(d[["ts"]],d[["logremove"]],type="l",col="red")
barplot(t(d[,k]), col=palette(), xlab="date",ylab="time/minutes"
)
par(xpd = TRUE)
legend(xinch(-1.2),par("usr")[4]+yinch(1),legend=k,
ncol=3,fill=1:15) #,xjust=1,yjust=1)
text(xinch(10),par("usr")[4]+yinch(.5),"%(title)s", cex=2)
dev.off()
q()
"""%{'datafile':n,'outfile':outfn,
     'title':((not showothers)*"partial ")+"dinstall times"})
    p.flush()
    # A non-None close() return means R exited with an error.
    assert not p.close()
# Render every configured graph into GRAPH_DIR as <name>.png.
for afn,params in graphs.items():
    dump_file(os.path.join(GRAPH_DIR,afn+'.png'), **params)
| gpl-2.0 |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/protocols/ftp.py | 18 | 93459 | # -*- test-case-name: twisted.test.test_ftp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An FTP protocol implementation
@author: Itamar Shtull-Trauring
@author: Jp Calderone
@author: Andrew Bennetts
"""
# System Imports
import os
import time
import re
import operator
import stat
import errno
import fnmatch
import warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import Interface, implements
# Twisted Imports
from twisted import copyright
from twisted.internet import reactor, interfaces, protocol, error, defer
from twisted.protocols import basic, policies
from twisted.python import log, failure, filepath
from twisted.python.compat import reduce
from twisted.cred import error as cred_error, portal, credentials, checkers
# constants
# response codes
RESTART_MARKER_REPLY = "100"
SERVICE_READY_IN_N_MINUTES = "120"
DATA_CNX_ALREADY_OPEN_START_XFR = "125"
FILE_STATUS_OK_OPEN_DATA_CNX = "150"
CMD_OK = "200.1"
TYPE_SET_OK = "200.2"
ENTERING_PORT_MODE = "200.3"
CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
SYS_STATUS_OR_HELP_REPLY = "211"
DIR_STATUS = "212"
FILE_STATUS = "213"
HELP_MSG = "214"
NAME_SYS_TYPE = "215"
SVC_READY_FOR_NEW_USER = "220.1"
WELCOME_MSG = "220.2"
SVC_CLOSING_CTRL_CNX = "221"
GOODBYE_MSG = "221"
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
CLOSING_DATA_CNX = "226"
TXFR_COMPLETE_OK = "226"
ENTERING_PASV_MODE = "227"
ENTERING_EPSV_MODE = "229"
USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
REQ_FILE_ACTN_COMPLETED_OK = "250"
PWD_REPLY = "257.1"
MKD_REPLY = "257.2"
USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
NEED_ACCT_FOR_LOGIN = "332"
REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
TOO_MANY_CONNECTIONS = "421.2"
CANT_OPEN_DATA_CNX = "425"
CNX_CLOSED_TXFR_ABORTED = "426"
REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
REQ_ACTN_ABRTD_LOCAL_ERR = "451"
REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
SYNTAX_ERR = "500"
SYNTAX_ERR_IN_ARGS = "501"
CMD_NOT_IMPLMNTD = "502"
BAD_CMD_SEQ = "503"
CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
NOT_LOGGED_IN = "530.1" # v1 of code 530 - please log in
AUTH_FAILURE = "530.2" # v2 of code 530 - authorization failure
NEED_ACCT_FOR_STOR = "532"
FILE_NOT_FOUND = "550.1" # no such file or directory
PERMISSION_DENIED = "550.2" # permission denied
ANON_USER_DENIED = "550.3" # anonymous users can't alter filesystem
IS_NOT_A_DIR = "550.4" # rmd called on a path that is not a directory
REQ_ACTN_NOT_TAKEN = "550.5"
FILE_EXISTS = "550.6"
IS_A_DIR = "550.7"
PAGE_TYPE_UNK = "551"
EXCEEDED_STORAGE_ALLOC = "552"
FILENAME_NOT_ALLOWED = "553"
RESPONSE = {
# -- 100's --
RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', # TODO: this must be fixed
SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
# -- 200's --
CMD_OK: '200 Command OK',
TYPE_SET_OK: '200 Type set to %s.',
ENTERING_PORT_MODE: '200 PORT OK',
CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
DIR_STATUS: '212 %s',
FILE_STATUS: '213 %s',
HELP_MSG: '214 help: %s',
NAME_SYS_TYPE: '215 UNIX Type: L8',
WELCOME_MSG: "220 %s",
SVC_READY_FOR_NEW_USER: '220 Service ready',
GOODBYE_MSG: '221 Goodbye.',
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
CLOSING_DATA_CNX: '226 Abort successful',
TXFR_COMPLETE_OK: '226 Transfer Complete.',
ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', # where is epsv defined in the rfc's?
USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', #i.e. CWD completed ok
PWD_REPLY: '257 "%s"',
MKD_REPLY: '257 "%s" created',
# -- 300's --
'userotp': '331 Response to %s.', # ???
USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
# -- 400's --
SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
# -- 500's --
SYNTAX_ERR: "500 Syntax error: %s",
SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
NOT_LOGGED_IN: '530 Please login with USER and PASS.',
AUTH_FAILURE: '530 Sorry, Authentication failed.',
NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
FILE_NOT_FOUND: '550 %s: No such file or directory.',
PERMISSION_DENIED: '550 %s: Permission denied.',
ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
FILE_EXISTS: '550 %s: File exists',
IS_A_DIR: '550 %s: is a directory',
REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed'
}
class InvalidPath(Exception):
    """
    Internal exception used to signify an error during parsing a path.

    Raised by L{toSegments} when a path escapes the root via '..' or
    contains NUL or '/' inside a segment.
    """
def toSegments(cwd, path):
    """
    Normalize a path, as represented by a list of strings each
    representing one segment of the path.

    @param cwd: current working directory, as a list of segments
    @param path: slash-separated path string, absolute or relative
    @raise InvalidPath: if '..' would escape the root, or a segment
        contains NUL or '/'
    """
    # Absolute paths restart from the root; relative ones extend cwd.
    segments = [] if path.startswith('/') else list(cwd)
    for piece in path.split('/'):
        if piece in ('', '.'):
            # Empty segments (doubled slashes) and '.' are no-ops.
            continue
        if piece == '..':
            if not segments:
                raise InvalidPath(cwd, path)
            segments.pop()
        elif '\0' in piece or '/' in piece:
            raise InvalidPath(cwd, path)
        else:
            segments.append(piece)
    return segments
def errnoToFailure(e, path):
    """
    Map C{OSError} and C{IOError} to standard FTP errors.

    Returns a failed Deferred wrapping the FTP error matching errno C{e};
    unrecognized errnos produce a failure for the current exception.
    """
    _errnoMap = {
        errno.ENOENT: FileNotFoundError,
        errno.EACCES: PermissionDeniedError,
        errno.EPERM: PermissionDeniedError,
        errno.ENOTDIR: IsNotADirectoryError,
        errno.EEXIST: FileExistsError,
        errno.EISDIR: IsADirectoryError,
    }
    excType = _errnoMap.get(e)
    if excType is not None:
        return defer.fail(excType(path))
    return defer.fail()
class FTPCmdError(Exception):
    """
    Generic exception for FTP commands.

    Subclasses set C{errorCode} to a key of the module-level RESPONSE
    table; the constructor arguments are interpolated into its template.
    """
    def __init__(self, *msg):
        super(FTPCmdError, self).__init__(*msg)
        self.errorMessage = msg

    def response(self):
        """
        Generate a FTP response message for this error.
        """
        template = RESPONSE[self.errorCode]
        return template % self.errorMessage
class FileNotFoundError(FTPCmdError):
"""
Raised when trying to access a non existent file or directory.
"""
errorCode = FILE_NOT_FOUND
class AnonUserDeniedError(FTPCmdError):
"""
Raised when an anonymous user issues a command that will alter the
filesystem
"""
errorCode = ANON_USER_DENIED
class PermissionDeniedError(FTPCmdError):
"""
Raised when access is attempted to a resource to which access is
not allowed.
"""
errorCode = PERMISSION_DENIED
class IsNotADirectoryError(FTPCmdError):
"""
Raised when RMD is called on a path that isn't a directory.
"""
errorCode = IS_NOT_A_DIR
class FileExistsError(FTPCmdError):
"""
Raised when attempted to override an existing resource.
"""
errorCode = FILE_EXISTS
class IsADirectoryError(FTPCmdError):
"""
Raised when DELE is called on a path that is a directory.
"""
errorCode = IS_A_DIR
class CmdSyntaxError(FTPCmdError):
"""
Raised when a command syntax is wrong.
"""
errorCode = SYNTAX_ERR
class CmdArgSyntaxError(FTPCmdError):
"""
Raised when a command is called with wrong value or a wrong number of
arguments.
"""
errorCode = SYNTAX_ERR_IN_ARGS
class CmdNotImplementedError(FTPCmdError):
"""
Raised when an unimplemented command is given to the server.
"""
errorCode = CMD_NOT_IMPLMNTD
class CmdNotImplementedForArgError(FTPCmdError):
"""
Raised when the handling of a parameter for a command is not implemented by
the server.
"""
errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM
class FTPError(Exception):
    """
    Generic error for FTP failures that do not map to a protocol response.
    """
    pass
class PortConnectionError(Exception):
    """
    Raised when establishing the DTP (data) connection fails or times out.
    """
    pass
class BadCmdSequenceError(FTPCmdError):
"""
Raised when a client sends a series of commands in an illogical sequence.
"""
errorCode = BAD_CMD_SEQ
class AuthorizationError(FTPCmdError):
"""
Raised when client authentication fails.
"""
errorCode = AUTH_FAILURE
def debugDeferred(self, *_):
    # Debug callback for Deferred chains: logs whatever arguments are passed
    # along. NOTE(review): defined at module level but takes 'self' —
    # presumably attached as a method or used where the first argument is
    # ignored; verify against callers.
    log.msg('debugDeferred(): %s' % str(_), debug=True)
# -- DTP Protocol --
# Month abbreviations for LIST-style date formatting; index 0 is None so
# that struct_time.tm_mon (1-12) indexes directly.
_months = [
    None,
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class DTP(object, protocol.Protocol):
    """Data Transfer Process protocol.

    Acts as both an IConsumer proxy to its transport (for sending data to
    the client) and a producer feeding a registered consumer (for data
    received from the client). Bytes that arrive before a consumer is
    registered are held in ``_buffer``.
    """
    implements(interfaces.IConsumer)

    isConnected = False

    _cons = None        # consumer currently receiving inbound data, if any
    _onConnLost = None  # Deferred fired when the connection closes
    _buffer = None      # inbound bytes received before a consumer registered

    def connectionMade(self):
        self.isConnected = True
        # Tell the factory (and thus the PI) that the data channel is up.
        self.factory.deferred.callback(None)
        self._buffer = []

    def connectionLost(self, reason):
        self.isConnected = False
        if self._onConnLost is not None:
            self._onConnLost.callback(None)

    def sendLine(self, line):
        self.transport.write(line + '\r\n')

    def _formatOneListResponse(self, name, size, directory, permissions, hardlinks, modified, owner, group):
        # Format a single entry in ls -l style for a LIST reply.
        def formatMode(mode):
            # Expand a permission bitmask into the usual rwxrwxrwx string.
            return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)])

        def formatDate(mtime):
            now = time.gmtime()
            info = {
                'month': _months[mtime.tm_mon],
                'day': mtime.tm_mday,
                'year': mtime.tm_year,
                'hour': mtime.tm_hour,
                'minute': mtime.tm_min
            }
            # ls convention: show the year instead of the time for entries
            # from a different year.
            if now.tm_year != mtime.tm_year:
                return '%(month)s %(day)02d %(year)5d' % info
            else:
                return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info

        format = ('%(directory)s%(permissions)s%(hardlinks)4d '
                  '%(owner)-9s %(group)-9s %(size)15d %(date)12s '
                  '%(name)s')
        return format % {
            'directory': directory and 'd' or '-',
            'permissions': formatMode(permissions),
            'hardlinks': hardlinks,
            'owner': owner[:8],
            'group': group[:8],
            'size': size,
            'date': formatDate(time.gmtime(modified)),
            'name': name}

    def sendListResponse(self, name, response):
        self.sendLine(self._formatOneListResponse(name, *response))

    # Proxy IConsumer to our transport
    def registerProducer(self, producer, streaming):
        return self.transport.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.transport.unregisterProducer()
        self.transport.loseConnection()

    def write(self, data):
        if self.isConnected:
            return self.transport.write(data)
        raise Exception("Crap damn crap damn crap damn")

    # Pretend to be a producer, too.
    def _conswrite(self, bytes):
        # Forward inbound bytes to the consumer; a consumer error tears
        # down the transfer via the connection-lost Deferred.
        try:
            self._cons.write(bytes)
        except:
            self._onConnLost.errback()

    def dataReceived(self, bytes):
        if self._cons is not None:
            self._conswrite(bytes)
        else:
            # No consumer yet: stash the bytes until registerConsumer().
            self._buffer.append(bytes)

    def _unregConsumer(self, ignored):
        self._cons.unregisterProducer()
        self._cons = None
        del self._onConnLost
        return ignored

    def registerConsumer(self, cons):
        """Attach the consumer for inbound data; flush any buffered bytes.

        Returns a Deferred that fires when the connection is lost (or an
        already-fired one if the connection is already gone).
        """
        assert self._cons is None
        self._cons = cons
        self._cons.registerProducer(self, True)
        for chunk in self._buffer:
            self._conswrite(chunk)
        self._buffer = None
        if self.isConnected:
            self._onConnLost = d = defer.Deferred()
            d.addBoth(self._unregConsumer)
            return d
        else:
            self._cons.unregisterProducer()
            self._cons = None
            return defer.succeed(None)

    def resumeProducing(self):
        self.transport.resumeProducing()

    def pauseProducing(self):
        self.transport.pauseProducing()

    def stopProducing(self):
        self.transport.stopProducing()
class DTPFactory(protocol.ClientFactory):
    """
    Client factory for I{data transfer process} protocols.
    @ivar peerCheck: perform checks to make sure the ftp-pi's peer is the same
        as the dtp's
    @ivar pi: a reference to this factory's protocol interpreter
    @ivar _state: Indicates the current state of the DTPFactory.  Initially,
        this is L{_IN_PROGRESS}.  If the connection fails or times out, it is
        L{_FAILED}.  If the connection succeeds before the timeout, it is
        L{_FINISHED}.
    """

    # Sentinel state objects; compared by identity below.
    _IN_PROGRESS = object()
    _FAILED = object()
    _FINISHED = object()

    _state = _IN_PROGRESS

    # -- configuration variables --
    peerCheck = False

    # -- class variables --
    def __init__(self, pi, peerHost=None, reactor=None):
        """Constructor
        @param pi: this factory's protocol interpreter
        @param peerHost: if peerCheck is True, this is the tuple that the
            generated instance will use to perform security checks
        """
        self.pi = pi                        # the protocol interpreter that is using this factory
        self.peerHost = peerHost            # the from FTP.transport.peerHost()
        self.deferred = defer.Deferred()    # deferred will fire when instance is connected
        self.delayedCall = None
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

    def buildProtocol(self, addr):
        log.msg('DTPFactory.buildProtocol', debug=True)
        # Only the first connection wins; later attempts (or attempts after
        # a timeout) get no protocol.
        if self._state is not self._IN_PROGRESS:
            return None
        self._state = self._FINISHED
        self.cancelTimeout()
        p = DTP()
        p.factory = self
        p.pi = self.pi
        self.pi.dtpInstance = p
        return p

    def stopFactory(self):
        log.msg('dtpFactory.stopFactory', debug=True)
        self.cancelTimeout()

    def timeoutFactory(self):
        log.msg('timed out waiting for DTP connection')
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED
        d = self.deferred
        self.deferred = None
        d.errback(
            PortConnectionError(defer.TimeoutError("DTPFactory timeout")))

    def cancelTimeout(self):
        # Safe to call even if no timeout was set or it already fired.
        if self.delayedCall is not None and self.delayedCall.active():
            log.msg('cancelling DTP timeout', debug=True)
            self.delayedCall.cancel()

    def setTimeout(self, seconds):
        log.msg('DTPFactory.setTimeout set to %s seconds' % seconds)
        self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory)

    def clientConnectionFailed(self, connector, reason):
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED
        d = self.deferred
        self.deferred = None
        d.errback(PortConnectionError(reason))
# -- FTP-PI (Protocol Interpreter) --
class ASCIIConsumerWrapper(object):
    """Consumer wrapper that translates platform line endings to CRLF,
    as required for FTP ASCII-mode transfers."""
    def __init__(self, cons):
        self.cons = cons
        self.registerProducer = cons.registerProducer
        self.unregisterProducer = cons.unregisterProducer

        assert os.linesep == "\r\n" or len(os.linesep) == 1, "Unsupported platform (yea right like this even exists)"

        # If the platform already uses CRLF, skip translation entirely: the
        # instance attribute shadows the class-level write() method below.
        if os.linesep == "\r\n":
            self.write = cons.write

    def write(self, bytes):
        return self.cons.write(bytes.replace(os.linesep, "\r\n"))
class FileConsumer(object):
    """
    A consumer for FTP input that writes data to a file.

    @ivar fObj: a file object opened for writing, used to write data received.
    @type fObj: C{file}
    """
    implements(interfaces.IConsumer)

    def __init__(self, fObj):
        self.fObj = fObj

    def registerProducer(self, producer, streaming):
        self.producer = producer
        # Only streaming (push) producers are supported.
        assert streaming

    def unregisterProducer(self):
        self.producer = None
        # End of transfer: close the destination file.
        self.fObj.close()

    def write(self, bytes):
        self.fObj.write(bytes)
class FTPOverflowProtocol(basic.LineReceiver):
    """FTP mini-protocol for when there are too many connections."""
    def connectionMade(self):
        # Tell the client the server is full, then hang up immediately.
        self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS])
        self.transport.loseConnection()
class FTP(object, basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol Interpreter for the File Transfer Protocol
@ivar state: The current server state. One of L{UNAUTH},
L{INAUTH}, L{AUTHED}, L{RENAMING}.
@ivar shell: The connected avatar
@ivar binary: The transfer mode. If false, ASCII.
@ivar dtpFactory: Generates a single DTP for this session
@ivar dtpPort: Port returned from listenTCP
@ivar listenFactory: A callable with the signature of
L{twisted.internet.interfaces.IReactorTCP.listenTCP} which will be used
to create Ports for passive connections (mainly for testing).
@ivar passivePortRange: iterator used as source of passive port numbers.
@type passivePortRange: C{iterator}
"""
disconnected = False
# States an FTP can be in
UNAUTH, INAUTH, AUTHED, RENAMING = range(4)
# how long the DTP waits for a connection
dtpTimeout = 10
portal = None
shell = None
dtpFactory = None
dtpPort = None
dtpInstance = None
binary = True
passivePortRange = xrange(0, 1)
listenFactory = reactor.listenTCP
def reply(self, key, *args):
msg = RESPONSE[key] % args
self.sendLine(msg)
def connectionMade(self):
self.state = self.UNAUTH
self.setTimeout(self.timeOut)
self.reply(WELCOME_MSG, self.factory.welcomeMessage)
def connectionLost(self, reason):
# if we have a DTP protocol instance running and
# we lose connection to the client's PI, kill the
# DTP connection and close the port
if self.dtpFactory:
self.cleanupDTP()
self.setTimeout(None)
if hasattr(self.shell, 'logout') and self.shell.logout is not None:
self.shell.logout()
self.shell = None
self.transport = None
def timeoutConnection(self):
self.transport.loseConnection()
def lineReceived(self, line):
self.resetTimeout()
self.pauseProducing()
def processFailed(err):
if err.check(FTPCmdError):
self.sendLine(err.value.response())
elif (err.check(TypeError) and
err.value.args[0].find('takes exactly') != -1):
self.reply(SYNTAX_ERR, "%s requires an argument." % (cmd,))
else:
log.msg("Unexpected FTP error")
log.err(err)
self.reply(REQ_ACTN_NOT_TAKEN, "internal server error")
def processSucceeded(result):
if isinstance(result, tuple):
self.reply(*result)
elif result is not None:
self.reply(result)
def allDone(ignored):
if not self.disconnected:
self.resumeProducing()
spaceIndex = line.find(' ')
if spaceIndex != -1:
cmd = line[:spaceIndex]
args = (line[spaceIndex + 1:],)
else:
cmd = line
args = ()
d = defer.maybeDeferred(self.processCommand, cmd, *args)
d.addCallbacks(processSucceeded, processFailed)
d.addErrback(log.err)
# XXX It burnsss
# LineReceiver doesn't let you resumeProducing inside
# lineReceived atm
from twisted.internet import reactor
reactor.callLater(0, d.addBoth, allDone)
def processCommand(self, cmd, *params):
cmd = cmd.upper()
if self.state == self.UNAUTH:
if cmd == 'USER':
return self.ftp_USER(*params)
elif cmd == 'PASS':
return BAD_CMD_SEQ, "USER required before PASS"
else:
return NOT_LOGGED_IN
elif self.state == self.INAUTH:
if cmd == 'PASS':
return self.ftp_PASS(*params)
else:
return BAD_CMD_SEQ, "PASS required after USER"
elif self.state == self.AUTHED:
method = getattr(self, "ftp_" + cmd, None)
if method is not None:
return method(*params)
return defer.fail(CmdNotImplementedError(cmd))
elif self.state == self.RENAMING:
if cmd == 'RNTO':
return self.ftp_RNTO(*params)
else:
return BAD_CMD_SEQ, "RNTO required after RNFR"
    def getDTPPort(self, factory):
        """
        Return a port for passive access, using C{self.passivePortRange}
        attribute.  The first port in the range that can actually be
        bound is used; ports already in use are skipped.
        """
        for portn in self.passivePortRange:
            try:
                dtpPort = self.listenFactory(portn, factory)
            except error.CannotListenError:
                continue
            else:
                return dtpPort
        # NOTE(review): if passivePortRange is empty, `portn` is never
        # bound and this line raises NameError instead of
        # CannotListenError — confirm the range is always non-empty.
        raise error.CannotListenError('', portn,
                                      "No port available in range %s" %
                                      (self.passivePortRange,))
def ftp_USER(self, username):
"""
First part of login. Get the username the peer wants to
authenticate as.
"""
if not username:
return defer.fail(CmdSyntaxError('USER requires an argument'))
self._user = username
self.state = self.INAUTH
if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
return GUEST_NAME_OK_NEED_EMAIL
else:
return (USR_NAME_OK_NEED_PASS, username)
# TODO: add max auth try before timeout from ip...
# TODO: need to implement minimal ABOR command
    def ftp_PASS(self, password):
        """
        Second part of login. Get the password the peer wants to
        authenticate with.

        Builds the appropriate credentials (Anonymous or
        UsernamePassword) from the USER recorded by ftp_USER, logs in
        through the portal, and on success installs the resulting shell
        avatar and moves the session to AUTHED.
        """
        if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
            # anonymous login
            creds = credentials.Anonymous()
            reply = GUEST_LOGGED_IN_PROCEED
        else:
            # user login
            creds = credentials.UsernamePassword(self._user, password)
            reply = USR_LOGGED_IN_PROCEED
        # The username is consumed by this login attempt either way.
        del self._user

        def _cbLogin((interface, avatar, logout)):
            assert interface is IFTPShell, "The realm is busted, jerk."
            self.shell = avatar
            self.logout = logout
            self.workingDirectory = []
            self.state = self.AUTHED
            return reply

        def _ebLogin(failure):
            # Bad credentials: fall back to the unauthenticated state so
            # the client may retry with USER.
            failure.trap(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials)
            self.state = self.UNAUTH
            raise AuthorizationError

        d = self.portal.login(creds, None, IFTPShell)
        d.addCallbacks(_cbLogin, _ebLogin)
        return d
    def ftp_PASV(self):
        """Request for a passive connection

        from the rfc::

            This command requests the server-DTP to \"listen\" on a data port
            (which is not its default data port) and to wait for a connection
            rather than initiate one upon receipt of a transfer command. The
            response to this command includes the host and port address this
            server is listening on.
        """
        # if we have a DTP port set up, lose it.
        if self.dtpFactory is not None:
            # cleanupDTP sets dtpFactory to none.  Later we'll do
            # cleanup here or something.
            self.cleanupDTP()
        self.dtpFactory = DTPFactory(pi=self)
        self.dtpFactory.setTimeout(self.dtpTimeout)
        self.dtpPort = self.getDTPPort(self.dtpFactory)

        # Advertise the address of the control connection's local end plus
        # the freshly bound data port, encoded per RFC 959.
        host = self.transport.getHost().host
        port = self.dtpPort.getHost().port
        self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
        # The reply was already sent above; swallow the factory's result.
        return self.dtpFactory.deferred.addCallback(lambda ign: None)
    def ftp_PORT(self, address):
        """
        Handle an active-mode request: connect out to the client's data
        port, given as six comma-separated numbers (h1,h2,h3,h4,p1,p2).

        NOTE(review): the address string is not validated here; a
        malformed argument raises ValueError/IndexError from the parsing
        below rather than a syntax-error reply — confirm upstream
        handling.
        """
        addr = map(int, address.split(','))
        ip = '%d.%d.%d.%d' % tuple(addr[:4])
        # Port is transmitted as (high byte, low byte).
        port = addr[4] << 8 | addr[5]

        # if we have a DTP port set up, lose it.
        if self.dtpFactory is not None:
            self.cleanupDTP()

        self.dtpFactory = DTPFactory(pi=self, peerHost=self.transport.getPeer().host)
        self.dtpFactory.setTimeout(self.dtpTimeout)
        self.dtpPort = reactor.connectTCP(ip, port, self.dtpFactory)

        def connected(ignored):
            return ENTERING_PORT_MODE
        def connFailed(err):
            err.trap(PortConnectionError)
            return CANT_OPEN_DATA_CNX
        return self.dtpFactory.deferred.addCallbacks(connected, connFailed)
    def ftp_LIST(self, path=''):
        """ This command causes a list to be sent from the server to the
        passive DTP.  If the pathname specifies a directory or other
        group of files, the server should transfer a list of files
        in the specified directory.  If the pathname specifies a
        file then the server should send current information on the
        file.  A null argument implies the user's current working or
        default directory.
        """
        # Uh, for now, do this retarded thing.
        if self.dtpInstance is None or not self.dtpInstance.isConnected:
            return defer.fail(BadCmdSequenceError('must send PORT or PASV before RETR'))

        # Various broken clients pass ls-style flags instead of a path;
        # treat each known case as "list the current directory".
        # bug in konqueror
        if path == "-a":
            path = ''
        # bug in gFTP 2.0.15
        if path == "-aL":
            path = ''
        # bug in Nautilus 2.10.0
        if path == "-L":
            path = ''
        # bug in ange-ftp
        if path == "-la":
            path = ''

        def gotListing(results):
            self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            for (name, attrs) in results:
                self.dtpInstance.sendListResponse(name, attrs)
            # Closing the data connection signals end-of-listing.
            self.dtpInstance.transport.loseConnection()
            return (TXFR_COMPLETE_OK,)

        try:
            segments = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        d = self.shell.list(
            segments,
            ('size', 'directory', 'permissions', 'hardlinks',
             'modified', 'owner', 'group'))
        d.addCallback(gotListing)
        return d
    def ftp_NLST(self, path):
        """
        This command causes a directory listing to be sent from the server to
        the client. The pathname should specify a directory or other
        system-specific file group descriptor. An empty path implies the current
        working directory. If the path is non-existent, send nothing. If the
        path is to a file, send only the file name.

        @type path: C{str}
        @param path: The path for which a directory listing should be returned.

        @rtype: L{Deferred}
        @return: a L{Deferred} which will be fired when the listing request
            is finished.
        """
        # XXX: why is this check different from ftp_RETR/ftp_STOR? See #4180
        if self.dtpInstance is None or not self.dtpInstance.isConnected:
            return defer.fail(
                BadCmdSequenceError('must send PORT or PASV before RETR'))

        try:
            segments = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        def cbList(results):
            """
            Send, line by line, each file in the directory listing, and then
            close the connection.

            @type results: A C{list} of C{tuple}. The first element of each
                C{tuple} is a C{str} and the second element is a C{list}.
            @param results: The names of the files in the directory.

            @rtype: C{tuple}
            @return: A C{tuple} containing the status code for a successful
                transfer.
            """
            self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            for (name, ignored) in results:
                self.dtpInstance.sendLine(name)
            self.dtpInstance.transport.loseConnection()
            return (TXFR_COMPLETE_OK,)

        def cbGlob(results):
            # Like cbList, but results are the parent directory's entries;
            # only send names matching the glob in the final segment.
            self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            for (name, ignored) in results:
                if fnmatch.fnmatch(name, segments[-1]):
                    self.dtpInstance.sendLine(name)
            self.dtpInstance.transport.loseConnection()
            return (TXFR_COMPLETE_OK,)

        def listErr(results):
            """
            RFC 959 specifies that an NLST request may only return directory
            listings. Thus, send nothing and just close the connection.

            @type results: L{Failure}
            @param results: The L{Failure} wrapping a L{FileNotFoundError} that
                occurred while trying to list the contents of a nonexistent
                directory.

            @rtype: C{tuple}
            @returns: A C{tuple} containing the status code for a successful
                transfer.
            """
            self.dtpInstance.transport.loseConnection()
            return (TXFR_COMPLETE_OK,)

        # XXX This globbing may be incomplete: see #4181
        if segments and (
            '*' in segments[-1] or '?' in segments[-1] or
            ('[' in segments[-1] and ']' in segments[-1])):
            # Glob in the last segment: list the parent and filter.
            d = self.shell.list(segments[:-1])
            d.addCallback(cbGlob)
        else:
            d = self.shell.list(segments)
            d.addCallback(cbList)
            # self.shell.list will generate an error if the path is invalid
            d.addErrback(listErr)
        return d
def ftp_CWD(self, path):
try:
segments = toSegments(self.workingDirectory, path)
except InvalidPath:
# XXX Eh, what to fail with here?
return defer.fail(FileNotFoundError(path))
def accessGranted(result):
self.workingDirectory = segments
return (REQ_FILE_ACTN_COMPLETED_OK,)
return self.shell.access(segments).addCallback(accessGranted)
    def ftp_CDUP(self):
        # CDUP is just CWD to the parent directory.
        return self.ftp_CWD('..')
    def ftp_PWD(self):
        # Report the working directory as an absolute slash-joined path.
        return (PWD_REPLY, '/' + '/'.join(self.workingDirectory))
    def ftp_RETR(self, path):
        """
        Transmit the file at C{path} to the client over the established
        DTP connection, wrapping it for ASCII mode when not binary.

        @raise BadCmdSequenceError: if no PORT/PASV preceded this command.
        """
        if self.dtpInstance is None:
            raise BadCmdSequenceError('PORT or PASV required before RETR')

        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        # XXX For now, just disable the timeout.  Later we'll want to
        # leave it active and have the DTP connection reset it
        # periodically.
        self.setTimeout(None)

        # Put it back later
        def enableTimeout(result):
            self.setTimeout(self.factory.timeOut)
            return result

        # And away she goes
        if not self.binary:
            cons = ASCIIConsumerWrapper(self.dtpInstance)
        else:
            cons = self.dtpInstance

        def cbSent(result):
            return (TXFR_COMPLETE_OK,)

        def ebSent(err):
            log.msg("Unexpected error attempting to transmit file to client:")
            log.err(err)
            return (CNX_CLOSED_TXFR_ABORTED,)

        def cbOpened(file):
            # Tell them what to doooo
            if self.dtpInstance.isConnected:
                self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            else:
                self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)

            d = file.send(cons)
            d.addCallbacks(cbSent, ebSent)
            return d

        def ebOpened(err):
            # Only log errors we do not expect from ordinary file access.
            if not err.check(PermissionDeniedError, FileNotFoundError, IsNotADirectoryError):
                log.msg("Unexpected error attempting to open file for transmission:")
                log.err(err)
            if err.check(FTPCmdError):
                return (err.value.errorCode, '/'.join(newsegs))
            return (FILE_NOT_FOUND, '/'.join(newsegs))

        d = self.shell.openForReading(newsegs)
        d.addCallbacks(cbOpened, ebOpened)
        d.addBoth(enableTimeout)

        # Pass back Deferred that fires when the transfer is done
        return d
    def ftp_STOR(self, path):
        """
        Receive a file from the client over the DTP connection and write
        it at C{path} via the shell, wrapping the consumer for ASCII mode
        when not binary.

        @raise BadCmdSequenceError: if no PORT/PASV preceded this command.
        """
        if self.dtpInstance is None:
            raise BadCmdSequenceError('PORT or PASV required before STOR')

        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        # XXX For now, just disable the timeout.  Later we'll want to
        # leave it active and have the DTP connection reset it
        # periodically.
        self.setTimeout(None)

        # Put it back later
        def enableTimeout(result):
            self.setTimeout(self.factory.timeOut)
            return result

        def cbSent(result):
            return (TXFR_COMPLETE_OK,)

        def ebSent(err):
            log.msg("Unexpected error receiving file from client:")
            log.err(err)
            if err.check(FTPCmdError):
                return err
            return (CNX_CLOSED_TXFR_ABORTED,)

        def cbConsumer(cons):
            if not self.binary:
                cons = ASCIIConsumerWrapper(cons)

            d = self.dtpInstance.registerConsumer(cons)

            # Tell them what to doooo
            if self.dtpInstance.isConnected:
                self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            else:
                self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)

            return d

        def cbOpened(file):
            d = file.receive()
            d.addCallback(cbConsumer)
            # Close the target file whether or not the upload succeeded,
            # then map the outcome to an FTP reply.
            d.addCallback(lambda ignored: file.close())
            d.addCallbacks(cbSent, ebSent)
            return d

        def ebOpened(err):
            # Only log errors we do not expect from ordinary file access.
            if not err.check(PermissionDeniedError, FileNotFoundError, IsNotADirectoryError):
                log.msg("Unexpected error attempting to open file for upload:")
                log.err(err)
            if isinstance(err.value, FTPCmdError):
                return (err.value.errorCode, '/'.join(newsegs))
            return (FILE_NOT_FOUND, '/'.join(newsegs))

        d = self.shell.openForWriting(newsegs)
        d.addCallbacks(cbOpened, ebOpened)
        d.addBoth(enableTimeout)

        # Pass back Deferred that fires when the transfer is done
        return d
    def ftp_SIZE(self, path):
        """
        Reply with the size in bytes of the file at C{path}, as reported
        by the shell's stat.
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        def cbStat((size,)):
            return (FILE_STATUS, str(size))

        return self.shell.stat(newsegs, ('size',)).addCallback(cbStat)
    def ftp_MDTM(self, path):
        """
        Reply with the modification time of C{path} formatted as
        YYYYMMDDHHMMSS (UTC, via time.gmtime).
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))

        def cbStat((modified,)):
            return (FILE_STATUS, time.strftime('%Y%m%d%H%M%S', time.gmtime(modified)))

        return self.shell.stat(newsegs, ('modified',)).addCallback(cbStat)
def ftp_TYPE(self, type):
p = type.upper()
if p:
f = getattr(self, 'type_' + p[0], None)
if f is not None:
return f(p[1:])
return self.type_UNKNOWN(p)
return (SYNTAX_ERR,)
def type_A(self, code):
if code == '' or code == 'N':
self.binary = False
return (TYPE_SET_OK, 'A' + code)
else:
return defer.fail(CmdArgSyntaxError(code))
def type_I(self, code):
if code == '':
self.binary = True
return (TYPE_SET_OK, 'I')
else:
return defer.fail(CmdArgSyntaxError(code))
    def type_UNKNOWN(self, code):
        # Any unrecognized TYPE argument is "not implemented".
        return defer.fail(CmdNotImplementedForArgError(code))
    def ftp_SYST(self):
        # Report the server's system type constant.
        return NAME_SYS_TYPE
def ftp_STRU(self, structure):
p = structure.upper()
if p == 'F':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(structure))
def ftp_MODE(self, mode):
p = mode.upper()
if p == 'S':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(mode))
    def ftp_MKD(self, path):
        """
        Create the directory C{path} via the shell and reply MKD_REPLY
        with the path on success.
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        return self.shell.makeDirectory(newsegs).addCallback(lambda ign: (MKD_REPLY, path))
    def ftp_RMD(self, path):
        """
        Remove the directory C{path} via the shell.
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        return self.shell.removeDirectory(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
    def ftp_DELE(self, path):
        """
        Delete the file C{path} via the shell.
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        return self.shell.removeFile(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
    def ftp_NOOP(self):
        # NOOP does nothing but confirm the control connection is alive.
        return (CMD_OK,)
    def ftp_RNFR(self, fromName):
        """
        First half of a rename: remember the source name and enter the
        RENAMING state so only RNTO is accepted next.
        """
        self._fromName = fromName
        self.state = self.RENAMING
        return (REQ_FILE_ACTN_PENDING_FURTHER_INFO,)
    def ftp_RNTO(self, toName):
        """
        Second half of a rename: rename the path recorded by RNFR to
        C{toName} and return to the AUTHED state.
        """
        fromName = self._fromName
        del self._fromName
        self.state = self.AUTHED

        try:
            fromsegs = toSegments(self.workingDirectory, fromName)
            tosegs = toSegments(self.workingDirectory, toName)
        except InvalidPath:
            # NOTE: an invalid toName is also reported against fromName.
            return defer.fail(FileNotFoundError(fromName))
        return self.shell.rename(fromsegs, tosegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
    def ftp_QUIT(self):
        # Say goodbye, close the control connection, and mark the session
        # disconnected so no further lines are processed.
        self.reply(GOODBYE_MSG)
        self.transport.loseConnection()
        self.disconnected = True
    def cleanupDTP(self):
        """call when DTP connection exits

        Shuts down whichever kind of data-connection endpoint is active
        (a listening port for PASV, a connector for PORT) and drops the
        DTP factory and instance references.
        """
        log.msg('cleanupDTP', debug=True)
        log.msg(self.dtpPort)
        # Clear the reference first so re-entrant calls see no port.
        dtpPort, self.dtpPort = self.dtpPort, None
        if interfaces.IListeningPort.providedBy(dtpPort):
            # Passive mode: we were listening for the client.
            dtpPort.stopListening()
        elif interfaces.IConnector.providedBy(dtpPort):
            # Active (PORT) mode: we had connected out to the client.
            dtpPort.disconnect()
        else:
            assert False, "dtpPort should be an IListeningPort or IConnector, instead is %r" % (dtpPort,)

        self.dtpFactory.stopFactory()
        self.dtpFactory = None

        if self.dtpInstance is not None:
            self.dtpInstance = None
class FTPFactory(policies.LimitTotalConnectionsFactory):
    """
    A factory for producing ftp protocol instances

    @ivar timeOut: the protocol interpreter's idle timeout time in seconds,
    default is 600 seconds.

    @ivar passivePortRange: value forwarded to C{protocol.passivePortRange}.
    @type passivePortRange: C{iterator}
    """
    protocol = FTP
    overflowProtocol = FTPOverflowProtocol
    allowAnonymous = True
    userAnonymous = 'anonymous'
    timeOut = 600

    welcomeMessage = "Twisted %s FTP Server" % (copyright.version,)

    passivePortRange = xrange(0, 1)

    def __init__(self, portal=None, userAnonymous='anonymous'):
        self.portal = portal
        self.userAnonymous = userAnonymous
        self.instances = []

    def buildProtocol(self, addr):
        """
        Build the (connection-limited) protocol wrapper and configure the
        FTP instance it wraps with this factory's portal, timeout, and
        passive port range.
        """
        p = policies.LimitTotalConnectionsFactory.buildProtocol(self, addr)
        if p is not None:
            p.wrappedProtocol.portal = self.portal
            p.wrappedProtocol.timeOut = self.timeOut
            p.wrappedProtocol.passivePortRange = self.passivePortRange
        return p

    def stopFactory(self):
        # make sure ftp instance's timeouts are set to None
        # to avoid reactor complaints.  (Was a list comprehension used
        # purely for its side effects; an explicit loop is the idiomatic
        # form and allocates no throwaway list.)
        for p in self.instances:
            if p.timeOut is not None:
                p.setTimeout(None)
        policies.LimitTotalConnectionsFactory.stopFactory(self)
# -- Cred Objects --
class IFTPShell(Interface):
    """
    An abstraction of the shell commands used by the FTP protocol for
    a given user account.

    All path names must be absolute.
    """

    def makeDirectory(path):
        """
        Create a directory.

        @param path: The path, as a list of segments, to create
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the directory has been
        created, or which fails if the directory cannot be created.
        """

    def removeDirectory(path):
        """
        Remove a directory.

        @param path: The path, as a list of segments, to remove
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the directory has been
        removed, or which fails if the directory cannot be removed.
        """

    def removeFile(path):
        """
        Remove a file.

        @param path: The path, as a list of segments, to remove
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the file has been
        removed, or which fails if the file cannot be removed.
        """

    def rename(fromPath, toPath):
        """
        Rename a file or directory.

        @param fromPath: The current name of the path.
        @type fromPath: C{list} of C{unicode}

        @param toPath: The desired new name of the path.
        @type toPath: C{list} of C{unicode}

        @return: A Deferred which fires when the path has been
        renamed, or which fails if the path cannot be renamed.
        """

    def access(path):
        """
        Determine whether access to the given path is allowed.

        @param path: The path, as a list of segments
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires with None if access is allowed
        or which fails with a specific exception type if access is
        denied.
        """

    def stat(path, keys=()):
        """
        Retrieve information about the given path.

        This is like list, except it will never return results about
        child paths.

        @param path: The path, as a list of segments, to stat
        @type path: C{list} of C{unicode}

        @param keys: A tuple of keys desired in the result, as for
        C{list}.
        """

    def list(path, keys=()):
        """
        Retrieve information about the given path.

        If the path represents a non-directory, the result list should
        have only one entry with information about that non-directory.
        Otherwise, the result list should have an element for each
        child of the directory.

        @param path: The path, as a list of segments, to list
        @type path: C{list} of C{unicode}

        @param keys: A tuple of keys desired in the resulting
        dictionaries.

        @return: A Deferred which fires with a list of (name, list),
        where the name is the name of the entry as a unicode string
        and each list contains values corresponding to the requested
        keys.  The following are possible elements of keys, and the
        values which should be returned for them:

            - C{'size'}: size in bytes, as an integer (this is kinda required)

            - C{'directory'}: boolean indicating the type of this entry

            - C{'permissions'}: a bitvector (see os.stat(foo).st_mode)

            - C{'hardlinks'}: Number of hard links to this entry

            - C{'modified'}: number of seconds since the epoch since entry was
              modified

            - C{'owner'}: string indicating the user owner of this entry

            - C{'group'}: string indicating the group owner of this entry
        """

    def openForReading(path):
        """
        @param path: The path, as a list of segments, to open
        @type path: C{list} of C{unicode}

        @rtype: C{Deferred} which will fire with L{IReadFile}
        """

    def openForWriting(path):
        """
        @param path: The path, as a list of segments, to open
        @type path: C{list} of C{unicode}

        @rtype: C{Deferred} which will fire with L{IWriteFile}
        """
class IReadFile(Interface):
    """
    A file out of which bytes may be read.
    """

    def send(consumer):
        """
        Produce the contents of the given path to the given consumer.  This
        method may only be invoked once on each provider.

        @type consumer: C{IConsumer}

        @return: A Deferred which fires when the file has been
        consumed completely.
        """
class IWriteFile(Interface):
    """
    A file into which bytes may be written.
    """

    def receive():
        """
        Create a consumer which will write to this file.  This method may
        only be invoked once on each provider.

        @rtype: C{Deferred} of C{IConsumer}
        """

    def close():
        """
        Perform any post-write work that needs to be done.  This method may
        only be invoked once on each provider, and will always be invoked
        after receive().

        @rtype: C{Deferred} of anything: the value is ignored.  The FTP client
        will not see their upload request complete until this Deferred has
        been fired.
        """
def _getgroups(uid):
"""Return the primary and supplementary groups for the given UID.
@type uid: C{int}
"""
result = []
pwent = pwd.getpwuid(uid)
result.append(pwent.pw_gid)
for grent in grp.getgrall():
if pwent.pw_name in grent.gr_mem:
result.append(grent.gr_gid)
return result
def _testPermissions(uid, gid, spath, mode='r'):
"""
checks to see if uid has proper permissions to access path with mode
@type uid: C{int}
@param uid: numeric user id
@type gid: C{int}
@param gid: numeric group id
@type spath: C{str}
@param spath: the path on the server to test
@type mode: C{str}
@param mode: 'r' or 'w' (read or write)
@rtype: C{bool}
@return: True if the given credentials have the specified form of
access to the given path
"""
if mode == 'r':
usr = stat.S_IRUSR
grp = stat.S_IRGRP
oth = stat.S_IROTH
amode = os.R_OK
elif mode == 'w':
usr = stat.S_IWUSR
grp = stat.S_IWGRP
oth = stat.S_IWOTH
amode = os.W_OK
else:
raise ValueError("Invalid mode %r: must specify 'r' or 'w'" % (mode,))
access = False
if os.path.exists(spath):
if uid == 0:
access = True
else:
s = os.stat(spath)
if usr & s.st_mode and uid == s.st_uid:
access = True
elif grp & s.st_mode and gid in _getgroups(uid):
access = True
elif oth & s.st_mode:
access = True
if access:
if not os.access(spath, amode):
access = False
log.msg("Filesystem grants permission to UID %d but it is inaccessible to me running as UID %d" % (
uid, os.getuid()))
return access
class FTPAnonymousShell(object):
    """
    An anonymous implementation of IFTPShell

    All mutating operations (mkdir, rmdir, delete, rename, upload) fail
    with AnonUserDeniedError / PermissionDeniedError; reading and listing
    are permitted within the filesystem root.

    @type filesystemRoot: L{twisted.python.filepath.FilePath}
    @ivar filesystemRoot: The path which is considered the root of
    this shell.
    """
    implements(IFTPShell)

    def __init__(self, filesystemRoot):
        self.filesystemRoot = filesystemRoot

    def _path(self, path):
        # Resolve a segment list against the root by repeated child()
        # lookups.
        return reduce(filepath.FilePath.child, path, self.filesystemRoot)

    def makeDirectory(self, path):
        return defer.fail(AnonUserDeniedError())

    def removeDirectory(self, path):
        return defer.fail(AnonUserDeniedError())

    def removeFile(self, path):
        return defer.fail(AnonUserDeniedError())

    def rename(self, fromPath, toPath):
        return defer.fail(AnonUserDeniedError())

    def receive(self, path):
        # NOTE(review): the resolved path is unused — this always fails
        # for anonymous users.
        path = self._path(path)
        return defer.fail(AnonUserDeniedError())

    def openForReading(self, path):
        """
        Open C{path} for reading.

        @param path: The path, as a list of segments, to open.
        @type path: C{list} of C{unicode}

        @return: A L{Deferred} is returned that will fire with an object
            implementing L{IReadFile} if the file is successfully opened.  If
            C{path} is a directory, or if an exception is raised while trying
            to open the file, the L{Deferred} will fire with an error.
        """
        p = self._path(path)
        if p.isdir():
            # Normally, we would only check for EISDIR in open, but win32
            # returns EACCES in this case, so we check before
            return defer.fail(IsADirectoryError(path))
        try:
            f = p.open('r')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(_FileReader(f))

    def openForWriting(self, path):
        """
        Reject write attempts by anonymous users with
        L{PermissionDeniedError}.
        """
        return defer.fail(PermissionDeniedError("STOR not allowed"))

    def access(self, path):
        p = self._path(path)
        if not p.exists():
            # Again, win32 doesn't report a sane error after, so let's fail
            # early if we can
            return defer.fail(FileNotFoundError(path))
        # For now, just see if we can os.listdir() it
        try:
            p.listdir()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)

    def stat(self, path, keys=()):
        p = self._path(path)
        if p.isdir():
            try:
                statResult = self._statNode(p, keys)
            except (IOError, OSError), e:
                return errnoToFailure(e.errno, path)
            except:
                return defer.fail()
            else:
                return defer.succeed(statResult)
        else:
            # For a non-directory, list() returns one entry; extract its
            # stat values.
            return self.list(path, keys).addCallback(lambda res: res[0][1])

    def list(self, path, keys=()):
        """
        Return the list of files at given C{path}, adding C{keys} stat
        informations if specified.

        @param path: the directory or file to check.
        @type path: C{str}

        @param keys: the list of desired metadata
        @type keys: C{list} of C{str}
        """
        filePath = self._path(path)
        if filePath.isdir():
            entries = filePath.listdir()
            fileEntries = [filePath.child(p) for p in entries]
        elif filePath.isfile():
            entries = [os.path.join(*filePath.segmentsFrom(self.filesystemRoot))]
            fileEntries = [filePath]
        else:
            return defer.fail(FileNotFoundError(path))

        results = []
        for fileName, filePath in zip(entries, fileEntries):
            ent = []
            results.append((fileName, ent))
            if keys:
                try:
                    ent.extend(self._statNode(filePath, keys))
                except (IOError, OSError), e:
                    return errnoToFailure(e.errno, fileName)
                except:
                    return defer.fail()
        return defer.succeed(results)

    def _statNode(self, filePath, keys):
        """
        Shortcut method to get stat info on a node.

        @param filePath: the node to stat.
        @type filePath: C{filepath.FilePath}

        @param keys: the stat keys to get.
        @type keys: C{iterable}
        """
        filePath.restat()
        return [getattr(self, '_stat_' + k)(filePath.statinfo) for k in keys]

    _stat_size = operator.attrgetter('st_size')
    _stat_permissions = operator.attrgetter('st_mode')
    _stat_hardlinks = operator.attrgetter('st_nlink')
    _stat_modified = operator.attrgetter('st_mtime')

    def _stat_owner(self, st):
        # Prefer the symbolic user name; fall back to the numeric uid.
        if pwd is not None:
            try:
                return pwd.getpwuid(st.st_uid)[0]
            except KeyError:
                pass
        return str(st.st_uid)

    def _stat_group(self, st):
        # Prefer the symbolic group name; fall back to the numeric gid.
        if grp is not None:
            try:
                return grp.getgrgid(st.st_gid)[0]
            except KeyError:
                pass
        return str(st.st_gid)

    def _stat_directory(self, st):
        return bool(st.st_mode & stat.S_IFDIR)
class _FileReader(object):
    """
    Wrap an open file object as an L{IReadFile} whose contents can be
    streamed to a consumer exactly once.
    """
    implements(IReadFile)

    def __init__(self, fObj):
        self.fObj = fObj
        self._send = False

    def _close(self, passthrough):
        # Close the underlying file when the transfer finishes (success
        # or failure) and forward the result unchanged.
        self._send = True
        self.fObj.close()
        return passthrough

    def send(self, consumer):
        assert not self._send, "Can only call IReadFile.send *once* per instance"
        self._send = True
        transfer = basic.FileSender().beginFileTransfer(self.fObj, consumer)
        transfer.addBoth(self._close)
        return transfer
class FTPShell(FTPAnonymousShell):
    """
    An authenticated implementation of L{IFTPShell}.

    Extends the anonymous shell with the mutating operations: directory
    creation/removal, file removal, rename, and upload.
    """

    def makeDirectory(self, path):
        p = self._path(path)
        try:
            p.makedirs()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)

    def removeDirectory(self, path):
        p = self._path(path)
        if p.isfile():
            # Win32 returns the wrong errno when rmdir is called on a file
            # instead of a directory, so as we have the info here, let's fail
            # early with a pertinent error
            return defer.fail(IsNotADirectoryError(path))
        try:
            os.rmdir(p.path)
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)

    def removeFile(self, path):
        p = self._path(path)
        if p.isdir():
            # Win32 returns the wrong errno when remove is called on a
            # directory instead of a file, so as we have the info here,
            # let's fail early with a pertinent error
            return defer.fail(IsADirectoryError(path))
        try:
            p.remove()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)

    def rename(self, fromPath, toPath):
        fp = self._path(fromPath)
        tp = self._path(toPath)
        try:
            os.rename(fp.path, tp.path)
        except (IOError, OSError), e:
            # Report the error against the source path.
            return errnoToFailure(e.errno, fromPath)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)

    def openForWriting(self, path):
        """
        Open C{path} for writing.

        @param path: The path, as a list of segments, to open.
        @type path: C{list} of C{unicode}

        @return: A L{Deferred} is returned that will fire with an object
            implementing L{IWriteFile} if the file is successfully opened.  If
            C{path} is a directory, or if an exception is raised while trying
            to open the file, the L{Deferred} will fire with an error.
        """
        p = self._path(path)
        if p.isdir():
            # Normally, we would only check for EISDIR in open, but win32
            # returns EACCES in this case, so we check before
            return defer.fail(IsADirectoryError(path))
        try:
            fObj = p.open('w')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        return defer.succeed(_FileWriter(fObj))
class _FileWriter(object):
    """
    Wrap an open file object as an L{IWriteFile}; the upload consumer
    may be obtained exactly once via receive().
    """
    implements(IWriteFile)

    def __init__(self, fObj):
        self.fObj = fObj
        self._receive = False

    def receive(self):
        assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
        self._receive = True
        # FileConsumer will close the file object
        return defer.succeed(FileConsumer(self.fObj))

    def close(self):
        # Nothing extra to do after the consumer has finished.
        return defer.succeed(None)
class BaseFTPRealm:
    """
    Base class for simple FTP realms which provides an easy hook for specifying
    the home directory for each user.

    Anonymous avatars get an FTPAnonymousShell rooted at
    C{anonymousRoot}; all others get an FTPShell rooted at the home
    directory returned by L{getHomeDirectory}.
    """
    implements(portal.IRealm)

    def __init__(self, anonymousRoot):
        self.anonymousRoot = filepath.FilePath(anonymousRoot)

    def getHomeDirectory(self, avatarId):
        """
        Return a L{FilePath} representing the home directory of the given
        avatar.  Override this in a subclass.

        @param avatarId: A user identifier returned from a credentials checker.
        @type avatarId: C{str}

        @rtype: L{FilePath}
        """
        raise NotImplementedError(
            "%r did not override getHomeDirectory" % (self.__class__,))

    def requestAvatar(self, avatarId, mind, *interfaces):
        for iface in interfaces:
            if iface is IFTPShell:
                if avatarId is checkers.ANONYMOUS:
                    avatar = FTPAnonymousShell(self.anonymousRoot)
                else:
                    avatar = FTPShell(self.getHomeDirectory(avatarId))
                # Use the avatar's logout hook if it has one; otherwise a
                # no-op.
                return (IFTPShell, avatar,
                        getattr(avatar, 'logout', lambda: None))
        raise NotImplementedError(
            "Only IFTPShell interface is supported by this realm")
class FTPRealm(BaseFTPRealm):
    """
    @type anonymousRoot: L{twisted.python.filepath.FilePath}
    @ivar anonymousRoot: Root of the filesystem to which anonymous
    users will be granted access.

    @type userHome: L{filepath.FilePath}
    @ivar userHome: Root of the filesystem containing user home directories.
    """
    def __init__(self, anonymousRoot, userHome='/home'):
        BaseFTPRealm.__init__(self, anonymousRoot)
        self.userHome = filepath.FilePath(userHome)

    def getHomeDirectory(self, avatarId):
        """
        Use C{avatarId} as a single path segment to construct a child of
        C{self.userHome} and return that child.
        """
        return self.userHome.child(avatarId)
class SystemFTPRealm(BaseFTPRealm):
    """
    L{SystemFTPRealm} uses system user account information to decide what the
    home directory for a particular avatarId is.

    This works on POSIX but probably is not reliable on Windows.
    """
    def getHomeDirectory(self, avatarId):
        """
        Return the system-defined home directory of the system user account with
        the name C{avatarId}.

        @raise cred_error.UnauthorizedLogin: if no such account exists
            (expanduser leaves the '~user' string unchanged in that case).
        """
        path = os.path.expanduser('~' + avatarId)
        if path.startswith('~'):
            raise cred_error.UnauthorizedLogin()
        return filepath.FilePath(path)
# --- FTP CLIENT -------------------------------------------------------------
####
# And now for the client...
# Notes:
# * Reference: http://cr.yp.to/ftp.html
# * FIXME: Does not support pipelining (which is not supported by all
# servers anyway). This isn't a functionality limitation, just a
# small performance issue.
# * Only has a rudimentary understanding of FTP response codes (although
# the full response is passed to the caller if they so choose).
# * Assumes that USER and PASS should always be sent
# * Always sets TYPE I (binary mode)
# * Doesn't understand any of the weird, obscure TELNET stuff (\377...)
# * FIXME: Doesn't share any code with the FTPServer
class ConnectionLost(FTPError):
    """
    Raised when the FTP connection is lost.
    """
    pass
class CommandFailed(FTPError):
    """
    Raised when an FTP command fails.
    """
    pass
class BadResponse(FTPError):
    """
    Raised on a bad response from the server.
    """
    pass
class UnexpectedResponse(FTPError):
    """
    Raised when the server sends a response that was not expected.
    """
    pass
class UnexpectedData(FTPError):
    """
    Raised when data arrives on a connection that should not carry any.
    """
    pass
class FTPCommand:
    """
    A single queued FTP client command: its text, a readiness flag, and
    the Deferred that fires with the command's outcome.
    """
    def __init__(self, text=None, public=0):
        self.text = text
        self.deferred = defer.Deferred()
        self.ready = 1
        self.public = public
        self.transferDeferred = None

    def fail(self, failure):
        # Only "public" commands (those whose deferred was handed to user
        # code) have their deferred errbacked.
        if not self.public:
            return
        self.deferred.errback(failure)
class ProtocolWrapper(protocol.Protocol):
    """
    Forward all protocol events to a wrapped protocol, and fire a
    Deferred when the connection is lost (i.e. the transfer completed).
    """
    def __init__(self, original, deferred):
        self.original = original
        self.deferred = deferred

    def makeConnection(self, transport):
        self.original.makeConnection(transport)

    def dataReceived(self, data):
        self.original.dataReceived(data)

    def connectionLost(self, reason):
        self.original.connectionLost(reason)
        # Signal that transfer has completed
        self.deferred.callback(None)
class IFinishableConsumer(interfaces.IConsumer):
    """
    A Consumer for producers that finish.

    @since: 11.0
    """

    def finish():
        """
        The producer has finished producing.
        """
class SenderProtocol(protocol.Protocol):
    """
    A send-only data-connection protocol: bytes may be written out, and
    any incoming data is treated as an error.
    """
    implements(IFinishableConsumer)

    def __init__(self):
        # Fired upon connection
        self.connectedDeferred = defer.Deferred()

        # Fired upon disconnection
        self.deferred = defer.Deferred()

    #Protocol stuff
    def dataReceived(self, data):
        raise UnexpectedData(
            "Received data from the server on a "
            "send-only data-connection"
        )

    def makeConnection(self, transport):
        protocol.Protocol.makeConnection(self, transport)
        self.connectedDeferred.callback(self)

    def connectionLost(self, reason):
        # A clean close completes the transfer; anything else is an error.
        if reason.check(error.ConnectionDone):
            self.deferred.callback('connection done')
        else:
            self.deferred.errback(reason)

    #IFinishableConsumer stuff
    def write(self, data):
        self.transport.write(data)

    def registerProducer(self, producer, streaming):
        """
        Register the given producer with our transport.
        """
        self.transport.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """
        Unregister the previously registered producer.
        """
        self.transport.unregisterProducer()

    def finish(self):
        self.transport.loseConnection()
def decodeHostPort(line):
    """Decode an FTP response specifying a host and port.

    PASV-style responses embed six comma-separated byte values
    h1,h2,h3,h4,p1,p2 -- the four IPv4 address octets followed by the
    high and low bytes of the port (RFC 959, section 4.1.2).

    @param line: response text (already stripped of its response code).
    @return: a 2-tuple of (host, port).
    @raise ValueError: if the line does not contain exactly six integers
        in the range 0-255.
    """
    # Servers wrap the numbers in varying prose/punctuation; keep only
    # digits, commas and spaces before splitting.
    cleaned = re.sub('[^0-9, ]', '', line)
    parsed = [int(p.strip()) for p in cleaned.split(',')]
    # Validate explicitly so malformed responses produce a clear error
    # instead of an opaque unpacking failure.
    if len(parsed) != 6:
        raise ValueError("Expected 6 host/port values", line, parsed)
    for x in parsed:
        if x < 0 or x > 255:
            raise ValueError("Out of range", line, x)
    a, b, c, d, e, f = parsed
    host = "%s.%s.%s.%s" % (a, b, c, d)
    port = (e << 8) + f
    return host, port
def encodeHostPort(host, port):
    """Encode a host/port pair in the six-number form used by PORT/PASV.

    @param host: dotted-quad IPv4 address string.
    @param port: TCP port number.
    @return: a string 'h1,h2,h3,h4,p1,p2'.
    """
    highByte, lowByte = divmod(port, 256)
    fields = host.split('.')
    fields.append(str(highByte))
    fields.append(str(lowByte))
    return ','.join(fields)
def _unwrapFirstError(failure):
    # DeferredList(fireOnOneErrback=True) wraps the real failure in
    # defer.FirstError; unwrap it so callers see the underlying error.
    # Anything that is not a FirstError is re-raised by trap().
    failure.trap(defer.FirstError)
    return failure.value.subFailure
class FTPDataPortFactory(protocol.ServerFactory):
    """Factory for data connections that use the PORT command
    (i.e. "active" transfers)
    """
    noisy = 0
    def buildProtocol(self, addr):
        # This is a bit hackish -- we already have a Protocol instance,
        # so just return it instead of making a new one
        # FIXME: Reject connections from the wrong address/port
        #        (potential security problem)
        self.protocol.factory = self
        # NOTE(review): presumably this stops the listening port so only a
        # single data connection is accepted per transfer -- confirm that
        # self.port supports loseConnection() (vs stopListening()).
        self.port.loseConnection()
        return self.protocol
class FTPClientBasic(basic.LineReceiver):
    """
    Foundations of an FTP client: a line-based control-connection protocol
    that serializes commands through a queue and matches multi-line
    responses to the command currently in flight.
    """
    debug = False
    def __init__(self):
        # Queue of FTPCommand objects not yet sent.
        self.actionQueue = []
        # Server greeting lines, stored once received.
        self.greeting = None
        # Deferred for the response currently awaited; the first expected
        # "response" is the server greeting itself.
        self.nextDeferred = defer.Deferred().addCallback(self._cb_greeting)
        self.nextDeferred.addErrback(self.fail)
        # Accumulates lines of the in-progress (possibly multi-line) response.
        self.response = []
        self._failed = 0
    def fail(self, error):
        """
        Give an error to any queued deferreds.
        """
        self._fail(error)
    def _fail(self, error):
        """
        Errback all queued deferreds.
        """
        if self._failed:
            # We're recursing; bail out here for simplicity
            return error
        self._failed = 1
        if self.nextDeferred:
            try:
                self.nextDeferred.errback(failure.Failure(ConnectionLost('FTP connection lost', error)))
            except defer.AlreadyCalledError:
                pass
        for ftpCommand in self.actionQueue:
            ftpCommand.fail(failure.Failure(ConnectionLost('FTP connection lost', error)))
        return error
    def _cb_greeting(self, greeting):
        # Remember the server's welcome banner for later inspection.
        self.greeting = greeting
    def sendLine(self, line):
        """
        (Private) Sends a line, unless line is None.
        """
        if line is None:
            return
        basic.LineReceiver.sendLine(self, line)
    def sendNextCommand(self):
        """
        (Private) Processes the next command in the queue.
        """
        ftpCommand = self.popCommandQueue()
        if ftpCommand is None:
            self.nextDeferred = None
            return
        if not ftpCommand.ready:
            # Command text not generated yet (e.g. PORT waiting for a
            # listening socket); put it back and poll again shortly.
            self.actionQueue.insert(0, ftpCommand)
            reactor.callLater(1.0, self.sendNextCommand)
            self.nextDeferred = None
            return
        # FIXME: this if block doesn't belong in FTPClientBasic, it belongs in
        # FTPClient.
        if ftpCommand.text == 'PORT':
            self.generatePortCommand(ftpCommand)
        if self.debug:
            log.msg('<-- %s' % ftpCommand.text)
        self.nextDeferred = ftpCommand.deferred
        self.sendLine(ftpCommand.text)
    def queueCommand(self, ftpCommand):
        """
        Add an FTPCommand object to the queue.
        If it's the only thing in the queue, and we are connected and we aren't
        waiting for a response of an earlier command, the command will be sent
        immediately.
        @param ftpCommand: an L{FTPCommand}
        """
        self.actionQueue.append(ftpCommand)
        if (len(self.actionQueue) == 1 and self.transport is not None and
                self.nextDeferred is None):
            self.sendNextCommand()
    def queueStringCommand(self, command, public=1):
        """
        Queues a string to be issued as an FTP command
        @param command: string of an FTP command to queue
        @param public: a flag intended for internal use by FTPClient. Don't
            change it unless you know what you're doing.
        @return: a L{Deferred} that will be called when the response to the
            command has been received.
        """
        ftpCommand = FTPCommand(command, public)
        self.queueCommand(ftpCommand)
        return ftpCommand.deferred
    def popCommandQueue(self):
        """
        Return the front element of the command queue, or None if empty.
        """
        if self.actionQueue:
            return self.actionQueue.pop(0)
        else:
            return None
    def queueLogin(self, username, password):
        """
        Login: send the username, send the password.
        If the password is C{None}, the PASS command won't be sent. Also, if
        the response to the USER command has a response code of 230 (User logged
        in), then PASS won't be sent either.
        """
        # Prepare the USER command
        deferreds = []
        userDeferred = self.queueStringCommand('USER ' + username, public=0)
        deferreds.append(userDeferred)
        # Prepare the PASS command (if a password is given)
        if password is not None:
            passwordCmd = FTPCommand('PASS ' + password, public=0)
            self.queueCommand(passwordCmd)
            deferreds.append(passwordCmd.deferred)
            # Avoid sending PASS if the response to USER is 230.
            # (ref: http://cr.yp.to/ftp/user.html#user)
            def cancelPasswordIfNotNeeded(response):
                if response[0].startswith('230'):
                    # No password needed!
                    self.actionQueue.remove(passwordCmd)
                return response
            userDeferred.addCallback(cancelPasswordIfNotNeeded)
        # Error handling.
        for deferred in deferreds:
            # If something goes wrong, call fail
            deferred.addErrback(self.fail)
            # But also swallow the error, so we don't cause spurious errors
            deferred.addErrback(lambda x: None)
    def lineReceived(self, line):
        """
        (Private) Parses the response messages from the FTP server.
        """
        # Add this line to the current response
        if self.debug:
            log.msg('--> %s' % line)
        self.response.append(line)
        # Bail out if this isn't the last line of a response
        # The last line of response starts with 3 digits followed by a space
        codeIsValid = re.match(r'\d{3} ', line)
        if not codeIsValid:
            return
        code = line[0:3]
        # Ignore marks (1xx are preliminary replies; the real one follows)
        if code[0] == '1':
            return
        # Check that we were expecting a response
        if self.nextDeferred is None:
            self.fail(UnexpectedResponse(self.response))
            return
        # Reset the response
        response = self.response
        self.response = []
        # Look for a success or error code, and call the appropriate callback
        if code[0] in ('2', '3'):
            # Success
            self.nextDeferred.callback(response)
        elif code[0] in ('4', '5'):
            # Failure
            self.nextDeferred.errback(failure.Failure(CommandFailed(response)))
        else:
            # This shouldn't happen unless something screwed up.
            log.msg('Server sent invalid response code %s' % (code,))
            self.nextDeferred.errback(failure.Failure(BadResponse(response)))
        # Run the next command
        self.sendNextCommand()
    def connectionLost(self, reason):
        # Propagate the disconnect to every outstanding deferred.
        self._fail(reason)
class _PassiveConnectionFactory(protocol.ClientFactory):
    """
    Client factory used for passive-mode data connections: hands out a
    pre-built protocol instance and reports connect failures through
    that protocol's deferred.
    """
    noisy = False
    def __init__(self, protoInstance):
        self.protoInstance = protoInstance
    def buildProtocol(self, ignored):
        self.protoInstance.factory = self
        return self.protoInstance
    def clientConnectionFailed(self, connector, reason):
        e = FTPError('Connection Failed', reason)
        self.protoInstance.deferred.errback(e)
class FTPClient(FTPClientBasic):
    """
    L{FTPClient} is a client implementation of the FTP protocol which
    exposes FTP commands as methods which return L{Deferred}s.
    Each command method returns a L{Deferred} which is called back when a
    successful response code (2xx or 3xx) is received from the server or
    which is error backed if an error response code (4xx or 5xx) is received
    from the server or if a protocol violation occurs. If an error response
    code is received, the L{Deferred} fires with a L{Failure} wrapping a
    L{CommandFailed} instance. The L{CommandFailed} instance is created
    with a list of the response lines received from the server.
    See U{RFC 959<http://www.ietf.org/rfc/rfc959.txt>} for error code
    definitions.
    Both active and passive transfers are supported.
    @ivar passive: See description in __init__.
    """
    # Hook for tests to substitute the TCP connection function.
    connectFactory = reactor.connectTCP
    def __init__(self, username='anonymous',
                 password='twisted@twistedmatrix.com',
                 passive=1):
        """
        Constructor.
        I will login as soon as I receive the welcome message from the server.
        @param username: FTP username
        @param password: FTP password
        @param passive: flag that controls if I use active or passive data
            connections. You can also change this after construction by
            assigning to C{self.passive}.
        """
        FTPClientBasic.__init__(self)
        self.queueLogin(username, password)
        self.passive = passive
    def fail(self, error):
        """
        Disconnect, and also give an error to any queued deferreds.
        """
        # Unlike FTPClientBasic.fail, this also drops the control connection.
        self.transport.loseConnection()
        self._fail(error)
    def receiveFromConnection(self, commands, protocol):
        """
        Retrieves a file or listing generated by the given command,
        feeding it to the given protocol.
        @param commands: list of strings of FTP commands to execute then receive
            the results of (e.g. C{LIST}, C{RETR})
        @param protocol: A L{Protocol} B{instance} e.g. an
            L{FTPFileListProtocol}, or something that can be adapted to one.
            Typically this will be an L{IConsumer} implementation.
        @return: L{Deferred}.
        """
        protocol = interfaces.IProtocol(protocol)
        wrapper = ProtocolWrapper(protocol, defer.Deferred())
        return self._openDataConnection(commands, wrapper)
    def queueLogin(self, username, password):
        """
        Login: send the username, send the password, and
        set retrieval mode to binary
        """
        FTPClientBasic.queueLogin(self, username, password)
        d = self.queueStringCommand('TYPE I', public=0)
        # If something goes wrong, call fail
        d.addErrback(self.fail)
        # But also swallow the error, so we don't cause spurious errors
        d.addErrback(lambda x: None)
    def sendToConnection(self, commands):
        """
        Open a data connection for uploading (e.g. STOR).
        @return: A tuple of two L{Deferred}s:
            - L{Deferred} L{IFinishableConsumer}. You must call
            the C{finish} method on the IFinishableConsumer when the file
            is completely transferred.
            - L{Deferred} list of control-connection responses.
        """
        s = SenderProtocol()
        r = self._openDataConnection(commands, s)
        return (s.connectedDeferred, r)
    def _openDataConnection(self, commands, protocol):
        """
        This method returns a DeferredList.
        """
        cmds = [FTPCommand(command, public=1) for command in commands]
        cmdsDeferred = defer.DeferredList([cmd.deferred for cmd in cmds],
                                          fireOnOneErrback=True, consumeErrors=True)
        cmdsDeferred.addErrback(_unwrapFirstError)
        if self.passive:
            # Hack: use a mutable object to sneak a variable out of the
            # scope of doPassive
            _mutable = [None]
            def doPassive(response):
                """Connect to the port specified in the response to PASV"""
                host, port = decodeHostPort(response[-1][4:])
                f = _PassiveConnectionFactory(protocol)
                _mutable[0] = self.connectFactory(host, port, f)
            pasvCmd = FTPCommand('PASV')
            self.queueCommand(pasvCmd)
            pasvCmd.deferred.addCallback(doPassive).addErrback(self.fail)
            results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
            d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
            d.addErrback(_unwrapFirstError)
            # Ensure the connection is always closed
            def close(x, m=_mutable):
                m[0] and m[0].disconnect()
                return x
            d.addBoth(close)
        else:
            # We just place a marker command in the queue, and will fill in
            # the host and port numbers later (see generatePortCommand)
            portCmd = FTPCommand('PORT')
            # Ok, now we jump through a few hoops here.
            # This is the problem: a transfer is not to be trusted as complete
            # until we get both the "226 Transfer complete" message on the
            # control connection, and the data socket is closed. Thus, we use
            # a DeferredList to make sure we only fire the callback at the
            # right time.
            portCmd.transferDeferred = protocol.deferred
            portCmd.protocol = protocol
            portCmd.deferred.addErrback(portCmd.transferDeferred.errback)
            self.queueCommand(portCmd)
            # Create dummy functions for the next callback to call.
            # These will also be replaced with real functions in
            # generatePortCommand.
            portCmd.loseConnection = lambda result: result
            portCmd.fail = lambda error: error
            # Ensure that the connection always gets closed
            cmdsDeferred.addErrback(lambda e, pc=portCmd: pc.fail(e) or e)
            results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
            d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
            d.addErrback(_unwrapFirstError)
        for cmd in cmds:
            self.queueCommand(cmd)
        return d
    def generatePortCommand(self, portCmd):
        """
        (Private) Generates the text of a given PORT command.
        """
        # The problem is that we don't create the listening port until we need
        # it for various reasons, and so we have to muck about to figure out
        # what interface and port it's listening on, and then finally we can
        # create the text of the PORT command to send to the FTP server.
        # FIXME: This method is far too ugly.
        # FIXME: The best solution is probably to only create the data port
        #        once per FTPClient, and just recycle it for each new download.
        #        This should be ok, because we don't pipeline commands.
        # Start listening on a port
        factory = FTPDataPortFactory()
        factory.protocol = portCmd.protocol
        listener = reactor.listenTCP(0, factory)
        factory.port = listener
        # Ensure we close the listening port if something goes wrong
        def listenerFail(error, listener=listener):
            if listener.connected:
                listener.loseConnection()
            return error
        portCmd.fail = listenerFail
        # Construct crufty FTP magic numbers that represent host & port
        host = self.transport.getHost().host
        port = listener.getHost().port
        portCmd.text = 'PORT ' + encodeHostPort(host, port)
    def escapePath(self, path):
        """
        Returns a FTP escaped path (replace newlines with nulls).
        """
        # Escape newline characters
        return path.replace('\n', '\0')
    def retrieveFile(self, path, protocol, offset=0):
        """
        Retrieve a file from the given path
        This method issues the 'RETR' FTP command.
        The file is fed into the given Protocol instance. The data connection
        will be passive if self.passive is set.
        @param path: path to file that you wish to receive.
        @param protocol: a L{Protocol} instance.
        @param offset: offset to start downloading from
        @return: L{Deferred}
        """
        cmds = ['RETR ' + self.escapePath(path)]
        if offset:
            # REST must precede RETR to resume at the given byte offset.
            cmds.insert(0, ('REST ' + str(offset)))
        return self.receiveFromConnection(cmds, protocol)
    retr = retrieveFile
    def storeFile(self, path, offset=0):
        """
        Store a file at the given path.
        This method issues the 'STOR' FTP command.
        @return: A tuple of two L{Deferred}s:
            - L{Deferred} L{IFinishableConsumer}. You must call
            the C{finish} method on the IFinishableConsumer when the file
            is completely transferred.
            - L{Deferred} list of control-connection responses.
        """
        cmds = ['STOR ' + self.escapePath(path)]
        if offset:
            cmds.insert(0, ('REST ' + str(offset)))
        return self.sendToConnection(cmds)
    stor = storeFile
    def rename(self, pathFrom, pathTo):
        """
        Rename a file.
        This method issues the I{RNFR}/I{RNTO} command sequence to rename
        C{pathFrom} to C{pathTo}.
        @param: pathFrom: the absolute path to the file to be renamed
        @type pathFrom: C{str}
        @param: pathTo: the absolute path to rename the file to.
        @type pathTo: C{str}
        @return: A L{Deferred} which fires when the rename operation has
            succeeded or failed.  If it succeeds, the L{Deferred} is called
            back with a two-tuple of lists.  The first list contains the
            responses to the I{RNFR} command.  The second list contains the
            responses to the I{RNTO} command.  If either I{RNFR} or I{RNTO}
            fails, the L{Deferred} is errbacked with L{CommandFailed} or
            L{BadResponse}.
        @rtype: L{Deferred}
        @since: 8.2
        """
        renameFrom = self.queueStringCommand('RNFR ' + self.escapePath(pathFrom))
        renameTo = self.queueStringCommand('RNTO ' + self.escapePath(pathTo))
        fromResponse = []
        # Use a separate Deferred for the ultimate result so that Deferred
        # chaining can't interfere with its result.
        result = defer.Deferred()
        # Bundle up all the responses
        result.addCallback(lambda toResponse: (fromResponse, toResponse))
        def ebFrom(failure):
            # Make sure the RNTO doesn't run if the RNFR failed.
            # (popCommandQueue removes the queued RNTO, which is at the
            # front of the queue at this point.)
            self.popCommandQueue()
            result.errback(failure)
        # Save the RNFR response to pass to the result Deferred later
        renameFrom.addCallbacks(fromResponse.extend, ebFrom)
        # Hook up the RNTO to the result Deferred as well
        renameTo.chainDeferred(result)
        return result
    def list(self, path, protocol):
        """
        Retrieve a file listing into the given protocol instance.
        This method issues the 'LIST' FTP command.
        @param path: path to get a file listing for.
        @param protocol: a L{Protocol} instance, probably a
            L{FTPFileListProtocol} instance.  It can cope with most common file
            listing formats.
        @return: L{Deferred}
        """
        if path is None:
            path = ''
        return self.receiveFromConnection(['LIST ' + self.escapePath(path)], protocol)
    def nlst(self, path, protocol):
        """
        Retrieve a short file listing into the given protocol instance.
        This method issues the 'NLST' FTP command.
        NLST (should) return a list of filenames, one per line.
        @param path: path to get short file listing for.
        @param protocol: a L{Protocol} instance.
        """
        if path is None:
            path = ''
        return self.receiveFromConnection(['NLST ' + self.escapePath(path)], protocol)
    def cwd(self, path):
        """
        Issues the CWD (Change Working Directory) command. It's also
        available as changeDirectory, which parses the result.
        @return: a L{Deferred} that will be called when done.
        """
        return self.queueStringCommand('CWD ' + self.escapePath(path))
    def changeDirectory(self, path):
        """
        Change the directory on the server and parse the result to determine
        if it was successful or not.
        @type path: C{str}
        @param path: The path to which to change.
        @return: a L{Deferred} which will be called back when the directory
            change has succeeded or errbacked if an error occurrs.
        """
        # Deprecated: prefer cwd(); kept for backwards compatibility.
        warnings.warn(
            "FTPClient.changeDirectory is deprecated in Twisted 8.2 and "
            "newer.  Use FTPClient.cwd instead.",
            category=DeprecationWarning,
            stacklevel=2)
        def cbResult(result):
            if result[-1][:3] != '250':
                return failure.Failure(CommandFailed(result))
            return True
        return self.cwd(path).addCallback(cbResult)
    def makeDirectory(self, path):
        """
        Make a directory
        This method issues the MKD command.
        @param path: The path to the directory to create.
        @type path: C{str}
        @return: A L{Deferred} which fires when the server responds.  If the
            directory is created, the L{Deferred} is called back with the
            server response.  If the server response indicates the directory
            was not created, the L{Deferred} is errbacked with a L{Failure}
            wrapping L{CommandFailed} or L{BadResponse}.
        @rtype: L{Deferred}
        @since: 8.2
        """
        return self.queueStringCommand('MKD ' + self.escapePath(path))
    def removeFile(self, path):
        """
        Delete a file on the server.
        L{removeFile} issues a I{DELE} command to the server to remove the
        indicated file.  Note that this command cannot remove a directory.
        @param path: The path to the file to delete. May be relative to the
            current dir.
        @type path: C{str}
        @return: A L{Deferred} which fires when the server responds.  On error,
            it is errbacked with either L{CommandFailed} or L{BadResponse}.  On
            success, it is called back with a list of response lines.
        @rtype: L{Deferred}
        @since: 8.2
        """
        return self.queueStringCommand('DELE ' + self.escapePath(path))
    def cdup(self):
        """
        Issues the CDUP (Change Directory UP) command.
        @return: a L{Deferred} that will be called when done.
        """
        return self.queueStringCommand('CDUP')
    def pwd(self):
        """
        Issues the PWD (Print Working Directory) command.
        The L{getDirectory} does the same job but automatically parses the
        result.
        @return: a L{Deferred} that will be called when done.  It is up to the
            caller to interpret the response, but the L{parsePWDResponse} method
            in this module should work.
        """
        return self.queueStringCommand('PWD')
    def getDirectory(self):
        """
        Returns the current remote directory.
        @return: a L{Deferred} that will be called back with a C{str} giving
            the remote directory or which will errback with L{CommandFailed}
            if an error response is returned.
        """
        def cbParse(result):
            try:
                # The only valid code is 257
                if int(result[0].split(' ', 1)[0]) != 257:
                    raise ValueError
            except (IndexError, ValueError):
                return failure.Failure(CommandFailed(result))
            path = parsePWDResponse(result[0])
            if path is None:
                return failure.Failure(CommandFailed(result))
            return path
        return self.pwd().addCallback(cbParse)
    def quit(self):
        """
        Issues the I{QUIT} command.
        @return: A L{Deferred} that fires when the server acknowledges the
            I{QUIT} command.  The transport should not be disconnected until
            this L{Deferred} fires.
        """
        return self.queueStringCommand('QUIT')
class FTPFileListProtocol(basic.LineReceiver):
    """Parser for standard FTP file listings
    This is the evil required to match::
        -rw-r--r-- 1 root other 531 Jan 29 03:26 README
    If you need different evil for a wacky FTP server, you can
    override either C{fileLinePattern} or C{parseDirectoryLine()}.
    It populates the instance attribute self.files, which is a list containing
    dicts with the following keys (examples from the above line):
        - filetype: e.g. 'd' for directories, or '-' for an ordinary file
        - perms: e.g. 'rw-r--r--'
        - nlinks: e.g. 1
        - owner: e.g. 'root'
        - group: e.g. 'other'
        - size: e.g. 531
        - date: e.g. 'Jan 29 03:26'
        - filename: e.g. 'README'
        - linktarget: e.g. 'some/file'
    Note that the 'date' value will be formatted differently depending on the
    date.  Check U{http://cr.yp.to/ftp.html} if you really want to try to parse
    it.
    @ivar files: list of dicts describing the files in this listing
    """
    # Matches one "ls -l"-style listing line; filenames may contain
    # backslash-escaped spaces, and symlinks carry a ' -> target' suffix.
    fileLinePattern = re.compile(
        r'^(?P<filetype>.)(?P<perms>.{9})\s+(?P<nlinks>\d*)\s*'
        r'(?P<owner>\S+)\s+(?P<group>\S+)\s+(?P<size>\d+)\s+'
        r'(?P<date>...\s+\d+\s+[\d:]+)\s+(?P<filename>([^ ]|\\ )*?)'
        r'( -> (?P<linktarget>[^\r]*))?\r?$'
    )
    # Listings are newline-delimited (the trailing \r is handled by the regex).
    delimiter = '\n'
    def __init__(self):
        self.files = []
    def lineReceived(self, line):
        d = self.parseDirectoryLine(line)
        if d is None:
            self.unknownLine(line)
        else:
            self.addFile(d)
    def parseDirectoryLine(self, line):
        """Return a dictionary of fields, or None if line cannot be parsed.
        @param line: line of text expected to contain a directory entry
        @type line: str
        @return: dict
        """
        match = self.fileLinePattern.match(line)
        if match is None:
            return None
        else:
            d = match.groupdict()
            # Undo the backslash-escaping of spaces in names.
            d['filename'] = d['filename'].replace(r'\ ', ' ')
            d['nlinks'] = int(d['nlinks'])
            d['size'] = int(d['size'])
            if d['linktarget']:
                d['linktarget'] = d['linktarget'].replace(r'\ ', ' ')
            return d
    def addFile(self, info):
        """Append file information dictionary to the list of known files.
        Subclasses can override or extend this method to handle file
        information differently without affecting the parsing of data
        from the server.
        @param info: dictionary containing the parsed representation
            of the file information
        @type info: dict
        """
        self.files.append(info)
    def unknownLine(self, line):
        """Deal with received lines which could not be parsed as file
        information.
        Subclasses can override this to perform any special processing
        needed.
        @param line: unparsable line as received
        @type line: str
        """
        pass
def parsePWDResponse(response):
    """Returns the path from a response to a PWD command.

    Responses typically look like::
        257 "/home/andrew" is current directory.
    For this example, I will return C{'/home/andrew'}.

    Per RFC 959, a double-quote character inside the path is represented
    by two consecutive double-quotes, which this function collapses back
    to one.

    If I can't find the path, I return C{None}.
    """
    # Match the FIRST quoted string, treating '""' as an escaped quote.
    # The previous greedy pattern '"(.*)"' spanned from the first to the
    # LAST quote in the whole response, which is wrong when the trailing
    # prose contains quotes.
    match = re.search('"(([^"]|"")*)"', response)
    if match:
        return match.groups()[0].replace('""', '"')
    else:
        return None
| agpl-3.0 |
mutanthost/plexhole | lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 354 | 10534 | from __future__ import absolute_import, division, unicode_literals
from . import base
class Filter(base.Filter):
    """Token-stream filter that drops start/end tags the HTML5 spec makes
    optional (e.g. </li> before another <li>), shrinking serialized output
    without changing the parsed document.
    """
    def slider(self):
        """Yield (previous, current, next) triples over the token stream,
        padding the ends with None."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        if previous1 is not None:
            yield previous2, previous1, None
    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag with attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token
    def is_optional_start(self, tagname, previous, next):
        """Return True if the start tag for *tagname* may be omitted given
        the surrounding tokens, per the HTML5 optional-tags rules."""
        type = next and next["type"] or None
        # BUG FIX: this was `tagname in 'html'`, a substring test that is
        # also true for 'h', 'ht', 'tm', etc.; equality is what was meant.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False
    def is_optional_end(self, tagname, next):
        """Return True if the end tag for *tagname* may be omitted given the
        following token, per the HTML5 optional-tags rules."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| gpl-2.0 |
bretttegart/treadmill | tests/plugins/zookeeper_test.py | 3 | 2218 | """
Unit test for zookeeper plugin
"""
import unittest
import mock
from treadmill.plugins import zookeeper
class ZookeeperTest(unittest.TestCase):
    """Tests for the treadmill zookeeper plugin's connection helpers
    and ACL constructors."""
    @mock.patch('kazoo.client.KazooClient')
    def test_connect_without_connargs(self, kazooClientMock):
        # With no explicit connargs, hosts are parsed out of the zk URL and
        # GSSAPI SASL defaults are supplied.
        zkurl = 'zookeeper://foo@123:21'
        zookeeper.connect(zkurl, {})
        kazooClientMock.assert_called_once_with(
            hosts='123:21',
            sasl_data={
                'service': 'zookeeper',
                'mechanisms': ['GSSAPI']
            })
    @mock.patch('kazoo.client.KazooClient')
    def test_connect_with_connargs(self, kazooClientMock):
        # Explicit connargs are passed through untouched, overriding the
        # values derived from the URL.
        zkurl = 'zookeeper://foobar:123'
        connargs = {
            'hosts': 'foobar:123',
            'sasl_data': {
                'service': 'foo',
                'mechanisms': 'bar'
            }
        }
        zookeeper.connect(zkurl, connargs)
        kazooClientMock.assert_called_once_with(
            hosts='foobar:123',
            sasl_data={
                'service': 'foo',
                'mechanisms': 'bar'
            })
    @mock.patch('kazoo.security.make_acl')
    def test_make_user_acl(self, makeAclMock):
        # 'rw' should map to read+write only.
        zookeeper.make_user_acl('foo', 'rw')
        makeAclMock.assert_called_once_with(
            scheme='sasl', credential='foo', read=True,
            write=True, create=False, delete=False, admin=False
        )
    @mock.patch('kazoo.security.make_acl')
    def test_make_role_acl_with_valid_role(self, makeAclMock):
        # Role credentials are namespaced under 'role/'.
        zookeeper.make_role_acl('servers', 'ra')
        makeAclMock.assert_called_once_with(
            scheme='sasl', credential='role/servers', read=True,
            write=False, delete=False, create=False, admin=True
        )
    def test_make_role_acl_with_invalid_role(self):
        # Unknown role names are rejected with an assertion.
        with self.assertRaises(AssertionError):
            zookeeper.make_role_acl('foo', 'rwc')
    @mock.patch('kazoo.security.make_acl')
    def test_make_host_acl(self, makeAclMock):
        # Host credentials are namespaced under 'host/'.
        zookeeper.make_host_acl('foo@123', 'rdwca')
        makeAclMock.assert_called_once_with(
            scheme='sasl', credential='host/foo@123', read=True,
            write=True, delete=True, create=True, admin=True
        )
| apache-2.0 |
oorestisime/debsources | debsources/app/sources/__init__.py | 6 | 1057 | # Copyright (C) 2015 The Debsources developers <info@sources.debian.net>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=AUTHORS;hb=HEAD
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=COPYING;hb=HEAD
from __future__ import absolute_import
from flask import Blueprint
# naming rule: bp_{dirname}
# Blueprint for the "sources" section of the web app; view functions are
# attached to it by the routes module imported below.
bp_sources = Blueprint('sources',
                       __name__,
                       template_folder='templates',
                       static_url_path='/static/sources',
                       static_folder='static')
# Imported for its side effect of registering the routes on bp_sources.
from . import routes  # NOQA
| agpl-3.0 |
lordblackfox/aircox | aircox/admin/page.py | 2 | 3870 | from copy import deepcopy
from django.contrib import admin
from django.http import QueryDict
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from adminsortable2.admin import SortableInlineAdminMixin
from ..models import Category, Comment, NavItem, Page, StaticPage
__all__ = ['CategoryAdmin', 'PageAdmin', 'NavItemInline']
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for page categories: edit title/slug inline in the changelist."""
    list_display = ['pk', 'title', 'slug']
    list_editable = ['title', 'slug']
    search_fields = ['title']
    fields = ['title', 'slug']
    # Slug is pre-filled from the title while typing in the admin form.
    prepopulated_fields = {"slug": ("title",)}
class BasePageAdmin(admin.ModelAdmin):
    """Shared admin behaviour for Page subclasses.

    Besides the usual changelist configuration, this class propagates the
    changelist's ``parent`` filter into the add/change views so that new
    pages are pre-attached to the parent currently being browsed.
    """
    list_display = ('cover_thumb', 'title', 'status', 'parent')
    list_display_links = ('cover_thumb', 'title')
    list_editable = ('status',)
    list_filter = ('status',)
    prepopulated_fields = {"slug": ("title",)}
    # prepopulate fields using changelist's filters
    prepopulated_filters = ('parent',)
    search_fields = ('title',)
    fieldsets = [
        ('', {
            'fields': ['title', 'slug', 'cover', 'content'],
        }),
        (_('Publication Settings'), {
            'fields': ['status', 'parent'],
        }),
    ]
    change_form_template = 'admin/aircox/page_change_form.html'

    def cover_thumb(self, obj):
        """Changelist column: small <img> for the page cover, or ''."""
        return mark_safe('<img src="{}"/>'.format(obj.cover.icons['64'])) \
            if obj.cover else ''

    def get_changeform_initial_data(self, request):
        """Seed the add form's parent from the changelist filter, if any."""
        data = super().get_changeform_initial_data(request)
        filters = QueryDict(request.GET.get('_changelist_filters', ''))
        data['parent'] = filters.get('parent', None)
        return data

    def _get_common_context(self, query, extra_context=None):
        """Resolve the 'parent' id in *query* into a Page for templates."""
        extra_context = extra_context or {}
        parent = query.get('parent', None)
        extra_context['parent'] = None if parent is None else \
            Page.objects.get_subclass(id=parent)
        return extra_context

    def render_change_form(self, request, context, *args, **kwargs):
        """Default the template's parent to the edited object's parent."""
        if context['original'] and not 'parent' in context:
            context['parent'] = context['original'].parent
        return super().render_change_form(request, context, *args, **kwargs)

    def add_view(self, request, form_url='', extra_context=None):
        """Add view with parent taken from the preserved changelist filters."""
        filters = QueryDict(request.GET.get('_changelist_filters', ''))
        extra_context = self._get_common_context(filters, extra_context)
        return super().add_view(request, form_url, extra_context)

    def changelist_view(self, request, extra_context=None):
        """Changelist view with parent resolved from the query string."""
        extra_context = self._get_common_context(request.GET, extra_context)
        return super().changelist_view(request, extra_context)
class PageAdmin(BasePageAdmin):
    """Admin for regular pages: adds the category column/filter and the
    featured/comments publication options on top of BasePageAdmin."""
    change_list_template = 'admin/aircox/page_change_list.html'
    list_display = BasePageAdmin.list_display + ('category',)
    list_editable = BasePageAdmin.list_editable + ('category',)
    # Fix: extend the parent's list_filter. The original extended
    # BasePageAdmin.list_editable, which only worked because both happen
    # to be ('status',); deriving from the wrong attribute would break as
    # soon as the base class changed.
    list_filter = BasePageAdmin.list_filter + ('category',)
    # NOTE(review): this *replaces* the parent's search_fields ('title',)
    # rather than extending them -- confirm dropping title search is wanted.
    search_fields = ('category__title',)
    # Deep-copy so in-place edits below don't mutate the parent's fieldsets.
    fieldsets = deepcopy(BasePageAdmin.fieldsets)
    fieldsets[0][1]['fields'].insert(fieldsets[0][1]['fields'].index('slug') + 1, 'category')
    fieldsets[1][1]['fields'] += ('featured', 'allow_comments')
@admin.register(StaticPage)
class StaticPageAdmin(BasePageAdmin):
    """Admin for static pages: exposes the extra 'attach_to' anchor field."""
    list_display = BasePageAdmin.list_display + ('attach_to',)
    # Deep-copy so the in-place edit below doesn't mutate the parent's list.
    fieldsets = deepcopy(BasePageAdmin.fieldsets)
    fieldsets[1][1]['fields'] += ('attach_to',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin for user comments, searchable by page title and nickname."""
    list_display = ('page_title', 'date', 'nickname')
    list_filter = ('date',)
    search_fields = ('page__title', 'nickname')

    def page_title(self, obj):
        """Changelist column: title of the commented page."""
        return obj.page.title
class NavItemInline(SortableInlineAdminMixin, admin.TabularInline):
    """Sortable tabular inline for navigation items (used by other admins)."""
    model = NavItem
| gpl-3.0 |
JeyZeta/Dangerous | Dangerous/Golismero/tools/sqlmap/lib/core/subprocessng.py | 7 | 5632 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import errno
import os
import subprocess
import sys
import time
from lib.core.settings import IS_WIN
if IS_WIN:
try:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
except ImportError:
pass
import msvcrt
else:
import select
import fcntl
if (sys.hexversion >> 16) >= 0x202:
FCNTL = fcntl
else:
import FCNTL
def blockingReadFromFD(fd):
    # Quick twist around original Twisted function
    # Blocking read from a non-blocking file descriptor:
    # retry on EAGAIN/EINTR until at least one chunk is read, raise
    # EOFError once the descriptor is closed (empty read).
    output = ""
    while True:
        try:
            output += os.read(fd, 8192)
        except (OSError, IOError), ioe:
            if ioe.args[0] in (errno.EAGAIN, errno.EINTR):
                # Uncomment the following line if the process seems to
                # take a huge amount of cpu time
                # time.sleep(0.01)
                continue
            else:
                raise
        break
    if not output:
        raise EOFError("fd %s has been closed." % fd)
    return output
def blockingWriteToFD(fd, data):
    # Another quick twist: write all of `data` to a (possibly
    # non-blocking) fd, retrying on EAGAIN/EINTR and recursing on
    # partial writes until everything has been written.
    while True:
        try:
            data_length = len(data)
            wrote_data = os.write(fd, data)
        except (OSError, IOError), io:
            if io.errno in (errno.EAGAIN, errno.EINTR):
                continue
            else:
                raise
        if wrote_data < data_length:
            # Partial write: recurse with the remaining tail.
            blockingWriteToFD(fd, data[wrote_data:])
        break
# the following code is taken from http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
class Popen(subprocess.Popen):
    """subprocess.Popen extended with non-blocking send/recv on the
    child's pipes. Platform-specific implementations of send/_recv are
    selected at class-creation time via the mswindows check below.
    recv/recv_err return '' when no data is ready and None once the
    corresponding pipe has been closed."""

    def recv(self, maxsize=None):
        # Non-blocking read from the child's stdout.
        return self._recv('stdout', maxsize)

    def recv_err(self, maxsize=None):
        # Non-blocking read from the child's stderr.
        return self._recv('stderr', maxsize)

    def send_recv(self, input='', maxsize=None):
        # Convenience: one write plus one poll of each output pipe.
        return self.send(input), self.recv(maxsize), self.recv_err(maxsize)

    def get_conn_maxsize(self, which, maxsize):
        # Clamp maxsize to [1, ...] with a default of 1024 bytes.
        if maxsize is None:
            maxsize = 1024
        elif maxsize < 1:
            maxsize = 1
        return getattr(self, which), maxsize

    def _close(self, which):
        # Close the named pipe attribute and mark it unusable (None).
        getattr(self, which).close()
        setattr(self, which, None)

    if subprocess.mswindows:
        def send(self, input):
            if not self.stdin:
                return None
            try:
                x = msvcrt.get_osfhandle(self.stdin.fileno())
                (errCode, written) = WriteFile(x, input)
            except ValueError:
                return self._close('stdin')
            except (subprocess.pywintypes.error, Exception), why:
                # 109 == ERROR_BROKEN_PIPE on Windows.
                if why[0] in (109, errno.ESHUTDOWN):
                    return self._close('stdin')
                raise
            return written

        def _recv(self, which, maxsize):
            conn, maxsize = self.get_conn_maxsize(which, maxsize)
            if conn is None:
                return None
            try:
                x = msvcrt.get_osfhandle(conn.fileno())
                # Peek first so the ReadFile below never blocks.
                (read, nAvail, nMessage) = PeekNamedPipe(x, 0)
                if maxsize < nAvail:
                    nAvail = maxsize
                if nAvail > 0:
                    (errCode, read) = ReadFile(x, nAvail, None)
            except ValueError:
                return self._close(which)
            except (subprocess.pywintypes.error, Exception), why:
                if why[0] in (109, errno.ESHUTDOWN):
                    return self._close(which)
                raise
            if self.universal_newlines:
                read = self._translate_newlines(read)
            return read
    else:
        def send(self, input):
            if not self.stdin:
                return None
            # Only write if the pipe is ready; otherwise report 0 bytes.
            if not select.select([], [self.stdin], [], 0)[1]:
                return 0
            try:
                written = os.write(self.stdin.fileno(), input)
            except OSError, why:
                if why[0] == errno.EPIPE: # broken pipe
                    return self._close('stdin')
                raise
            return written

        def _recv(self, which, maxsize):
            conn, maxsize = self.get_conn_maxsize(which, maxsize)
            if conn is None:
                return None
            # Temporarily switch the fd to non-blocking mode; restored
            # in the finally clause below.
            flags = fcntl.fcntl(conn, fcntl.F_GETFL)
            if not conn.closed:
                fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
            try:
                if not select.select([conn], [], [], 0)[0]:
                    return ''
                r = conn.read(maxsize)
                if not r:
                    # Empty read means the pipe was closed by the child.
                    return self._close(which)
                if self.universal_newlines:
                    r = self._translate_newlines(r)
                return r
            finally:
                if not conn.closed:
                    fcntl.fcntl(conn, fcntl.F_SETFL, flags)
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
    # Poll a Popen's output pipe for up to `t` seconds, sleeping in
    # `tr` slices, and return whatever was collected. Reads stderr
    # instead of stdout when `stderr` is truthy.
    # NOTE(review): parameter `e` is unused here (the upstream recipe
    # used it to optionally raise on disconnect) -- kept for API compat.
    if tr < 1:
        tr = 1
    x = time.time() + t
    y = []
    r = ''
    if stderr:
        pr = p.recv_err
    else:
        pr = p.recv
    while time.time() < x or r:
        r = pr()
        if r is None:
            # Pipe closed by the child: stop collecting.
            break
        elif r:
            y.append(r)
        else:
            time.sleep(max((x - time.time()) / tr, 0))
    return ''.join(y)
def send_all(p, data):
    # Keep writing to the Popen's stdin until all of `data` is sent.
    # Stops early if send() does not report a byte count (pipe closed).
    if not data:
        return
    while len(data):
        sent = p.send(data)
        if not isinstance(sent, int):
            break
        # buffer() avoids copying the unsent tail (Python 2 only).
        data = buffer(data, sent)
| mit |
multispot-software/transfer_convert | nbrun.py | 1 | 8332 | # Copyright (c) 2015-2017 Antonino Ingargiola
# License: MIT
"""
nbrun - Run an Jupyter/IPython notebook, optionally passing arguments.
USAGE
-----
Copy this file in the folder containing the master notebook used to
execute the other notebooks. Then use `run_notebook()` to execute
notebooks.
"""
import time
from pathlib import Path
from IPython.display import display, FileLink
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert import HTMLExporter
__version__ = '0.2'
def dict_to_code(mapping):
    """Render *mapping* as python source: one ``name = value`` line per item.

    Each key becomes a variable name and each value is embedded via
    repr(). Keys must therefore be valid python identifiers, and values
    must have a repr that evaluates back to an equivalent object
    (numbers, strings, and lists/tuples/dicts of those all qualify).

    Returns:
        A string of newline-separated assignment statements.
    """
    assignments = ["%s = %r" % item for item in mapping.items()]
    return '\n'.join(assignments)
def run_notebook(notebook_path, nb_kwargs=None, suffix='-out',
                 out_path_ipynb=None, out_path_html=None,
                 kernel_name=None, working_dir='./',
                 timeout=3600, execute_kwargs=None,
                 save_ipynb=True, save_html=False,
                 insert_pos=1, hide_input=False, display_links=True,
                 return_nb=False):
    """Runs a notebook and saves the output in a new notebook.

    Executes a notebook, optionally passing "arguments"
    similarly to passing arguments to a function.
    Notebook arguments are passed in a dictionary (`nb_kwargs`) which is
    converted into a string containing python assignments. This string is
    inserted in the template notebook as a code cell. The code assigns
    variables which can be used to control the execution. When "calling"
    a notebook, you need to know which arguments (variables) to pass.
    Unlike normal python functions, no check is performed on the input
    arguments. For sanity, we recommended describing the variables that
    can be assigned using a markdown cell at the beginning of the template
    notebook.

    Arguments:
        notebook_path (pathlib.Path or string): input notebook filename.
            This is the notebook to be executed (i.e. template notebook).
        nb_kwargs (dict or None): If not None, this dict is converted to a
            string of python assignments using the dict keys as variables
            names and the dict values as variables content. This string is
            inserted as code-cell in the notebook to be executed.
        suffix (string): suffix to append to the file name of the executed
            notebook. Argument ignored if `out_notebook_path` is not None.
        out_path_ipynb (pathlib.Path, string or None): file name for the
            output ipynb notebook. If None, the ouput ipynb notebook has
            the same name as the input notebook plus a suffix, specified
            by the `suffix` argument. If not None, `suffix` is ignored.
            If argument `save_ipynb` is False this argument is ignored.
        out_path_html (pathlib.Path, string or None): file name for the
            output HTML notebook. If None, the ouput HTML notebook has
            the same name as the input notebook plus a suffix, specified
            by the `suffix` argument. If not None, `suffix` is ignored.
            If argument `save_html` is False this argument is ignored.
        kernel_name (string or None): name of the kernel used to execute the
            notebook. Use the default kernel if None.
        working_dir (string or Path): the folder the kernel is started into.
        timeout (int): max execution time (seconds) for each cell before the
            execution is aborted.
        execute_kwargs (dict): additional arguments passed to
            `ExecutePreprocessor`.
        save_ipynb (bool): if True, save the output notebook in ipynb format.
            Default True.
        save_html (bool): if True, save the output notebook in HTML format.
            Default False.
        insert_pos (int): position of insertion of the code-cell containing
            the input arguments. Default is 1 (i.e. second cell). With this
            default, the input notebook can define, in the first cell, default
            values of input arguments (used when the notebook is executed
            with no arguments or through the Notebook GUI).
        hide_input (bool): whether to create a notebook with input cells
            hidden (useful to remind user that the auto-generated output
            is not meant to have the code edited.
        display_links (bool): if True, display/print "link" of template and
            output notebooks. Links are only rendered in a notebook.
            In a text terminal, links are displayed as full file names.
        return_nb (bool): if True, returns the notebook object. If False
            returns None. Default False.
    """
    timestamp_cell = ("**Executed:** %s\n\n**Duration:** %d seconds.\n\n"
                      "**Autogenerated from:** [%s](%s)")
    if nb_kwargs is None:
        nb_kwargs = {}
    else:
        header = '# Cell inserted during automated execution.'
        code = dict_to_code(nb_kwargs)
        code_cell = '\n'.join((header, code))
    notebook_path = Path(notebook_path)
    if not notebook_path.is_file():
        raise FileNotFoundError("Path '%s' not found." % notebook_path)

    def check_out_path(notebook_path, out_path, ext, save):
        # Default: next to the input notebook, with `suffix` appended.
        if out_path is None:
            out_path = Path(notebook_path.parent,
                            notebook_path.stem + suffix + ext)
        out_path = Path(out_path)
        if save and not out_path.parent.exists():
            msg = "Folder of the output %s file was not found:\n - %s\n."
            # Bug fix: report the parent of the path being checked
            # (`out_path`), not the outer-scope `out_path_ipynb`, which
            # may still be None here and crashed the error path.
            raise FileNotFoundError(msg % (ext, out_path.parent))
        return out_path

    out_path_ipynb = check_out_path(notebook_path, out_path_ipynb,
                                    ext='.ipynb', save=save_ipynb)
    out_path_html = check_out_path(notebook_path, out_path_html,
                                   ext='.html', save=save_html)
    if display_links:
        display(FileLink(str(notebook_path)))
    if execute_kwargs is None:
        execute_kwargs = {}
    execute_kwargs.update(timeout=timeout)
    if kernel_name is not None:
        execute_kwargs.update(kernel_name=kernel_name)
    ep = ExecutePreprocessor(**execute_kwargs)
    nb = nbformat.read(str(notebook_path), as_version=4)
    if hide_input:
        nb["metadata"].update({"hide_input": True})
    if len(nb_kwargs) > 0:
        # Inject the arguments cell (built above) into the notebook.
        nb['cells'].insert(insert_pos, nbformat.v4.new_code_cell(code_cell))
    start_time = time.time()
    try:
        # Execute the notebook
        ep.preprocess(nb, {'metadata': {'path': working_dir}})
    except:
        # Execution failed, print a message then re-raise.
        msg = ('Error executing the notebook "%s".\n'
               'Notebook arguments: %s\n\n'
               'See notebook "%s" for the traceback.' %
               (notebook_path, str(nb_kwargs), out_path_ipynb))
        print(msg)
        timestamp_cell += '\n\nError occurred during execution. See below.'
        raise
    finally:
        # Add timestamping cell and save outputs even on failure, so the
        # partially-executed notebook can be inspected.
        duration = time.time() - start_time
        timestamp_cell = timestamp_cell % (time.ctime(start_time), duration,
                                           notebook_path, out_path_ipynb)
        nb['cells'].insert(0, nbformat.v4.new_markdown_cell(timestamp_cell))
        # Save the executed notebook to disk
        if save_ipynb:
            nbformat.write(nb, str(out_path_ipynb))
            if display_links:
                display(FileLink(str(out_path_ipynb)))
        if save_html:
            html_exporter = HTMLExporter()
            body, resources = html_exporter.from_notebook_node(nb)
            with open(str(out_path_html), 'w') as f:
                f.write(body)
    if return_nb:
        return nb
| mit |
HiSPARC/station-software | user/hsmonitor/Uploader.py | 1 | 7589 | """Upload Events to the datastore
This module retrieves Events from the storage using the StorageManager
and uploads the events in batches to the datastore.
"""
import logging
from time import sleep, time
from cPickle import dumps
import requests
from threading import Thread, Semaphore, Event
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
from Observer import Observer
from StorageManager import StorageManager
from NagiosPush import NagiosPush
from NagiosResult import NagiosResult
from UserExceptions import ThreadCrashError
logger = logging.getLogger('hsmonitor.uploader')
# TODO add observer
# use BUI's trick to stop a thread
# Default number of events per upload POST.
BATCHSIZE = 100
# the waiting time will be doubled for each failed attempt
MINWAIT = 1 # minimum time to wait in seconds after a failed attempt
MAXWAIT = 60 # maximum time to wait in seconds after a failed attempt
# requests (connection, read) timeout
# timeout quickly on connections, but wait for response after upload
# a long ReadTimeout prevents duplicate events (see issue #2)
TIMEOUT = (3, 600)
class Uploader(Observer, Thread):
    """Thread that uploads batches of stored events to one datastore server.

    The thread blocks until at least ``minBatchSize`` events are pending,
    uploads at most ``maxBatchSize`` of them per POST, and backs off
    exponentially between ``retryAfter`` and ``maxWait`` seconds after a
    failed upload (failures are also reported to Nagios).
    """

    def __init__(self, serverID, stationID, password, URL, config,
                 retryAfter=MINWAIT, maxWait=MAXWAIT,
                 minBatchSize=BATCHSIZE, maxBatchSize=BATCHSIZE):
        self.storageManager = StorageManager()
        self.serverID = serverID
        self.stationID = stationID
        self.password = password
        self.URL = URL
        self.nagiosPush = NagiosPush(config)
        self.minBatchSize = minBatchSize
        self.maxBatchSize = maxBatchSize
        # Bug fix: honour the constructor arguments. The original
        # assigned the module constants here, silently ignoring the
        # retryAfter/maxWait parameters (defaults are unchanged).
        self.retryAfter = retryAfter
        self.maxWait = maxWait
        # lock to protect numEvents
        self.numEventsLock = Semaphore(1)
        # Semaphore to block if the number of events drops below minBatchSize
        self.noEventsSem = Semaphore(0)
        super(Uploader, self).__init__(name='Uploader')
        self.stop_event = Event()
        self.isRunning = False
        # Crash timestamps used by init_restart(); made per-instance
        # (originally a shared, mutable class attribute, so uploaders for
        # different servers wrongly pooled their crash history).
        self.crashes = []

    def setNumServer(self, numServer):
        """Sets the number of servers to upload to.

        Need to be set before changing the UploadedTo-status
        of events in the StorageManager.
        """
        self.storageManager.setNumServer(numServer)

    def stop(self):
        """Ask the thread to terminate and unblock it if it is waiting."""
        self.stop_event.set()
        # release semaphore so a blocked run() loop can observe the stop
        self.noEventsSem.release()

    def init_restart(self):
        """Support for restarting crashed threads.

        Re-initialises the Thread base class; raises ThreadCrashError if
        three crashes happened within the last minute.
        """
        if len(self.crashes) > 3 and time() - self.crashes[-3] < 60.:
            raise ThreadCrashError("Thread has crashed three times in "
                                   "less than a minute")
        else:
            super(Uploader, self).__init__()
            self.crashes.append(time())

    def notify(self, count=1):
        """Notify the uploader that count events were received."""
        if (self.isRunning):
            shouldRelease = 0
            self.numEventsLock.acquire()
            oldNumEvents = self.numEvents
            self.numEvents += count
            logger.debug('%i: %i events pending.' %
                         (self.serverID, self.numEvents))
            # calculate if uploader-thread should be unblocked: only on
            # the transition from below to at/above minBatchSize
            if (self.numEvents >= self.minBatchSize and
                    oldNumEvents < self.minBatchSize):
                shouldRelease = 1
            self.numEventsLock.release()
            if (shouldRelease):
                self.noEventsSem.release()

    def __getNumEventsToUpload(self):
        """Gives the number of events that the Uploader can upload now.

        The result will be between min and max batch size. If insufficient
        events are available this function will block on noEventsSem.
        """
        shouldBlock = False
        self.numEventsLock.acquire()
        res = min(self.numEvents, self.maxBatchSize)
        if (res < self.minBatchSize):
            shouldBlock = True
        self.numEventsLock.release()
        if shouldBlock:
            logger.debug('%i: Blocked: Too few events' % self.serverID)
            self.noEventsSem.acquire()
            logger.debug('%i: Unblocked' % self.serverID)
            return self.minBatchSize
        else:
            return res

    def __upload(self, elist):
        """Upload a list of events to the database server.

        Returns the server's response text, or an error description
        string when the request failed ('100' signals success).
        """
        elist_data = dumps(elist)
        # md5 checksum lets the server detect corrupted payloads
        checksum = md5(elist_data).hexdigest()
        data = {
            'station_id': self.stationID,
            'password': self.password,
            'data': elist_data,
            'checksum': checksum
        }
        # Open the connection and send our data. Exceptions are caught
        # explicitly to make sure we understand the implications of errors.
        try:
            r = requests.post(self.URL, data=data, timeout=TIMEOUT)
        except (requests.Timeout, requests.ConnectionError) as exc:
            returncode = ('Connection error in function'
                          '__upload: %s' % str(exc))
        except Exception as exc:
            returncode = ('Uncatched exception occured in function '
                          '__upload: %s' % str(exc))
        else:
            returncode = r.text
        return returncode

    def run(self):
        """Main loop that continously polls for new events and uploads them"""
        logger.info('%i: Thread started for %s.' % (self.serverID, self.URL))
        # Initialize storage manager
        self.storageManager.openConnection()
        # Number of events that have been received
        logger.debug('%i: Getting number of events to upload.' % self.serverID)
        self.numEvents = self.storageManager.getNumEventsServer(self.serverID)
        logger.debug('%i: %i events in storage.' %
                     (self.serverID, self.numEvents))
        self.isRunning = True
        numFailedAttempts = 0
        while not self.stop_event.isSet():
            bsize = self.__getNumEventsToUpload()
            (elist, eidlist) = self.storageManager.getEvents(self.serverID,
                                                             bsize)
            returncode = self.__upload(elist)
            if returncode == '100':
                logger.info('%i: %d events uploaded to %s.' %
                            (self.serverID, bsize, self.URL))
                numFailedAttempts = 0
                # Record successful upload in storagemanager
                self.storageManager.setUploaded(self.serverID, eidlist)
                # Reduce counter
                self.numEventsLock.acquire()
                self.numEvents -= bsize
                self.numEventsLock.release()
            else:
                numFailedAttempts += 1
                msg1 = ("Error Uploader %i: %s: Return code: %s." %
                        (self.serverID, self.URL, returncode))
                logger.error(msg1)
                msg2 = ("Error Uploader %i: %d events attempted to upload, "
                        "number of failed attempts: %i." %
                        (self.serverID, bsize, numFailedAttempts))
                logger.error(msg2)
                msg3 = msg1 + "\n" + msg2
                nr = NagiosResult(2, msg3, "ServerCheck")
                self.nagiosPush.sendToNagios(nr)
                # exponential backoff, capped at maxWait
                sleeptime = min(2 ** numFailedAttempts * self.retryAfter,
                                self.maxWait)
                logger.debug('%i: Sleeping for %f seconds.' %
                             (self.serverID, sleeptime))
                sleep(sleeptime)
        # Bug fix: was `self.servedID`, which raised AttributeError when
        # the thread terminated.
        logger.warning('%i: Thread stopped!' % self.serverID)
| gpl-3.0 |
jk1/intellij-community | python/helpers/pydev/_pydev_imps/_pydev_inspect.py | 92 | 31489 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), istraceback(),
isframe(), iscode(), isbuiltin(), isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys, os, types, string, re, imp, tokenize
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects provide these attributes:
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    # types.ClassType matches old-style (Python 2) classes; the __bases__
    # check catches new-style classes and other class-like objects.
    return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None"""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.
    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    im_func attribute (etc) when an object passes ismethod()."""
    return (hasattr(object, "__get__")
            and not hasattr(object, "__set__") # else it's a data descriptor
            and not ismethod(object) # mutual exclusion
            and not isfunction(object)
            and not isclass(object))
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)"""
    return isinstance(object, types.FunctionType)
def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects provide these attributes:
        tb_frame        frame object at this level
        tb_lasti        index of last attempted instruction in bytecode
        tb_lineno       current line number in Python source code
        tb_next         next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide these attributes:
        f_back          next outer frame object (this frame's caller)
        f_builtins      built-in namespace seen by this frame
        f_code          code object being executed in this frame
        f_exc_traceback traceback if raised in this frame, or None
        f_exc_type      exception type if raised in this frame, or None
        f_exc_value     exception value if raised in this frame, or None
        f_globals       global namespace seen by this frame
        f_lasti         index of last attempted instruction in bytecode
        f_lineno        current line number in Python source code
        f_locals        local namespace seen by this frame
        f_restricted    0 or 1 if frame is in restricted execution mode
        f_trace         tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in functions and methods provide these attributes:
        __doc__         documentation string
        __name__        original name of this function or method
        __self__        instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Union of the four callable-kind predicates defined above.
    return (isbuiltin(object)
            or isfunction(object)
            or ismethod(object)
            or ismethoddescriptor(object))
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.

    If *predicate* is given, only members whose value satisfies
    predicate(value) are included."""
    pairs = [(name, getattr(object, name)) for name in dir(object)]
    if predicate:
        pairs = [(name, value) for name, value in pairs if predicate(value)]
    pairs.sort()
    return pairs
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            obj = getattr(cls, name)
        # Figure out where it was defined.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break
        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]
        # Also get the object via getattr.
        obj_via_getattr = getattr(cls, name)
        # Classify the object. Order matters: staticmethod/classmethod/
        # property wrappers are detected on the raw __dict__ object, the
        # generic method check on the getattr result.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (ismethod(obj_via_getattr) or
              ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"
        result.append((name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
    # Simulate the "classic class" search order: append cls and then its
    # bases depth-first, left-to-right, skipping classes already seen.
    if cls in accum:
        return
    accum.append(cls)
    for base in cls.__bases__:
        _searchbases(base, accum)
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    try:
        # New-style classes expose the MRO directly.
        return cls.__mro__
    except AttributeError:
        # Classic (old-style) class: emulate the depth-first lookup order.
        found = []
        _searchbases(cls, found)
        return tuple(found)
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Tabs are expanded first so mixed-indent lines are measured in spaces.
    expline = string.expandtabs(line)
    return len(expline) - len(string.lstrip(expline))
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if not isinstance(doc, (str, unicode)):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find the smallest indent over all non-blank lines after the first.
        margin = None
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if not content: continue
            indent = len(line) - content
            if margin is None: margin = indent
            else: margin = min(margin, indent)
        # Strip that common margin from every line but the first.
        if margin is not None:
            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
        return string.join(lines, '\n')
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Modules and classes are resolved via their module's __file__; other
    objects are unwrapped step by step (method -> function -> code,
    traceback -> frame -> code) until a code object's co_filename is
    available. Raises TypeError for objects with no associated file.
    """
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError, 'arg is a built-in module'
    if isclass(object):
        # Look up the defining module; built-in classes have no file.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError, 'arg is a built-in class'
    # Successive unwrapping; im_func/func_code are Python-2 attribute names.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError, 'arg is not a module, class, method, ' \
                     'function, traceback, frame, or code object'
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns (name, suffix, mode, mtype) for a recognized import suffix, or
    None (implicitly) when the filename matches no suffix from imp.
    """
    filename = os.path.basename(path)
    # Decorate each suffix with its negated length so that sorting puts the
    # longest suffixes first (tuple-parameter lambda is Python-2-only syntax).
    suffixes = map(lambda (suffix, mode, mtype):
                   (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype
def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    return info[0] if info else None
def getsourcefile(object):
    """Return the Python source file an object was defined in, if it exists.

    A .pyc/.pyo filename is mapped back to its .py source; binary-suffix
    files yield None, as does a source file missing from disk.
    """
    filename = getfile(object)
    if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
        # Map a compiled file name back to the corresponding source name.
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file. We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # Falls through (returns None) when the source file does not exist.
def getabsfile(object):
    """Return an absolute path to the source or compiled file for an object.
    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    # Prefer the source file; fall back to the compiled/binary file.
    path = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(path))
# Cache mapping a module's absolute file name to its module name.
modulesbyfile = {}

def getmodule(object):
    """Return the module an object was defined in, or None if not found.

    Modules return themselves; classes are resolved via __module__; other
    objects are matched by their defining file against every imported
    module, then by name against __main__ and __builtin__.
    """
    if ismodule(object):
        return object
    if isclass(object):
        return sys.modules.get(object.__module__)
    try:
        file = getabsfile(object)
    except TypeError:
        return None
    # 'in' replaces dict.has_key(), which is deprecated in Python 2 and
    # removed in Python 3; behavior is identical.
    if file in modulesbyfile:
        return sys.modules[modulesbyfile[file]]
    # Cache miss: refresh the cache from every file-backed imported module.
    for module in sys.modules.values():
        if hasattr(module, '__file__'):
            modulesbyfile[getabsfile(module)] = module.__name__
    if file in modulesbyfile:
        return sys.modules[modulesbyfile[file]]
    # Last resort: look for an identical object bound to the same name in
    # __main__ or the builtin module.
    main = sys.modules['__main__']
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Implicitly returns None when no owning module is found.
def findsource(object):
    """Return the entire source file and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list. An IOError
    is raised if the source code cannot be retrieved."""
    try:
        file = open(getsourcefile(object))
    except (TypeError, IOError):
        raise IOError, 'could not get source code'
    lines = file.readlines()
    file.close()
    if ismodule(object):
        return lines, 0
    if isclass(object):
        # Scan the file for the first 'class <name>' definition line.
        name = object.__name__
        pat = re.compile(r'^\s*class\s*' + name + r'\b')
        for i in range(len(lines)):
            if pat.match(lines[i]): return lines, i
        else: raise IOError, 'could not find class definition'
    # Unwrap to a code object (im_func/func_code are Python-2 names).
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError, 'could not find function definition'
        # Walk backwards from co_firstlineno to the nearest def/lambda line.
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*\slambda(:|\s))')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError, 'could not find code object'
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    For a module, this is the comment block at the top of the file (after an
    optional '#!' line); for other objects, the contiguous run of comment
    lines at the same indentation directly above the definition. Returns
    None when the source is unavailable or there is no such block.
    """
    try: lines, lnum = findsource(object)
    except IOError: return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and string.strip(lines[start]) in ['', '#']:
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            # Collect comment lines upwards while indentation matches.
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' lines from both ends of the block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
class ListReader:
    """Provide a readline() method to return lines from a list of strings."""
    def __init__(self, lines):
        # Snapshot of the lines plus a cursor for the next line to hand out.
        self.lines = lines
        self.index = 0
    def readline(self):
        # Once the list is exhausted, keep returning '' like a file at EOF.
        if self.index >= len(self.lines):
            return ''
        line = self.lines[self.index]
        self.index += 1
        return line
# Raised by BlockFinder.tokeneater to stop tokenization once a code block
# ends; its single argument (read via .args[0] in getblock) is the line
# number where the block ends.
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        # indent: current INDENT/DEDENT nesting depth.
        # started: becomes 1 at the first NAME token (the def/class keyword).
        # last: line number of the most recent logical NEWLINE.
        self.indent = 0
        self.started = 0
        self.last = 0
    # Tuple parameters in the signature are Python-2-only syntax; this is
    # the callback protocol expected by tokenize.tokenize().
    def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
        if not self.started:
            if type == tokenize.NAME: self.started = 1
        elif type == tokenize.NEWLINE:
            self.last = srow
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # Dedenting back to column 0 means the block is finished.
            if self.indent == 0: raise EndOfBlock, self.last
        elif type == tokenize.NAME and scol == 0:
            # A new top-level name also terminates the block.
            raise EndOfBlock, self.last
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    try:
        # BlockFinder raises EndOfBlock (carrying the last line number of the
        # block) once the definition starting at lines[0] is fully tokenized.
        tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater)
    except EndOfBlock, eob:
        return lines[:eob.args[0]]
    # Fooling the indent/dedent logic implies a one-line definition
    return lines[:1]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found. An IOError is
    raised if the source code cannot be retrieved."""
    all_lines, start = findsource(object)
    # A module is its whole file; anything else is the block at 'start'.
    if ismodule(object):
        return all_lines, 0
    return getblock(all_lines[start:]), start + 1
def getsource(object):
    """Return the text of the source code for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a single string. An
    IOError is raised if the source code cannot be retrieved."""
    lines, lnum = getsourcelines(object)
    # ''.join replaces string.join(lines, ''), which was removed in Python 3;
    # the result is byte-identical.
    return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree().

    Emits one (class, bases) entry per class, sorted by class name, with a
    nested list following any class that has entries in *children*.
    """
    results = []
    # key= replaces the Python-2-only cmp-style sort argument; the ordering
    # (ascending by __name__) is unchanged.
    classes.sort(key=lambda c: c.__name__)
    for c in classes:
        results.append((c, c.__bases__))
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if c in children:
            results.append(walktree(children[c], children, c))
    return results
def getclasstree(classes, unique=0):
    """Arrange the given list of classes into a hierarchy of nested lists.
    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list. Each entry is a 2-tuple
    containing a class and a tuple of its base classes. If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list. Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}   # base class -> list of derived classes from 'classes'
    roots = []      # classes with no bases, plus bases outside 'classes'
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                # 'not in' replaces dict.has_key() (removed in Python 3).
                if parent not in children:
                    children[parent] = []
                children[parent].append(c)
                # With unique=1, record the class under one parent only.
                if unique and parent in classes: break
        elif c not in roots:
            roots.append(c)
    for parent in children.keys():
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
# Bit flags tested against code.co_flags in getargs() below to detect
# * (varargs) and ** (keyword) parameters.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    if not iscode(co): raise TypeError, 'arg is not a code object'
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments.
    if not sys.platform.startswith('java'):#Jython doesn't have co_code
        code = co.co_code
        import dis
        for i in range(nargs):
            # Python-2 anonymous tuple arguments get placeholder names
            # starting with '' or '.'; recover the real structure by decoding
            # the UNPACK_*/STORE_FAST bytecode that unpacks them.
            if args[i][:1] in ['', '.']:
                stack, remain, count = [], [], []
                while step < len(code):
                    # co_code is a str on Python 2, so each byte needs ord().
                    op = ord(code[step])
                    step = step + 1
                    if op >= dis.HAVE_ARGUMENT:
                        opname = dis.opname[op]
                        # Two-byte little-endian operand (pre-3.6 bytecode).
                        value = ord(code[step]) + ord(code[step + 1]) * 256
                        step = step + 2
                        if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
                            remain.append(value)
                            count.append(value)
                        elif opname == 'STORE_FAST':
                            stack.append(names[value])
                            # Close out finished tuples, nesting their names.
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
                args[i] = stack[0]
    # The * parameter, if any, follows the positional names in co_varnames.
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    # The ** parameter, if any, follows the * parameter.
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, varkw
def getargspec(func):
    """Get the names and default values of a function's arguments.
    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments."""
    # im_func/func_code/func_defaults are Python-2 attribute names.
    if ismethod(func):
        func = func.im_func
    if not isfunction(func): raise TypeError, 'arg is not a Python function'
    args, varargs, varkw = getargs(func.func_code)
    return args, varargs, varkw, func.func_defaults
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.
    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    arg_names, star_name, kw_name = getargs(frame.f_code)
    return arg_names, star_name, kw_name, frame.f_locals
def joinseq(seq):
    """Join a sequence of strings into a parenthesized tuple display.

    ['a'] -> '(a,)' (trailing comma, like a one-tuple) and
    ['a', 'b'] -> '(a, b)'.
    """
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    # ', '.join replaces string.join, which was removed in Python 3.
    return '(' + ', '.join(seq) + ')'

def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    # Exact type() check (not isinstance) preserves the original semantics of
    # 'type(object) in [types.ListType, types.TupleType]': subclasses of
    # list/tuple are treated as scalars. The types module aliases were
    # removed in Python 3.
    if type(object) in (list, tuple):
        return join([strseq(element, convert, join) for element in object])
    return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.
    The first four arguments are (args, varargs, varkw, defaults). The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments."""
    specs = []
    if defaults:
        # Defaults align with the *last* len(defaults) positional arguments.
        firstdefault = len(args) - len(defaults)
    for i in range(len(args)):
        spec = strseq(args[i], formatarg, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs:
        specs.append(formatvarargs(varargs))
    if varkw:
        specs.append(formatvarkw(varkw))
    # ', '.join replaces string.join (removed in Python 3); output unchanged.
    return '(' + ', '.join(specs) + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals). The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments."""
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # Render one argument as 'name=value' using the frame's locals.
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    # ', '.join replaces string.join (removed in Python 3); output unchanged.
    return '(' + ', '.join(specs) + ')'
# -------------------------------------------------- stack frame extraction
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    # Deliberately disabled in this copy; the original implementation is
    # retained below (commented out) for reference. Callers such as
    # getouterframes()/getinnerframes() will therefore raise as well.
    raise NotImplementedError
    # if istraceback(frame):
    # frame = frame.tb_frame
    # if not isframe(frame):
    # raise TypeError, 'arg is not a frame or traceback object'
    #
    # filename = getsourcefile(frame)
    # lineno = getlineno(frame)
    # if context > 0:
    # start = lineno - 1 - context//2
    # try:
    # lines, lnum = findsource(frame)
    # except IOError:
    # lines = index = None
    # else:
    # start = max(start, 1)
    # start = min(start, len(lines) - context)
    # lines = lines[start:start+context]
    # index = lineno - 1 - start
    # else:
    # lines = index = None
    #
    # return (filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # Written by Marc-Andr Lemburg; revised by Jim Hugunin and Fredrik Lundh.
    lineno = frame.f_lineno
    code = frame.f_code
    if hasattr(code, 'co_lnotab'):
        # co_lnotab is a string of (bytecode-offset delta, line delta) byte
        # pairs; accumulate deltas until we pass the frame's last
        # instruction (f_lasti). ord() implies a Python-2 str table.
        table = code.co_lnotab
        lineno = code.co_firstlineno
        addr = 0
        for i in range(0, len(table), 2):
            addr = addr + ord(table[i])
            if addr > frame.f_lasti: break
            lineno = lineno + ord(table[i + 1])
    return lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    while current:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    while current:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        # Raising a string exception (Python-2-only) just to capture the
        # current traceback; f_back skips this function's own frame.
        raise 'catch me'
    except:
        return sys.exc_traceback.tb_frame.f_back #@UndefinedVariable
# When available, the C-level sys._getframe is used instead of the
# exception trick above.
if hasattr(sys, '_getframe'): currentframe = sys._getframe
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    # .f_back skips stack()'s own frame so the list starts at the caller.
    return getouterframes(currentframe().f_back, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    # sys.exc_traceback is Python-2-only module-level exception state.
    return getinnerframes(sys.exc_traceback, context) #@UndefinedVariable
| apache-2.0 |
teslaji/homebase | venv/HomeBase/lib/python2.7/site-packages/wheel/bdist_wheel.py | 219 | 17006 | """
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import wheel
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.util import get_platform
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from .pep425tags import get_abbr_impl, get_impl_ver
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
def safer_name(name):
    """Return *name* normalized by pkg_resources, with '-' mapped to '_'."""
    normalized = safe_name(name)
    return normalized.replace('-', '_')
def safer_version(version):
    """Return *version* normalized by pkg_resources, with '-' mapped to '_'."""
    normalized = safe_version(version)
    return normalized.replace('-', '_')
class bdist_wheel(Command):
    # distutils command: build the project, install it into a temporary
    # tree, convert the egg-info metadata to dist-info, and zip the result
    # into a .whl archive (PEP 427).
    description = 'create a wheel distribution'
    user_options = [('bdist-dir=', 'b',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('relative', None,
                     "build the archive using relative paths"
                     "(default: false)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                    ('universal', None,
                     "make a universal wheel"
                     " (default: false)"),
                    ('python-tag=', None,
                     "Python implementation compatibility tag"
                     " (default: py%s)" % get_impl_ver()[0]),
                    ]
    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
    def initialize_options(self):
        """Set every command option to its pre-finalization default."""
        self.bdist_dir = None
        self.data_dir = None
        self.plat_name = None
        self.format = 'zip'
        self.keep_temp = False
        self.dist_dir = None
        self.distinfo_dir = None
        self.egginfo_dir = None
        self.root_is_purelib = None
        self.skip_build = None
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        self.python_tag = 'py' + get_impl_ver()[0]
    def finalize_options(self):
        """Resolve unset options from the 'bdist' command and setup.cfg."""
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wheel')
        self.data_dir = self.wheel_dist_name + '.data'
        need_options = ('dist_dir', 'plat_name', 'skip_build')
        self.set_undefined_options('bdist',
                                   *zip(need_options, need_options))
        self.root_is_purelib = self.distribution.is_pure()
        # Support legacy [wheel] section for setting universal
        wheel = self.distribution.get_option_dict('wheel')
        if 'universal' in wheel:
            # please don't define this in your global configs
            val = wheel['universal'][1].strip()
            if val.lower() in ('1', 'true', 'yes'):
                self.universal = True
    @property
    def wheel_dist_name(self):
        """Return distribution full name with - replaced with _"""
        return '-'.join((safer_name(self.distribution.get_name()),
                         safer_version(self.distribution.get_version())))
    def get_tag(self):
        """Return the (implementation, abi, platform) compatibility tag.

        Pure distributions get ('py2.py3'|pyN, 'none', 'any'); others derive
        the tag from the interpreter and platform.
        """
        supported_tags = pep425tags.get_supported()
        if self.distribution.is_pure():
            if self.universal:
                impl = 'py2.py3'
            else:
                impl = self.python_tag
            tag = (impl, 'none', 'any')
        else:
            plat_name = self.plat_name
            if plat_name is None:
                plat_name = get_platform()
            plat_name = plat_name.replace('-', '_').replace('.', '_')
            impl_name = get_abbr_impl()
            impl_ver = get_impl_ver()
            # PEP 3149 -- no SOABI in Py 2
            # For PyPy?
            # "pp%s%s" % (sys.pypy_version_info.major,
            # sys.pypy_version_info.minor)
            abi_tag = sysconfig.get_config_vars().get('SOABI', 'none')
            if abi_tag.startswith('cpython-'):
                abi_tag = 'cp' + abi_tag.rsplit('-', 1)[-1]
            tag = (impl_name + impl_ver, abi_tag, plat_name)
            # XXX switch to this alternate implementation for non-pure:
            assert tag == supported_tags[0]
        return tag
    def get_archive_basename(self):
        """Return archive name without extension"""
        impl_tag, abi_tag, plat_tag = self.get_tag()
        archive_basename = "%s-%s-%s-%s" % (
            self.wheel_dist_name,
            impl_tag,
            abi_tag,
            plat_tag)
        return archive_basename
    def run(self):
        """Build, pseudo-install, write metadata/RECORD, and archive a wheel."""
        build_scripts = self.reinitialize_command('build_scripts')
        build_scripts.executable = 'python'
        if not self.skip_build:
            self.run_command('build')
        # Install into self.bdist_dir without byte-compiling.
        install = self.reinitialize_command('install',
                                            reinit_subcommands=True)
        install.root = self.bdist_dir
        install.compile = False
        install.skip_build = self.skip_build
        install.warn_dir = False
        # A wheel without setuptools scripts is more cross-platform.
        # Use the (undocumented) `no_ep` option to setuptools'
        # install_scripts command to avoid creating entry point scripts.
        install_scripts = self.reinitialize_command('install_scripts')
        install_scripts.no_ep = True
        # Use a custom scheme for the archive, because we have to decide
        # at installation time which scheme to use.
        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
            setattr(install,
                    'install_' + key,
                    os.path.join(self.data_dir, key))
        basedir_observed = ''
        if os.name == 'nt':
            # win32 barfs if any of these are ''; could be '.'?
            # (distutils.command.install:change_roots bug)
            basedir_observed = os.path.join(self.data_dir, '..')
            self.install_libbase = self.install_lib = basedir_observed
        setattr(install,
                'install_purelib' if self.root_is_purelib else 'install_platlib',
                basedir_observed)
        logger.info("installing to %s", self.bdist_dir)
        self.run_command('install')
        archive_basename = self.get_archive_basename()
        pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            archive_root = os.path.join(
                self.bdist_dir,
                self._ensure_relative(install.install_base))
        self.set_undefined_options(
            'install_egg_info', ('target', 'egginfo_dir'))
        self.distinfo_dir = os.path.join(self.bdist_dir,
                                         '%s.dist-info' % self.wheel_dist_name)
        self.egg2dist(self.egginfo_dir,
                      self.distinfo_dir)
        self.write_wheelfile(self.distinfo_dir)
        self.write_record(self.bdist_dir, self.distinfo_dir)
        # Make the archive
        if not os.path.exists(self.dist_dir):
            os.makedirs(self.dist_dir)
        wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
        # Sign the archive
        if 'WHEEL_TOOL' in os.environ:
            subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_wheel', get_python_version(), wheel_name))
        if not self.keep_temp:
            if self.dry_run:
                logger.info('removing %s', self.bdist_dir)
            else:
                rmtree(self.bdist_dir)
    def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'):
        """Write the WHEEL metadata file (RFC 822 style) into wheelfile_base."""
        from email.message import Message
        msg = Message()
        msg['Wheel-Version'] = '1.0' # of the spec
        msg['Generator'] = generator
        msg['Root-Is-Purelib'] = str(self.root_is_purelib).lower()
        # Doesn't work for bdist_wininst
        # One 'Tag' header per expanded impl/abi/platform combination.
        impl_tag, abi_tag, plat_tag = self.get_tag()
        for impl in impl_tag.split('.'):
            for abi in abi_tag.split('.'):
                for plat in plat_tag.split('.'):
                    msg['Tag'] = '-'.join((impl, abi, plat))
        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
        logger.info('creating %s', wheelfile_path)
        with open(wheelfile_path, 'w') as f:
            Generator(f, maxheaderlen=0).flatten(msg)
    def _ensure_relative(self, path):
        # copied from dir_util, deleted
        # Strip a leading path separator so the path can be joined under
        # another root.
        drive, path = os.path.splitdrive(path)
        if path[0:1] == os.sep:
            path = drive + path[1:]
        return path
    def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
        """Thin wrapper over metadata.pkginfo_to_metadata (overridable hook)."""
        return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
    def license_file(self):
        """Return license filename from a license-file key in setup.cfg, or None."""
        metadata = self.distribution.get_option_dict('metadata')
        if not 'license_file' in metadata:
            return None
        return metadata['license_file'][1]
    def setupcfg_requirements(self):
        """Generate requirements from setup.cfg as
        ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
        section in setup.cfg:

        [metadata]
        provides-extra = extra1
            extra2
        requires-dist = requirement; qualifier
            another; qualifier2
            unqualified

        Yields

        ('Provides-Extra', 'extra1'),
        ('Provides-Extra', 'extra2'),
        ('Requires-Dist', 'requirement; qualifier'),
        ('Requires-Dist', 'another; qualifier2'),
        ('Requires-Dist', 'unqualified')
        """
        metadata = self.distribution.get_option_dict('metadata')
        # our .ini parser folds - to _ in key names:
        for key, title in (('provides_extra', 'Provides-Extra'),
                           ('requires_dist', 'Requires-Dist')):
            if not key in metadata:
                continue
            field = metadata[key]
            for line in field[1].splitlines():
                line = line.strip()
                if not line:
                    continue
                yield (title, line)
    def add_requirements(self, metadata_path):
        """Add additional requirements from setup.cfg to file metadata_path"""
        additional = list(self.setupcfg_requirements())
        if not additional: return
        pkg_info = read_pkg_info(metadata_path)
        if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
            # setup.cfg values replace (not merge with) any setup.py ones.
            warnings.warn('setup.cfg requirements overwrite values from setup.py')
            del pkg_info['Provides-Extra']
            del pkg_info['Requires-Dist']
        for k, v in additional:
            pkg_info[k] = v
        write_pkg_info(metadata_path, pkg_info)
    def egg2dist(self, egginfo_path, distinfo_path):
        """Convert an .egg-info directory into a .dist-info directory"""
        def adios(p):
            """Appropriately delete directory, file or link."""
            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.exists(p):
                os.unlink(p)
        adios(distinfo_path)
        if not os.path.exists(egginfo_path):
            # There is no egg-info. This is probably because the egg-info
            # file/directory is not named matching the distribution name used
            # to name the archive file. Check for this case and report
            # accordingly.
            import glob
            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
            possible = glob.glob(pat)
            err = "Egg metadata expected at %s but not found" % (egginfo_path,)
            if possible:
                alt = os.path.basename(possible[0])
                err += " (%s found - possible misnamed archive file?)" % (alt,)
            raise ValueError(err)
        if os.path.isfile(egginfo_path):
            # .egg-info is a single file
            pkginfo_path = egginfo_path
            pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
            os.mkdir(distinfo_path)
        else:
            # .egg-info is a directory
            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
            pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
            # ignore common egg metadata that is useless to wheel
            shutil.copytree(egginfo_path, distinfo_path,
                            ignore=lambda x, y: set(('PKG-INFO',
                                                     'requires.txt',
                                                     'SOURCES.txt',
                                                     'not-zip-safe',)))
            # delete dependency_links if it is only whitespace
            # NOTE(review): this open() handle is never closed explicitly —
            # it relies on refcounting; consider a 'with' block.
            dependency_links = os.path.join(distinfo_path, 'dependency_links.txt')
            if not open(dependency_links, 'r').read().strip():
                adios(dependency_links)
        write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
        # XXX deprecated. Still useful for current distribute/setuptools.
        metadata_path = os.path.join(distinfo_path, 'METADATA')
        self.add_requirements(metadata_path)
        # XXX intentionally a different path than the PEP.
        metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
        pymeta = pkginfo_to_dict(metadata_path,
                                 distribution=self.distribution)
        if 'description' in pymeta:
            # Long description is written out as a separate document.
            description_filename = 'DESCRIPTION.rst'
            description_text = pymeta.pop('description')
            description_path = os.path.join(distinfo_path,
                                            description_filename)
            with open(description_path, "wb") as description_file:
                description_file.write(description_text.encode('utf-8'))
            pymeta['extensions']['python.details']['document_names']['description'] = description_filename
        # XXX heuristically copy any LICENSE/LICENSE.txt?
        license = self.license_file()
        if license:
            license_filename = 'LICENSE.txt'
            shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
            pymeta['extensions']['python.details']['document_names']['license'] = license_filename
        with open(metadata_json_path, "w") as metadata_json:
            json.dump(pymeta, metadata_json)
        adios(egginfo_path)
    def write_record(self, bdist_dir, distinfo_dir):
        """Write the RECORD manifest: one (path, sha256 hash, size) row per
        installed file; the RECORD file itself gets empty hash/size fields."""
        from wheel.util import urlsafe_b64encode
        record_path = os.path.join(distinfo_dir, 'RECORD')
        record_relpath = os.path.relpath(record_path, bdist_dir)
        def walk():
            # Yield every file under the pseudo-install tree.
            for dir, dirs, files in os.walk(bdist_dir):
                for f in files:
                    yield os.path.join(dir, f)
        def skip(path):
            """Wheel hashes every possible file."""
            return (path == record_relpath)
        with open_for_csv(record_path, 'w+') as record_file:
            writer = csv.writer(record_file)
            for path in walk():
                relpath = os.path.relpath(path, bdist_dir)
                if skip(relpath):
                    # The RECORD file cannot contain its own hash.
                    hash = ''
                    size = ''
                else:
                    with open(path, 'rb') as f:
                        data = f.read()
                    digest = hashlib.sha256(data).digest()
                    hash = 'sha256=' + native(urlsafe_b64encode(digest))
                    size = len(data)
                # NOTE(review): 'record_path' is rebound here, shadowing the
                # RECORD file path computed above (harmless after the loop
                # starts, but confusing); 'hash' also shadows the builtin.
                record_path = os.path.relpath(
                    path, bdist_dir).replace(os.path.sep, '/')
                writer.writerow((record_path, hash, size))
| gpl-3.0 |
JVillella/tensorflow | tensorflow/python/__init__.py | 7 | 8573 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import *
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python.estimator import estimator_lib as estimator
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import spectral_ops as spectral
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.losses import losses
from tensorflow.python.profiler import profiler
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.all_util import make_all
# Import modules whose docstrings contribute, for use by remove_undocumented
# below.
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import framework_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix as confusion_matrix_m
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib, expose through
# documentation, or remove.
_allowed_symbols = [
    'AttrValue',
    'ConfigProto',
    'ClusterDef',
    'DeviceSpec',
    'Event',
    'GPUOptions',
    'GRAPH_DEF_VERSION',
    'GRAPH_DEF_VERSION_MIN_CONSUMER',
    'GRAPH_DEF_VERSION_MIN_PRODUCER',
    'GraphDef',
    'GraphOptions',
    'HistogramProto',
    'LogMessage',
    'MetaGraphDef',
    'NameAttrList',
    'NodeDef',
    'OptimizerOptions',
    'RunOptions',
    'RunMetadata',
    'SessionLog',
    'Summary',
    'SummaryMetadata',
    'TensorInfo', # Used for tf.saved_model functionality.
]
# The following symbols are kept for compatibility. It is our plan
# to remove them in the future.
_allowed_symbols.extend([
    'arg_max',
    'arg_min',
    'mul', # use tf.multiply instead.
    'neg', # use tf.negative instead.
    'sub', # use tf.subtract instead.
    'create_partitioned_variables',
    'deserialize_many_sparse',
    'lin_space',
    'list_diff', # Use tf.listdiff instead.
    'listdiff', # Use tf.listdiff instead.
    'parse_single_sequence_example',
    'serialize_many_sparse',
    'serialize_sparse',
    'sparse_matmul', ## use tf.matmul instead.
])
# This is needed temporarily because we import it explicitly.
_allowed_symbols.extend([
    'pywrap_tensorflow',
])
# Dtypes exported by framework/dtypes.py.
# TODO(cwhipkey): expose these through documentation.
_allowed_symbols.extend([
    'QUANTIZED_DTYPES',
    'bfloat16',
    'bool',
    'complex64',
    'complex128',
    'double',
    'half',
    'float16',
    'float32',
    'float64',
    'int16',
    'int32',
    'int64',
    'int8',
    'qint16',
    'qint32',
    'qint8',
    'quint16',
    'quint8',
    'string',
    'uint16',
    'uint8',
    'resource',
])
# Export modules and constants.
_allowed_symbols.extend([
    'app',
    'bitwise',
    'compat',
    'distributions',
    'errors',
    'estimator',
    'feature_column',
    'flags',
    'gfile',
    'graph_util',
    'image',
    'logging',
    'losses',
    'metrics',
    'newaxis',
    'nn',
    'python_io',
    'resource_loader',
    'saved_model',
    'sets',
    'spectral',
    'summary',
    'sysconfig',
    'test',
    'train',
    'user_ops',
    'layers',
    'profiler',
])
# Variables framework.versions:
_allowed_symbols.extend([
    'VERSION',
    'GIT_VERSION',
    'COMPILER_VERSION',
])
# Remove all extra symbols that don't have a docstring or are not explicitly
# referenced in the whitelist.
# NOTE(review): the third argument is presumably the list of modules whose
# docstrings contribute to the public API scan -- confirm against
# tensorflow.python.util.all_util.remove_undocumented.
remove_undocumented(__name__, _allowed_symbols, [
    framework_lib, array_ops, check_ops, client_lib, compat, constant_op,
    control_flow_ops, confusion_matrix_m, distributions,
    functional_ops, histogram_ops, io_ops,
    losses, math_ops, metrics, nn, resource_loader, sets, script_ops,
    session_ops, sparse_ops, state_ops, string_ops, summary, tensor_array_ops,
    train, layers, profiler
])
# Special dunders that we choose to export:
_exported_dunders = set([
    '__version__',
    '__git_version__',
    '__compiler_version__',
])
# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
# __all__ keeps every public name plus the whitelisted dunders above.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
| apache-2.0 |
nfxosp/platform_external_skia | tools/compare_codereview.py | 163 | 13309 | #!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium Codereview Comparison Script.
This script takes two Codereview URLs, looks at the trybot results for
the two codereviews and compares the results.
Usage:
compare_codereview.py CONTROL_URL ROLL_URL
"""
import collections
import os
import re
import sys
import urllib2
import HTMLParser
class CodeReviewHTMLParser(HTMLParser.HTMLParser):
    """Parses CodeReview web page.
    Use the CodeReviewHTMLParser.parse static function to make use of
    this class.
    This uses the HTMLParser class because it's the best thing in
    Python's standard library. We need a little more power than a
    regex. [Search for "You can't parse [X]HTML with regex." for more
    information.]
    """
    # pylint: disable=I0011,R0904
    @staticmethod
    def parse(url):
        """Parses a CodeReview web pages.
        Args:
            url (string), a codereview URL like this:
                'https://codereview.chromium.org/?????????'.
        Returns:
            A dictionary; the keys are bot_name strings, the values
            are CodeReviewHTMLParser.Status objects.
            Returns None if the URL could not be fetched.
        """
        parser = CodeReviewHTMLParser()
        try:
            parser.feed(urllib2.urlopen(url).read())
        except (urllib2.URLError,):
            # Network/URL failure: report and signal "no data" to the caller.
            # NOTE(review): Python 2 only (print >> syntax, urllib2 module).
            print >> sys.stderr, 'Error getting', url
            return None
        parser.close()
        return parser.statuses
    # namedtuples are like lightweight structs in Python. The low
    # overhead of a tuple, but the ease of use of an object.
    Status = collections.namedtuple('Status', ['status', 'url'])
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        # Parser state machine: _id is set once we are inside a
        # <div id="tryjobdiv*">; _status/_href hold the attributes of the
        # current <a class="build-result"> link; _anchor_data accumulates
        # the link's text (the bot name) until the closing </a>.
        self._id = None
        self._status = None
        self._href = None
        self._anchor_data = ''
        self._currently_parsing_trybotdiv = False
        # statuses is a dictionary of CodeReviewHTMLParser.Status
        self.statuses = {}
    def handle_starttag(self, tag, attrs):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to handle the start of a tag
        (e.g. <div id="main">).
        The tag argument is the name of the tag converted to lower
        case. The attrs argument is a list of (name, value) pairs
        containing the attributes found inside the tag's <>
        brackets. The name will be translated to lower case, and
        quotes in the value have been removed, and character and
        entity references have been replaced.
        For instance, for the tag <A HREF="http://www.cwi.nl/">, this
        method would be called as handle_starttag('a', [('href',
        'http://www.cwi.nl/')]).
        [[end standard library documentation]]
        """
        attrs = dict(attrs)
        if tag == 'div':
            # We are looking for <div id="tryjobdiv*">.
            id_attr = attrs.get('id','')
            if id_attr.startswith('tryjobdiv'):
                self._id = id_attr
        if (self._id and tag == 'a'
            and 'build-result' in attrs.get('class', '').split()):
            # If we are already inside a <div id="tryjobdiv*">, we
            # look for a link if the form
            # <a class="build-result" href="*">. Then we save the
            # (non-standard) status attribute and the URL.
            self._status = attrs.get('status')
            self._href = attrs.get('href')
            self._currently_parsing_trybotdiv = True
            # Start saving anchor data.
    def handle_data(self, data):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to process arbitrary data (e.g. text
        nodes and the content of <script>...</script> and
        <style>...</style>).
        [[end standard library documentation]]
        """
        # Save the text inside the <a></a> tags. Assume <a> tags
        # aren't nested.
        if self._currently_parsing_trybotdiv:
            self._anchor_data += data
    def handle_endtag(self, tag):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to handle the end tag of an element
        (e.g. </div>). The tag argument is the name of the tag
        converted to lower case.
        [[end standard library documentation]]
        """
        if tag == 'a' and self._status:
            # We take the accumulated self._anchor_data and save it as
            # the bot name.
            bot = self._anchor_data.strip()
            stat = CodeReviewHTMLParser.Status(status=self._status,
                                               url=self._href)
            if bot:
                # Add to accumulating dictionary.
                self.statuses[bot] = stat
            # Reset state to search for the next bot.
            self._currently_parsing_trybotdiv = False
            self._anchor_data = ''
            self._status = None
            self._href = None
class BuilderHTMLParser(HTMLParser.HTMLParser):
    """parses Trybot web pages.
    Use the BuilderHTMLParser.parse static function to make use of
    this class.
    This uses the HTMLParser class because it's the best thing in
    Python's standard library. We need a little more power than a
    regex. [Search for "You can't parse [X]HTML with regex." for more
    information.]
    """
    # pylint: disable=I0011,R0904
    @staticmethod
    def parse(url):
        """Parses a Trybot web page.
        Args:
            url (string), a trybot result URL.
        Returns:
            An array of BuilderHTMLParser.Results, each a description
            of failure results, along with an optional url.
            Returns an empty list if the URL could not be fetched.
        """
        parser = BuilderHTMLParser()
        try:
            parser.feed(urllib2.urlopen(url).read())
        except (urllib2.URLError,):
            # NOTE(review): Python 2 only (print >> syntax, urllib2 module).
            print >> sys.stderr, 'Error getting', url
            return []
        parser.close()
        return parser.failure_results
    # Lightweight record: cleaned-up failure text plus an optional stdio URL.
    Result = collections.namedtuple('Result', ['text', 'url'])
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.failure_results = []
        # NOTE(review): _current_failure_result is assigned but never read;
        # it looks like leftover state. Same for _divlevel.
        self._current_failure_result = None
        self._divlevel = None
        # _li_level tracks <li> nesting depth; failure text is only
        # collected while _current_failure is True.
        self._li_level = 0
        self._li_data = ''
        self._current_failure = False
        self._failure_results_url = ''
    def handle_starttag(self, tag, attrs):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to handle the start of a tag
        (e.g. <div id="main">).
        The tag argument is the name of the tag converted to lower
        case. The attrs argument is a list of (name, value) pairs
        containing the attributes found inside the tag's <>
        brackets. The name will be translated to lower case, and
        quotes in the value have been removed, and character and
        entity references have been replaced.
        For instance, for the tag <A HREF="http://www.cwi.nl/">, this
        method would be called as handle_starttag('a', [('href',
        'http://www.cwi.nl/')]).
        [[end standard library documentation]]
        """
        attrs = dict(attrs)
        if tag == 'li':
            # <li> tags can be nested. So we have to count the
            # nest-level for backing out.
            self._li_level += 1
            return
        if tag == 'div' and attrs.get('class') == 'failure result':
            # We care about this sort of thing:
            # <li>
            # <li>
            # <li>
            # <div class="failure result">...</div>
            # </li>
            # </li>
            # We want this text here.
            # </li>
            if self._li_level > 0:
                self._current_failure = True # Tells us to keep text.
            return
        if tag == 'a' and self._current_failure:
            href = attrs.get('href')
            # Sometimes we want to keep the stdio url. We always
            # return it, just in case.
            if href.endswith('/logs/stdio'):
                self._failure_results_url = href
    def handle_data(self, data):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to process arbitrary data (e.g. text
        nodes and the content of <script>...</script> and
        <style>...</style>).
        [[end standard library documentation]]
        """
        if self._current_failure:
            self._li_data += data
    def handle_endtag(self, tag):
        """Overrides the HTMLParser method to implement functionality.
        [[begin standard library documentation]]
        This method is called to handle the end tag of an element
        (e.g. </div>). The tag argument is the name of the tag
        converted to lower case.
        [[end standard library documentation]]
        """
        if tag == 'li':
            self._li_level -= 1
            if 0 == self._li_level:
                if self._current_failure:
                    result = self._li_data.strip()
                    # NOTE(review): result.split()[0] raises IndexError when
                    # the accumulated text is empty/whitespace-only -- confirm
                    # that a "failure result" div always contains text.
                    first = result.split()[0]
                    if first:
                        result = re.sub(
                            r'^%s(\s+%s)+' % (first, first), first, result)
                        # Sometimes, it repeats the same thing
                        # multiple times.
                    result = re.sub(r'unexpected flaky.*', '', result)
                    # Remove some extra unnecessary text.
                    result = re.sub(r'\bpreamble\b', '', result)
                    result = re.sub(r'\bstdio\b', '', result)
                    url = self._failure_results_url
                    self.failure_results.append(
                        BuilderHTMLParser.Result(result, url))
                    self._current_failure_result = None
                # Reset the state.
                self._current_failure = False
                self._li_data = ''
                self._failure_results_url = ''
def printer(indent, string):
    """Print indented, word-wrapped text to stdout.

    Each input line is wrapped to fit within 68 columns minus twice the
    indent level; continuation lines get one extra level of indentation.
    """
    def wrap_words(text, width):
        """Greedily pack whitespace-separated words into lines of at most
        `width` columns; a single word longer than `width` is kept whole.
        Returns the list of wrapped lines.
        """
        wrapped = []
        current = ''
        for word in text.split():
            if not current:
                current = word
            elif len(current) + 1 + len(word) > width:
                wrapped.append(current)
                current = word
            else:
                current = '%s %s' % (current, word)
        if current:
            wrapped.append(current)
        return wrapped

    stream = sys.stdout
    spacer = ' '
    width = 68 - (2 * indent)
    for line in string.split('\n'):
        for position, chunk in enumerate(wrap_words(line, width)):
            prefix = spacer * indent
            if position > 0:
                # Continuation lines are pushed one extra level right.
                prefix += spacer
            stream.write(prefix + chunk + '\n')
    stream.flush()
def main(control_url, roll_url, verbosity=1):
    """Compare two Codereview URLs
    Args:
        control_url, roll_url: (strings) URL of the format
            https://codereview.chromium.org/?????????
        verbosity: (int) verbose level. 0, 1, or 2.
    """
    # pylint: disable=I0011,R0914,R0912
    # Fetch per-bot trybot statuses for both codereviews.
    control = CodeReviewHTMLParser.parse(control_url)
    roll = CodeReviewHTMLParser.parse(roll_url)
    all_bots = set(control) & set(roll) # Set intersection.
    if not all_bots:
        # NOTE(review): Python 2 only (print >> syntax).
        print >> sys.stderr, (
            'Error: control %s and roll %s have no common trybots.'
            % (list(control), list(roll)))
        return
    # Human-readable labels derived from the last path component (issue id).
    control_name = '[control %s]' % control_url.split('/')[-1]
    roll_name = '[roll %s]' % roll_url.split('/')[-1]
    out = sys.stdout
    for bot in sorted(all_bots):
        if (roll[bot].status == 'success'):
            # Roll side passed: nothing to compare unless extra-verbose.
            if verbosity > 1:
                printer(0, '==%s==' % bot)
                printer(1, 'OK')
            continue
        if control[bot].status != 'failure' and roll[bot].status != 'failure':
            # Neither side failed outright (e.g. both pending): skip details.
            continue
        printer(0, '==%s==' % bot)
        # Collect the cleaned-up failure lines for control then roll.
        formatted_results = []
        for (status, name, url) in [
            (control[bot].status, control_name, control[bot].url),
            ( roll[bot].status, roll_name, roll[bot].url)]:
            lines = []
            if status == 'failure':
                results = BuilderHTMLParser.parse(url)
                for result in results:
                    # Put each *.html token on its own marked line.
                    formatted_result = re.sub(r'(\S*\.html) ', '\n__\g<1>\n', result.text)
                    # Strip runtimes.
                    formatted_result = re.sub(r'\(.*\)', '', formatted_result)
                    lines.append((2, formatted_result))
                    if ('compile' in result.text or '...and more' in result.text):
                        # Link directly to the failing step's stdio log.
                        lines.append((3, re.sub('/[^/]*$', '/', url) + result.url))
            formatted_results.append(lines)
        # Identical failure text on both sides means the roll is not to blame.
        identical = formatted_results[0] == formatted_results[1]
        for (formatted_result, (status, name, url)) in zip(
            formatted_results,
            [(control[bot].status, control_name, control[bot].url),
             (roll[bot].status, roll_name, roll[bot].url)]):
            if status != 'failure' and not identical:
                printer(1, name)
                printer(2, status)
            elif status == 'failure':
                if identical:
                    printer(1, control_name + ' and ' + roll_name + ' failed identically')
                else:
                    printer(1, name)
                for (indent, line) in formatted_result:
                    printer(indent, line)
            if identical:
                # Only print the shared failure once.
                break
        out.write('\n')
    if verbosity > 0:
        # Print out summary of all of the bots.
        out.write('%11s %11s %4s %s\n\n' %
                  ('CONTROL', 'ROLL', 'DIFF', 'BOT'))
        for bot in sorted(all_bots):
            if roll[bot].status == 'success':
                diff = ''
            elif (control[bot].status == 'success' and
                  roll[bot].status == 'failure'):
                # Regression introduced by the roll.
                diff = '!!!!'
            elif ('pending' in control[bot].status or
                  'pending' in roll[bot].status):
                diff = '....'
            else:
                diff = '****'
            out.write('%11s %11s %4s %s\n' % (
                control[bot].status, roll[bot].status, diff, bot))
        out.write('\n')
    out.flush()
# Script entry point: expects the two codereview URLs as positional arguments.
if __name__ == '__main__':
    if len(sys.argv) < 3:
        # Too few arguments: print the module docstring as usage text.
        print >> sys.stderr, __doc__
        exit(1)
    # Verbosity comes from the environment, defaulting to 1.
    main(sys.argv[1], sys.argv[2],
         int(os.environ.get('COMPARE_CODEREVIEW_VERBOSITY', 1)))
| bsd-3-clause |
daleooo/barrelfish | tools/harness/harness.py | 9 | 4172 | #
# Copyright (c) 2009-2011, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
#
import os
import types
import string
import datetime
import debug
# Filename for the unprocessed output captured from the victim machine.
RAW_FILE_NAME = 'raw.txt'
# Filename for the cleaned-up boot log extracted from the raw output.
BOOT_FILE_NAME = 'bootlog.txt'
def run_test(build, machine, test, path):
    """Run one test on a machine, streaming its raw output to a file.

    Args:
        build: build object handed to the test's setup() and run() hooks.
        machine: victim machine; test.cleanup(machine) runs in all cases.
        test: test object providing setup(), run() and cleanup().
        path: directory in which RAW_FILE_NAME is created.

    Raises:
        KeyboardInterrupt: re-raised after logging so the caller can abort;
            cleanup still runs via the finally block.
    """
    # Open files for raw output from the victim and log data from the test
    raw_file_name = os.path.join(path, RAW_FILE_NAME)
    debug.verbose('open %s for raw output' % raw_file_name)
    raw_file = open(raw_file_name, 'w')
    # run the test, dumping the output to the raw file as we go
    try:
        debug.verbose('harness: setup test')
        test.setup(build, machine, path)
        debug.verbose('harness: run test')
        starttime = datetime.datetime.now()
        for out in test.run(build, machine, path):
            # timedelta for the time this line was emitted from the start of the run
            timestamp = datetime.datetime.now() - starttime
            # format as string, discarding sub-second precision
            timestr = str(timestamp).split('.', 1)[0]
            # filter output line of control characters
            # NOTE(review): under Python 3 filter() returns an iterator, so
            # the debug line below would print the iterator object -- this
            # code assumes Python 2 semantics (filter returns a string here).
            filtered_out = filter(lambda c: c in string.printable, out.rstrip())
            # debug filtered output along with timestamp
            debug.debug('[%s] %s' % (timestr, filtered_out))
            # log full raw line (without timestamp) to output file
            raw_file.write(out)
        debug.verbose('harness: output complete')
    except KeyboardInterrupt:
        # let the user know that we are on our way out
        debug.error('Interrupted! Performing cleanup...')
        raise
    finally:
        raw_file.close()
        debug.verbose('harness: cleanup test')
        test.cleanup(machine)
def process_results(test, path):
    """Process the raw output of a finished test run.

    Reads RAW_FILE_NAME from `path`, lets the test parse it, writes a
    cleaned-up BOOT_FILE_NAME (starting at GRUB's "root (nd)" line) and
    one `<name>.dat` file per named result.

    Args:
        test: test object providing process_data() and a `name` attribute.
        path: directory containing the raw output file.

    Returns:
        True if every result passed (or no results were produced),
        False if any result failed.
    """
    # open raw file for input processing
    raw_file_name = os.path.join(path, RAW_FILE_NAME)
    debug.verbose('open %s for raw input' % raw_file_name)
    raw_file = open(raw_file_name, 'r')
    try:
        results = test.process_data(path, raw_file)
    finally:
        raw_file.close()
    if not results:
        debug.verbose('no results')
        return True # no results, assume success
    retval = True # everything OK
    # Process raw.txt and make a bootlog.txt that begins with grubs
    # output, avoids having encoding issues when viewing logfiles
    boot_file_name = os.path.join(path, BOOT_FILE_NAME)
    if os.path.exists(raw_file_name):
        idx = 0
        with open(raw_file_name, 'r') as rf:
            lines = rf.readlines()
            for idx, line in enumerate(lines):
                if line.strip() == "root (nd)":
                    break
        if idx > 0:
            with open(boot_file_name, 'w') as wf:
                wf.writelines(lines[idx:])
        else:
            debug.verbose('Magic string root (nd) not found, do not write bootlog.txt')
    else:
        debug.verbose('No file named %s exists. Do not create bootlog.txt.' % raw_file_name)
    # if a single result, turn it into a list
    # NOTE: isinstance(results, list) replaces the Python-2-only
    # types.ListType alias (removed in Python 3); in Python 2 the two are
    # the same object, so behaviour is unchanged.
    if not isinstance(results, list):
        results = [results]
    for result in results:
        # see if it passed
        try:
            passed = result.passed()
        except NotImplementedError:
            # Test does not implement pass/fail; treat as indeterminate.
            passed = None
        if passed is False:
            debug.log('Test %s FAILED' % test.name)
            retval = False
        elif passed:
            debug.verbose('Test %s PASSED' % test.name)
        # write it to a file
        name = result.name if result.name else 'results'
        data_file_name = os.path.join(path, name + '.dat')
        debug.verbose('create %s for processed output' % data_file_name)
        data_file = open(data_file_name, 'w')
        try:
            result.to_file(data_file)
            data_file.close()
        except NotImplementedError:
            # Result has no file representation: drop the empty file.
            debug.verbose('no processed output, remove %s' % data_file_name)
            data_file.close()
            os.remove(data_file_name)
    return retval
| mit |
inspyration/odoo | openerp/addons/base/tests/test_res_config.py | 398 | 3532 | import unittest2
import openerp
import openerp.tests.common as common
class test_res_config(common.TransactionCase):
    """Exercise res.config.settings helpers: option path/name lookup and
    redirect-warning construction.
    """
    def setUp(self):
        super(test_res_config, self).setUp()
        self.res_config = self.registry('res.config.settings')
        # Define the test values
        self.menu_xml_id = 'base.menu_action_res_users'
        self.full_field_name = 'res.partner.lang'
        self.error_msg = "WarningRedirect test string: %(field:res.partner.lang)s - %(menu:base.menu_action_res_users)s."
        self.error_msg_wo_menu = "WarningRedirect test string: %(field:res.partner.lang)s."
        # Note: see the get_config_warning() doc for a better example
        # Fetch the expected values
        module_name, menu_xml_id = self.menu_xml_id.split('.')
        dummy, menu_id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module_name, menu_xml_id)
        ir_ui_menu = self.registry('ir.ui.menu').browse(self.cr, self.uid, menu_id, context=None)
        model_name, field_name = self.full_field_name.rsplit('.', 1)
        # Expected values the assertions below compare against.
        self.expected_path = ir_ui_menu.complete_name
        self.expected_action_id = ir_ui_menu.action.id
        self.expected_name = self.registry(model_name).fields_get(self.cr, self.uid, allfields=[field_name], context=None)[field_name]['string']
        self.expected_final_error_msg = self.error_msg % {
            'field:res.partner.lang': self.expected_name,
            'menu:base.menu_action_res_users': self.expected_path
        }
        self.expected_final_error_msg_wo_menu = self.error_msg_wo_menu % {
            'field:res.partner.lang': self.expected_name,
        }
    def test_00_get_option_path(self):
        """ The get_option_path() method should return a tuple containing a string and an integer """
        res = self.res_config.get_option_path(self.cr, self.uid, self.menu_xml_id, context=None)
        # Check types
        self.assertIsInstance(res, tuple)
        self.assertEqual(len(res), 2, "The result should contain 2 elements")
        # NOTE(review): basestring/long are Python-2-only builtins.
        self.assertIsInstance(res[0], basestring)
        self.assertIsInstance(res[1], (int, long))
        # Check returned values
        self.assertEqual(res[0], self.expected_path)
        self.assertEqual(res[1], self.expected_action_id)
    def test_10_get_option_name(self):
        """ The get_option_name() method should return a string """
        res = self.res_config.get_option_name(self.cr, self.uid, self.full_field_name, context=None)
        # Check type
        self.assertIsInstance(res, basestring)
        # Check returned value
        self.assertEqual(res, self.expected_name)
    def test_20_get_config_warning(self):
        """ The get_config_warning() method should return a RedirectWarning """
        res = self.res_config.get_config_warning(self.cr, self.error_msg, context=None)
        # Check type
        self.assertIsInstance(res, openerp.exceptions.RedirectWarning)
        # Check returned value
        self.assertEqual(res.args[0], self.expected_final_error_msg)
        self.assertEqual(res.args[1], self.expected_action_id)
    def test_30_get_config_warning_wo_menu(self):
        """ The get_config_warning() method should return a Warning exception """
        res = self.res_config.get_config_warning(self.cr, self.error_msg_wo_menu, context=None)
        # Check type
        self.assertIsInstance(res, openerp.exceptions.Warning)
        # Check returned value
        self.assertEqual(res.args[0], self.expected_final_error_msg_wo_menu)
| agpl-3.0 |
n0trax/ansible | lib/ansible/modules/network/netscaler/netscaler_gslb_service.py | 27 | 24002 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_service
short_description: Manage gslb service entities in Netscaler.
description:
- Manage gslb service entities in Netscaler.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
servicename:
description:
- >-
Name for the GSLB service. Must begin with an ASCII alphanumeric or underscore C(_) character, and
must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@),
equals C(=), and hyphen C(-) characters. Can be changed after the GSLB service is created.
- >-
- "Minimum length = 1"
cnameentry:
description:
- "Canonical name of the GSLB service. Used in CNAME-based GSLB."
- "Minimum length = 1"
servername:
description:
- "Name of the server hosting the GSLB service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'NNTP'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'RADIUS'
- 'RDP'
- 'RTSP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
description:
- "Type of service to create."
port:
description:
- "Port on which the load balancing entity represented by this GSLB service listens."
- "Minimum value = 1"
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
publicip:
description:
- >-
The public IP address that a NAT device translates to the GSLB service's private IP address.
Optional.
publicport:
description:
- >-
The public port associated with the GSLB service's public IP address. The port is mapped to the
service's private port number. Applicable to the local GSLB service. Optional.
maxclient:
description:
- >-
The maximum number of open connections that the service can support at any given time. A GSLB service
whose connection count reaches the maximum is not considered when a GSLB decision is made, until the
connection count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(4294967294)"
healthmonitor:
description:
- "Monitor the health of the GSLB service."
type: bool
sitename:
description:
- "Name of the GSLB site to which the service belongs."
- "Minimum length = 1"
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
In the request that is forwarded to the GSLB service, insert a header that stores the client's IP
address. Client IP header insertion is used in connection-proxy based site persistence.
cipheader:
description:
- >-
Name for the HTTP header that stores the client's IP address. Used with the Client IP option. If
client IP header insertion is enabled on the service and a name is not specified for the header, the
NetScaler appliance uses the name specified by the cipHeader parameter in the set ns param command
or, in the GUI, the Client IP Header parameter in the Configure HTTP Parameters dialog box.
- "Minimum length = 1"
sitepersistence:
choices:
- 'ConnectionProxy'
- 'HTTPRedirect'
- 'NONE'
description:
- "Use cookie-based site persistence. Applicable only to C(HTTP) and C(SSL) GSLB services."
siteprefix:
description:
- >-
The site's prefix string. When the service is bound to a GSLB virtual server, a GSLB site domain is
generated internally for each bound service-domain pair by concatenating the site prefix of the
service and the name of the domain. If the special string NONE is specified, the site-prefix string
is unset. When implementing HTTP redirect site persistence, the NetScaler appliance redirects GSLB
requests to GSLB services by using their site domains.
clttimeout:
description:
- >-
Idle time, in seconds, after which a client connection is terminated. Applicable if connection proxy
based site persistence is used.
- "Minimum value = 0"
- "Maximum value = 31536000"
maxbandwidth:
description:
- >-
Integer specifying the maximum bandwidth allowed for the service. A GSLB service whose bandwidth
reaches the maximum is not considered when a GSLB decision is made, until its bandwidth consumption
drops below the maximum.
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with the GSLB service when its state transitions from UP to
DOWN. Do not enable this option for services that must complete their transactions. Applicable if
connection proxy based site persistence is used.
maxaaausers:
description:
- >-
Maximum number of SSL VPN users that can be logged on concurrently to the VPN virtual server that is
represented by this GSLB service. A GSLB service whose user count reaches the maximum is not
considered when a GSLB decision is made, until the count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
monthreshold:
description:
- >-
Monitoring threshold value for the GSLB service. If the sum of the weights of the monitors that are
bound to this GSLB service and are in the UP state is not equal to or greater than this threshold
value, the service is marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
hashid:
description:
- "Unique hash identifier for the GSLB service, used by hash based load balancing methods."
- "Minimum value = C(1)"
comment:
description:
- "Any comments that you might want to associate with the GSLB service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging appflow flow information."
ipaddress:
description:
- >-
IP address for the GSLB service. Should represent a load balancing, content switching, or VPN virtual
server on the NetScaler appliance, or the IP address of another load balancing device.
monitor_bindings:
description:
- Bind monitors to this gslb service
suboptions:
weight:
description:
- Weight to assign to the monitor-service binding.
- A larger number specifies a greater weight.
- Contributes to the monitoring threshold, which determines the state of the service.
- Minimum value = C(1)
- Maximum value = C(100)
monitor_name:
description:
- Monitor name.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup gslb service 2
delegate_to: localhost
register: result
check_mode: "{{ check_mode }}"
netscaler_gslb_service:
operation: present
servicename: gslb-service-2
cnameentry: example.com
sitename: gslb-site-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
monkey_patch_nitro_api,
get_immutables_intersection,
)
# Optional-dependency guard: the NITRO SDK may be absent on the controller.
# PYTHON_SDK_IMPORTED records availability so the module can fail gracefully.
try:
    monkey_patch_nitro_api()
    from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice import gslbservice
    from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding import gslbservice_lbmonitor_binding
    from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
    PYTHON_SDK_IMPORTED = True
except ImportError as e:
    PYTHON_SDK_IMPORTED = False
def gslb_service_exists(client, module):
    """Return True when a GSLB service with the configured name exists
    on the target Netscaler appliance.
    """
    name_filter = 'servicename:%s' % module.params['servicename']
    return gslbservice.count_filtered(client, name_filter) > 0
def gslb_service_identical(client, module, gslb_service_proxy):
    """Return True if the actual GSLB service matches the configured proxy.

    The comparison ignores the 'ip' attribute because the module exposes it
    to users as 'ipaddress' and copies it into params separately.
    """
    gslb_service_list = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename'])
    diff_dict = gslb_service_proxy.diff_object(gslb_service_list[0])
    # Ignore ip attribute missing
    diff_dict.pop('ip', None)
    # An empty diff means the objects are identical.
    return not diff_dict
def get_actual_monitor_bindings(client, module):
    """Return the monitor bindings currently present on the NetScaler for the
    configured GSLB service, indexed by monitor_name.
    """
    log('get_actual_monitor_bindings')
    servicename = module.params['servicename']
    # Early return when nothing is bound; NITRO raises if we ask for bindings
    # of a service that has none.
    if gslbservice_lbmonitor_binding.count(client, servicename=servicename) == 0:
        return {}
    # Get all monitor bindings associated with the named gslb vserver
    fetched_bindings = gslbservice_lbmonitor_binding.get(client, servicename=servicename)
    # Index by monitor name
    actual_monitor_bindings = {}
    for binding in fetched_bindings:
        actual_monitor_bindings[binding.monitor_name] = binding
    return actual_monitor_bindings
def get_configured_monitor_bindings(client, module):
    """Build ConfigProxy objects for the monitor bindings declared in the
    playbook, indexed by monitor_name.
    """
    log('get_configured_monitor_bindings')
    readwrite_attrs = [
        'weight',
        'servicename',
        'monitor_name',
    ]
    proxys_by_monitor = {}
    declared_bindings = module.params['monitor_bindings']
    if declared_bindings is None:
        return proxys_by_monitor
    for declared_binding in declared_bindings:
        # Copy the playbook values and pin the binding to our service.
        attribute_values = copy.deepcopy(declared_binding)
        attribute_values['servicename'] = module.params['servicename']
        proxys_by_monitor[declared_binding['monitor_name']] = ConfigProxy(
            actual=gslbservice_lbmonitor_binding(),
            client=client,
            attribute_values_dict=attribute_values,
            readwrite_attrs=readwrite_attrs,
            readonly_attrs=[],
        )
    return proxys_by_monitor
def monitor_bindings_identical(client, module):
    """Return True when the actual monitor bindings exactly match the ones
    configured in the playbook (same set of monitors, same attributes).
    """
    log('monitor_bindings_identical')
    actual_bindings = get_actual_monitor_bindings(client, module)
    configured_proxys = get_configured_monitor_bindings(client, module)

    # Different sets of monitor names means not identical.
    symmetric_difference = set(actual_bindings) ^ set(configured_proxys)
    if symmetric_difference:
        log('Symmetric difference %s' % symmetric_difference)
        return False

    # Item for item equality test
    for key, proxy in configured_proxys.items():
        if not proxy.has_equal_attributes(actual_bindings[key]):
            log('monitor binding difference %s' % proxy.diff_object(actual_bindings[key]))
            return False

    # Fallthrough to True result
    return True
def sync_monitor_bindings(client, module):
    """Make the actual monitor bindings on the NetScaler match the playbook:
    delete absent bindings, replace differing ones, add missing ones.
    """
    log('sync_monitor_bindings')

    actual_monitor_bindings = get_actual_monitor_bindings(client, module)
    configured_monitor_proxys = get_configured_monitor_bindings(client, module)

    # Delete actual bindings not in configured bindings
    for monitor_name, actual_binding in actual_monitor_bindings.items():
        if monitor_name not in configured_monitor_proxys:
            log('Deleting absent binding for monitor %s' % monitor_name)
            gslbservice_lbmonitor_binding.delete(client, actual_binding)

    # Single pass over configured bindings: re-create differing ones,
    # add the ones missing from the actual state.
    for monitor_name, binding_proxy in configured_monitor_proxys.items():
        actual_binding = actual_monitor_bindings.get(monitor_name)
        if actual_binding is None:
            log('Adding monitor binding for monitor %s' % binding_proxy.monitor_name)
            binding_proxy.add()
        elif not binding_proxy.has_equal_attributes(actual_binding):
            # NITRO has no update for these bindings; delete then re-add.
            log('Deleting differing binding for monitor %s' % actual_binding.monitor_name)
            gslbservice_lbmonitor_binding.delete(client, actual_binding)
            log('Adding new binding for monitor %s' % binding_proxy.monitor_name)
            binding_proxy.add()
def diff_list(client, module, gslb_service_proxy):
    """Return the dict of differences between the actual GSLB service and the
    configured proxy, ignoring the internally-managed 'ip' attribute.
    """
    gslb_service_list = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename'])
    # Use a distinct local name; the original shadowed the function name.
    differences = gslb_service_proxy.diff_object(gslb_service_list[0])
    differences.pop('ip', None)
    return differences
def all_identical(client, module, gslb_service_proxy):
    """True only when both the main GSLB service object and its monitor
    bindings match the playbook configuration."""
    if not gslb_service_identical(client, module, gslb_service_proxy):
        return False
    return monitor_bindings_identical(client, module)
def main():
    """Entry point: parse arguments, log in to the NetScaler and converge the
    GSLB service (and its monitor bindings) to the requested state."""

    # Arguments mirroring the NITRO gslbservice read/write attributes.
    module_specific_arguments = dict(
        servicename=dict(type='str'),
        cnameentry=dict(type='str'),
        servername=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'NNTP',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'RADIUS',
                'RDP',
                'RTSP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
            ]
        ),
        port=dict(type='int'),
        publicip=dict(type='str'),
        publicport=dict(type='int'),
        maxclient=dict(type='float'),
        healthmonitor=dict(type='bool'),
        sitename=dict(type='str'),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        sitepersistence=dict(
            type='str',
            choices=[
                'ConnectionProxy',
                'HTTPRedirect',
                'NONE',
            ]
        ),
        siteprefix=dict(type='str'),
        clttimeout=dict(type='float'),
        maxbandwidth=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        maxaaausers=dict(type='float'),
        monthreshold=dict(type='float'),
        hashid=dict(type='float'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        ipaddress=dict(type='str'),
    )

    # Arguments handled by this module itself, not passed straight to NITRO.
    hand_inserted_arguments = dict(
        monitor_bindings=dict(type='list'),
    )

    argument_spec = dict()

    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )

    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')

    # Fallthrough to rest of execution
    client = get_nitro_client(module)

    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # String-compare the type to avoid importing requests directly.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))

    # Attribute lists drive what ConfigProxy reads/writes and what it refuses
    # to change after creation (immutable_attrs).
    readwrite_attrs = [
        'servicename',
        'cnameentry',
        'ip',
        'servername',
        'servicetype',
        'port',
        'publicip',
        'publicport',
        'maxclient',
        'healthmonitor',
        'sitename',
        'cip',
        'cipheader',
        'sitepersistence',
        'siteprefix',
        'clttimeout',
        'maxbandwidth',
        'downstateflush',
        'maxaaausers',
        'monthreshold',
        'hashid',
        'comment',
        'appflowlog',
        'ipaddress',
    ]

    readonly_attrs = [
        'gslb',
        'svrstate',
        'svreffgslbstate',
        'gslbthreshold',
        'gslbsvcstats',
        'monstate',
        'preferredlocation',
        'monitor_state',
        'statechangetimesec',
        'tickssincelaststatechange',
        'threshold',
        'clmonowner',
        'clmonview',
        '__count',
    ]

    immutable_attrs = [
        'servicename',
        'cnameentry',
        'ip',
        'servername',
        'servicetype',
        'port',
        'sitename',
        'state',
        'cipheader',
        'cookietimeout',
        'clttimeout',
        'svrtimeout',
        'viewip',
        'monitor_name_svc',
        'newname',
    ]

    # Translate Ansible-style values to the spellings NITRO expects.
    transforms = {
        'healthmonitor': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
    }

    # params = copy.deepcopy(module.params)
    # NITRO calls the address attribute 'ip'; mirror our 'ipaddress' param.
    module.params['ip'] = module.params['ipaddress']

    # Instantiate config proxy
    gslb_service_proxy = ConfigProxy(
        actual=gslbservice(),
        client=client,
        attribute_values_dict=module.params,
        transforms=transforms,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
    )

    try:
        ensure_feature_is_enabled(client, 'GSLB')
        # Apply appropriate state
        if module.params['state'] == 'present':
            if not gslb_service_exists(client, module):
                if not module.check_mode:
                    gslb_service_proxy.add()
                    sync_monitor_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, gslb_service_proxy):

                # Check if we try to change value of immutable attributes
                immutables_changed = get_immutables_intersection(gslb_service_proxy, diff_list(client, module, gslb_service_proxy).keys())
                if immutables_changed != []:
                    module.fail_json(
                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )

                # Update main configuration object
                if not gslb_service_identical(client, module, gslb_service_proxy):
                    if not module.check_mode:
                        gslb_service_proxy.update()

                # Update monitor bindigns
                if not monitor_bindings_identical(client, module):
                    if not module.check_mode:
                        sync_monitor_bindings(client, module)

                # Fallthrough to save and change status update
                module_result['changed'] = True
                if module.params['save_config']:
                    client.save_config()
            else:
                module_result['changed'] = False

            # Sanity check for state
            if not module.check_mode:
                if not gslb_service_exists(client, module):
                    module.fail_json(msg='GSLB service does not exist', **module_result)
                if not gslb_service_identical(client, module, gslb_service_proxy):
                    module.fail_json(
                        msg='GSLB service differs from configured',
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )
                if not monitor_bindings_identical(client, module):
                    module.fail_json(
                        msg='Monitor bindings differ from configured',
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )

        elif module.params['state'] == 'absent':
            if gslb_service_exists(client, module):
                if not module.check_mode:
                    gslb_service_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False

            # Sanity check for state
            if not module.check_mode:
                if gslb_service_exists(client, module):
                    module.fail_json(msg='GSLB service still exists', **module_result)

    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)

    client.logout()
    module.exit_json(**module_result)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
haikuginger/urllib3 | urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")

    checked_names = []
    # dNSName entries in subjectAltName take precedence over commonName.
    for key, value in cert.get('subjectAltName', ()):
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            checked_names.append(value)

    if not checked_names:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    checked_names.append(value)

    # Nothing matched: report what was tried.
    if len(checked_names) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, checked_names))))
    elif len(checked_names) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, checked_names[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| mit |
abo-abo/edx-platform | common/lib/xmodule/xmodule/html_module.py | 7 | 10456 | import copy
from fs.errors import ResourceNotFoundError
import logging
import os
import sys
from lxml import etree
from path import path
from pkg_resources import resource_string
from xblock.fields import Scope, String
from xmodule.editing_module import EditingDescriptor
from xmodule.html_checker import check_html
from xmodule.stringify import stringify_children
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor, name_to_pathname
import textwrap
from xmodule.contentstore.content import StaticContent
log = logging.getLogger("mitx.courseware")
class HtmlFields(object):
    # XBlock field declarations shared by HtmlModule (LMS runtime side) and
    # HtmlDescriptor (Studio/editing side).
    display_name = String(
        display_name="Display Name",
        help="This name appears in the horizontal navigation at the top of the page.",
        scope=Scope.settings,
        # it'd be nice to have a useful default but it screws up other things; so,
        # use display_name_with_default for those
        default="Text"
    )
    # The raw HTML payload shown to students.
    data = String(help="Html contents to display for this module", default=u"", scope=Scope.content)
    source_code = String(help="Source code for LaTeX documents. This feature is not well-supported.", scope=Scope.settings)
class HtmlModule(HtmlFields, XModule):
    # Client-side assets bundled for rendering raw HTML in the LMS.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
            resource_string(__name__, 'js/src/collapsible.coffee'),
            resource_string(__name__, 'js/src/html/display.coffee')
        ]
    }
    js_module_name = "HTMLModule"
    css = {'scss': [resource_string(__name__, 'css/html/display.scss')]}

    def get_html(self):
        """Return the HTML payload, substituting %%USER_ID%% with the
        anonymous student id when one is available."""
        if self.system.anonymous_student_id:
            return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id)
        return self.data
class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor):
    """
    Module for putting raw html in a course
    """
    mako_template = "widgets/html-edit.html"
    module_class = HtmlModule
    filename_extension = "xml"
    template_dir_name = "html"

    js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
    js_module_name = "HTMLEditingDescriptor"
    css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]}

    # VS[compat] TODO (cpennington): Delete this method once all fall 2012 course
    # are being edited in the cms
    @classmethod
    def backcompat_paths(cls, path):
        # Map a pointer path to the list of legacy locations where the html
        # payload may actually live (older import formats stored it under
        # various names and directory depths).
        if path.endswith('.html.xml'):
            path = path[:-9] + '.html'  # backcompat--look for html instead of xml
        if path.endswith('.html.html'):
            path = path[:-5]  # some people like to include .html in filenames..
        candidates = []
        while os.sep in path:
            candidates.append(path)
            _, _, path = path.partition(os.sep)

        # also look for .html versions instead of .xml
        nc = []
        for candidate in candidates:
            if candidate.endswith('.xml'):
                nc.append(candidate[:-4] + '.html')
        return candidates + nc

    def get_context(self):
        """
        an override to add in specific rendering context, in this case we need to
        add in a base path to our c4x content addressing scheme
        """
        _context = EditingDescriptor.get_context(self)
        # Add some specific HTML rendering context when editing HTML modules where we pass
        # the root /c4x/ url for assets. This allows client-side substitutions to occur.
        _context.update({'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location) + '/'})
        return _context

    # NOTE: html descriptors are special. We do not want to parse and
    # export them ourselves, because that can break things (e.g. lxml
    # adds body tags when it exports, but they should just be html
    # snippets that will be included in the middle of pages.

    @classmethod
    def load_definition(cls, xml_object, system, location):
        '''Load a descriptor from the specified xml_object:

        If there is a filename attribute, load it as a string, and
        log a warning if it is not parseable by etree.HTMLParser.

        If there is not a filename attribute, the definition is the body
        of the xml_object, without the root tag (do not want <html> in the
        middle of a page)
        '''
        filename = xml_object.get('filename')
        if filename is None:
            # Inline definition: the html is the body of the pointer tag.
            definition_xml = copy.deepcopy(xml_object)
            cls.clean_metadata_from_xml(definition_xml)
            return {'data': stringify_children(definition_xml)}, []
        else:
            # html is special.  cls.filename_extension is 'xml', but
            # if 'filename' is in the definition, that means to load
            # from .html
            # 'filename' in html pointers is a relative path
            # (not same as 'html/blah.html' when the pointer is in a directory itself)
            pointer_path = "{category}/{url_path}".format(
                category='html',
                url_path=name_to_pathname(location.name)
            )
            base = path(pointer_path).dirname()
            # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
            filepath = "{base}/{name}.html".format(base=base, name=filename)
            # log.debug("looking for html file for {0} at {1}".format(location, filepath))

            # VS[compat]
            # TODO (cpennington): If the file doesn't exist at the right path,
            # give the class a chance to fix it up. The file will be written out
            # again in the correct format.  This should go away once the CMS is
            # online and has imported all current (fall 2012) courses from xml
            if not system.resources_fs.exists(filepath):
                candidates = cls.backcompat_paths(filepath)
                # log.debug("candidates = {0}".format(candidates))
                for candidate in candidates:
                    if system.resources_fs.exists(candidate):
                        filepath = candidate
                        break

            try:
                with system.resources_fs.open(filepath) as file:
                    html = file.read().decode('utf-8')
                    # Log a warning if we can't parse the file, but don't error
                    if not check_html(html) and len(html) > 0:
                        msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html)
                        log.warning(msg)
                        system.error_tracker("Warning: " + msg)

                    definition = {'data': html}

                    # TODO (ichuang): remove this after migration
                    # for Fall 2012 LMS migration: keep filename (and unmangled filename)
                    definition['filename'] = [filepath, filename]

                    return definition, []
            except (ResourceNotFoundError) as err:
                msg = 'Unable to load file contents at path {0}: {1} '.format(
                    filepath, err)
                # add more info and re-raise (Python 2 three-expression raise
                # preserves the original traceback)
                raise Exception(msg), None, sys.exc_info()[2]

    # TODO (vshnayder): make export put things in the right places.
    def definition_to_xml(self, resource_fs):
        ''' Write <html filename="" [meta-attrs="..."]> to filename.xml, and the html
        string to filename.html.
        '''
        # Write html to file, return an empty tag
        pathname = name_to_pathname(self.url_name)
        filepath = u'{category}/{pathname}.html'.format(
            category=self.category,
            pathname=pathname
        )

        resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
        with resource_fs.open(filepath, 'w') as filestream:
            html_data = self.data.encode('utf-8')
            filestream.write(html_data)

        # write out the relative name
        relname = path(pathname).basename()

        elt = etree.Element('html')
        elt.set("filename", relname)
        return elt
class AboutFields(object):
    # Field overrides for course "about" pages (e.g. the course overview).
    display_name = String(
        help="Display name for this module",
        scope=Scope.settings,
        default="overview",
    )
    data = String(
        help="Html contents to display for this module",
        default="",
        scope=Scope.content
    )
class AboutModule(AboutFields, HtmlModule):
    """
    Overriding defaults but otherwise treated as HtmlModule.
    """
    pass
class AboutDescriptor(AboutFields, HtmlDescriptor):
    """
    These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
    in order to be able to create new ones
    """
    template_dir_name = "about"
    module_class = AboutModule
class StaticTabFields(object):
    """
    The overrides for Static Tabs
    """
    display_name = String(
        display_name="Display Name",
        help="This name appears in the horizontal navigation at the top of the page.",
        scope=Scope.settings,
        default="Empty",
    )
    data = String(
        # Placeholder HTML shown until the author edits the tab.
        default=textwrap.dedent("""\
            <p>This is where you can add additional pages to your courseware. Click the 'edit' button to begin editing.</p>
        """),
        scope=Scope.content,
        help="HTML for the additional pages"
    )
class StaticTabModule(StaticTabFields, HtmlModule):
    """
    Supports the field overrides
    """
    pass
class StaticTabDescriptor(StaticTabFields, HtmlDescriptor):
    """
    These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
    in order to be able to create new ones
    """
    # No template directory: static tabs are created empty rather than from a template.
    template_dir_name = None
    module_class = StaticTabModule
class CourseInfoFields(object):
    """
    Field overrides
    """
    data = String(
        help="Html contents to display for this module",
        # Course info renders as an ordered list of updates.
        default="<ol></ol>",
        scope=Scope.content
    )
class CourseInfoModule(CourseInfoFields, HtmlModule):
    """
    Just to support xblock field overrides
    """
    pass
class CourseInfoDescriptor(CourseInfoFields, HtmlDescriptor):
    """
    These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
    in order to be able to create new ones
    """
    # No template directory: course info blocks are created from the default data field.
    template_dir_name = None
    module_class = CourseInfoModule
| agpl-3.0 |
Kkevsterrr/backdoorme | backdoors/shell/__pupy/rpyc/core/service.py | 16 | 5817 | """
Services are the heart of RPyC: each side of the connection exposes a *service*,
which define the capabilities available to the other side.
Note that the services by both parties need not be symmetric, e.g., one side may
exposed *service A*, while the other may expose *service B*. As long as the two
can interoperate, you're good to go.
"""
from rpyc.lib.compat import execute, is_py3k
class Service(object):
    """The service base-class. Derive from this class to implement custom RPyC
    services:

    * The name of the class implementing the ``Foo`` service should match the
      pattern ``FooService`` (suffixed by the word 'Service') ::

          class FooService(Service):
              pass

          FooService.get_service_name() # 'FOO'
          FooService.get_service_aliases() # ['FOO']

    * To supply a different name or aliases, use the ``ALIASES`` class attribute ::

          class Foobar(Service):
              ALIASES = ["foo", "bar", "lalaland"]

          Foobar.get_service_name() # 'FOO'
          Foobar.get_service_aliases() # ['FOO', 'BAR', 'LALALAND']

    * Override :func:`on_connect` to perform custom initialization

    * Override :func:`on_disconnect` to perform custom finalization

    * To add exposed methods or attributes, simply define them normally,
      but prefix their name by ``exposed_``, e.g. ::

          class FooService(Service):
              def exposed_add(self, x, y):
                  return x + y

    * All other names (not prefixed by ``exposed_``) are local (not accessible
      to the other party)

    .. note::
       You can override ``_rpyc_getattr``, ``_rpyc_setattr`` and ``_rpyc_delattr``
       to change attribute lookup -- but beware of possible **security implications!**
    """
    __slots__ = ["_conn"]
    ALIASES = ()

    def __init__(self, conn):
        self._conn = conn

    def on_connect(self):
        """called when the connection is established"""
        pass

    def on_disconnect(self):
        """called when the connection had already terminated for cleanup
        (must not perform any IO on the connection)"""
        pass

    def _rpyc_getattr(self, name):
        # Remote attribute lookup: only names carrying the ``exposed_``
        # prefix are reachable.  Add the prefix unless it is already there.
        # (The previous ``if ...startswith(...): name = name`` branch was a
        # no-op; only the implicit else branch did any work.)
        if not name.startswith("exposed_"):
            name = "exposed_" + name
        return getattr(self, name)

    def _rpyc_delattr(self, name):
        # Remote attribute deletion is always denied.
        raise AttributeError("access denied")

    def _rpyc_setattr(self, name, value):
        # Remote attribute assignment is always denied.
        raise AttributeError("access denied")

    @classmethod
    def get_service_aliases(cls):
        """returns a list of the aliases of this service"""
        if cls.ALIASES:
            return tuple(str(n).upper() for n in cls.ALIASES)
        name = cls.__name__.upper()
        if name.endswith("SERVICE"):
            name = name[:-7]
        return (name,)

    @classmethod
    def get_service_name(cls):
        """returns the canonical name of the service (which is its first
        alias)"""
        return cls.get_service_aliases()[0]

    exposed_get_service_aliases = get_service_aliases
    exposed_get_service_name = get_service_name
class VoidService(Service):
    """void service - a do-nothing service"""
    __slots__ = ()
class ModuleNamespace(object):
    """used by the :class:`SlaveService` to implement the magical
    'module namespace'"""

    __slots__ = ["__getmodule", "__cache", "__weakref__"]

    def __init__(self, getmodule):
        self.__getmodule = getmodule
        self.__cache = {}

    def __contains__(self, name):
        # A module is "contained" exactly when it can be imported.
        try:
            self[name]
        except ImportError:
            return False
        return True

    def __getitem__(self, name):
        # A tuple of fragments is treated as a dotted module path.
        if type(name) is tuple:
            name = ".".join(name)
        try:
            return self.__cache[name]
        except KeyError:
            module = self.__getmodule(name)
            self.__cache[name] = module
            return module

    def __getattr__(self, name):
        # Attribute access is sugar for item access: ns.os == ns["os"]
        return self[name]
class SlaveService(Service):
    """The SlaveService allows the other side to perform arbitrary imports and
    execution arbitrary code on the server. This is provided for compatibility
    with the classic RPyC (2.6) modus operandi.

    This service is very useful in local, secure networks, but it exposes
    a **major security risk** otherwise."""
    __slots__ = ["exposed_namespace"]

    def on_connect(self):
        # Fresh, empty namespace per connection; remote exec/eval run in it.
        self.exposed_namespace = {}
        # Relax the protocol restrictions: the slave fully trusts its master.
        self._conn._config.update(dict(
            allow_all_attrs = True,
            allow_pickle = True,
            allow_getattr = True,
            allow_setattr = True,
            allow_delattr = True,
            import_custom_exceptions = True,
            instantiate_custom_exceptions = True,
            instantiate_oldstyle_exceptions = True,
        ))
        # shortcuts
        self._conn.modules = ModuleNamespace(self._conn.root.getmodule)
        self._conn.eval = self._conn.root.eval
        self._conn.execute = self._conn.root.execute
        self._conn.namespace = self._conn.root.namespace
        if is_py3k:
            self._conn.builtin = self._conn.modules.builtins
        else:
            self._conn.builtin = self._conn.modules.__builtin__
        self._conn.builtins = self._conn.builtin

    def exposed_execute(self, text):
        """execute arbitrary code (using ``exec``)"""
        execute(text, self.exposed_namespace)

    def exposed_eval(self, text):
        """evaluate arbitrary code (using ``eval``)"""
        return eval(text, self.exposed_namespace)

    def exposed_getmodule(self, name):
        """imports an arbitrary module"""
        return __import__(name, None, None, "*")

    def exposed_getconn(self):
        """returns the local connection instance to the other side"""
        return self._conn
| mit |
adoosii/edx-platform | lms/djangoapps/lms_xblock/mixin.py | 37 | 7658 | """
Namespace that defines fields common to all blocks used in the LMS
"""
from lazy import lazy
from xblock.fields import Boolean, Scope, String, XBlockMixin, Dict
from xblock.validation import ValidationMessage
from xmodule.modulestore.inheritance import UserPartitionList
from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class GroupAccessDict(Dict):
    """Special Dict class for serializing the group_access field"""
    def from_json(self, access_dict):
        # Deserialize: JSON object keys arrive as strings, but partition ids
        # are handled as integers internally.  Returns None for None input.
        if access_dict is not None:
            return {int(k): access_dict[k] for k in access_dict}

    def to_json(self, access_dict):
        # Serialize: convert the integer partition ids back to (unicode) strings.
        if access_dict is not None:
            return {unicode(k): access_dict[k] for k in access_dict}
class LmsBlockMixin(XBlockMixin):
"""
Mixin that defines fields common to all blocks used in the LMS
"""
hide_from_toc = Boolean(
help=_("Whether to display this module in the table of contents"),
default=False,
scope=Scope.settings
)
format = String(
# Translators: "TOC" stands for "Table of Contents"
help=_("What format this module is in (used for deciding which "
"grader to apply, and what to show in the TOC)"),
scope=Scope.settings,
)
chrome = String(
display_name=_("Courseware Chrome"),
help=_("Enter the chrome, or navigation tools, to use for the XBlock in the LMS. Valid values are: \n"
"\"chromeless\" -- to not use tabs or the accordion; \n"
"\"tabs\" -- to use tabs only; \n"
"\"accordion\" -- to use the accordion only; or \n"
"\"tabs,accordion\" -- to use tabs and the accordion."),
scope=Scope.settings,
default=None,
)
default_tab = String(
display_name=_("Default Tab"),
help=_("Enter the tab that is selected in the XBlock. If not set, the Courseware tab is selected."),
scope=Scope.settings,
default=None,
)
source_file = String(
display_name=_("LaTeX Source File Name"),
help=_("Enter the source file name for LaTeX."),
scope=Scope.settings,
deprecated=True
)
ispublic = Boolean(
display_name=_("Course Is Public"),
help=_("Enter true or false. If true, the course is open to the public. If false, the course is open only to admins."),
scope=Scope.settings
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
group_access = GroupAccessDict(
help=_(
"A dictionary that maps which groups can be shown this block. The keys "
"are group configuration ids and the values are a list of group IDs. "
"If there is no key for a group configuration or if the set of group IDs "
"is empty then the block is considered visible to all. Note that this "
"field is ignored if the block is visible_to_staff_only."
),
default={},
scope=Scope.settings,
)
@lazy
def merged_group_access(self):
"""
This computes access to a block's group_access rules in the context of its position
within the courseware structure, in the form of a lazily-computed attribute.
Each block's group_access rule is merged recursively with its parent's, guaranteeing
that any rule in a parent block will be enforced on descendants, even if a descendant
also defined its own access rules. The return value is always a dict, with the same
structure as that of the group_access field.
When merging access rules results in a case where all groups are denied access in a
user partition (which effectively denies access to that block for all students),
the special value False will be returned for that user partition key.
"""
parent = self.get_parent()
if not parent:
return self.group_access or {}
merged_access = parent.merged_group_access.copy()
if self.group_access is not None:
for partition_id, group_ids in self.group_access.items():
if group_ids: # skip if the "local" group_access for this partition is None or empty.
if partition_id in merged_access:
if merged_access[partition_id] is False:
# special case - means somewhere up the hierarchy, merged access rules have eliminated
# all group_ids from this partition, so there's no possible intersection.
continue
# otherwise, if the parent defines group access rules for this partition,
# intersect with the local ones.
merged_access[partition_id] = list(
set(merged_access[partition_id]).intersection(group_ids)
) or False
else:
# add the group access rules for this partition to the merged set of rules.
merged_access[partition_id] = group_ids
return merged_access
# Specified here so we can see what the value set at the course-level is.
user_partitions = UserPartitionList(
help=_("The list of group configurations for partitioning students in content experiments."),
default=[],
scope=Scope.settings
)
def _get_user_partition(self, user_partition_id):
"""
Returns the user partition with the specified id. Raises
`NoSuchUserPartitionError` if the lookup fails.
"""
for user_partition in self.user_partitions:
if user_partition.id == user_partition_id:
return user_partition
raise NoSuchUserPartitionError("could not find a UserPartition with ID [{}]".format(user_partition_id))
    def validate(self):
        """
        Validates the state of this xblock instance.

        Checks every (partition_id -> group_ids) entry in group_access and
        adds at most one ERROR message per problem class: one if any
        referenced partition no longer exists, and one if any referenced
        group is missing from an (active) partition.
        """
        _ = self.runtime.service(self, "i18n").ugettext # pylint: disable=redefined-outer-name
        validation = super(LmsBlockMixin, self).validate()
        has_invalid_user_partitions = False
        has_invalid_groups = False
        # NOTE(review): iteritems() is Python-2-only; this file appears to
        # target py2 (u"" literals, ugettext) -- confirm before porting.
        for user_partition_id, group_ids in self.group_access.iteritems():
            try:
                user_partition = self._get_user_partition(user_partition_id)
            except NoSuchUserPartitionError:
                has_invalid_user_partitions = True
            else:
                # Skip the validation check if the partition has been disabled
                if user_partition.active:
                    for group_id in group_ids:
                        try:
                            user_partition.get_group(group_id)
                        except NoSuchUserPartitionGroupError:
                            has_invalid_groups = True
        if has_invalid_user_partitions:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    _(u"This component refers to deleted or invalid content group configurations.")
                )
            )
        if has_invalid_groups:
            validation.add(
                ValidationMessage(
                    ValidationMessage.ERROR,
                    _(u"This component refers to deleted or invalid content groups.")
                )
            )
        return validation
| agpl-3.0 |
andrewv587/pycharm-project | keras_module/data_draw/draw_pascal.py | 1 | 2998 | import matplotlib.pylab as plt
import numpy as np
from ..image_utils import deprocess_image
def test_img(pascal_iter, index, pasacl_train):
    """
    Visual smoke test: fetch example ``index`` from the iterator, print its
    shapes, and display both the raw datum image and the dataset's own
    visualisation in separate figures.
    """
    datum, label = pascal_iter.get_example(index)
    img = pasacl_train.visualize_example(index)
    print(img.shape)
    # Show the decoded datum first, then the visualisation, each in its
    # own figure.
    for picture in (pasacl_train.datum_to_img(datum), img):
        plt.figure()
        plt.imshow(picture)
        plt.show()
    print(datum.shape)
    print(label.shape)
    return
def draw_batch_images(generator, pasacl_train, batch_size=5):
    """
    Pull one batch from ``generator`` and plot it as a 2 x batch_size grid:
    decoded input images on the top row, label overlays (via
    visualize_pairs) on the bottom row.
    """
    batch_data, batch_labels = generator.next()
    plt.figure(figsize=(124, 124))
    for position in range(batch_size):
        datum = batch_data[position]
        # collapse the one-hot channel axis to a class-index map
        class_map = np.argmax(batch_labels[position], axis=-1)
        overlay = pasacl_train.visualize_pairs(datum, class_map)
        plt.subplot(2, batch_size, position + 1)
        plt.imshow(pasacl_train.datum_to_img(datum))
        plt.subplot(2, batch_size, batch_size + position + 1)
        plt.imshow(overlay)
    plt.show()
    return
def draw_images_pair(img1_datas, img2_datas, index_pro=1, batch_size=5, is_save=True, prefix='st-', is_block=False):
    """
    Plot two image batches as a 2 x batch_size grid (img1 on the top row,
    img2 on the bottom row), de-processing each image first, then either
    save the figure to ``prefix + index_pro + '.jpg'`` or show it.

    :param img1_datas: batch of pre-processed images for the top row
    :param img2_datas: batch of pre-processed images for the bottom row
    :param index_pro: suffix used to build the output filename
    :param batch_size: number of columns to draw
    :param is_save: save to file when True, otherwise show interactively
    :param prefix: filename prefix for the saved figure
    :param is_block: passed through to plt.show(block=...)
    """
    plt.figure(figsize=(100, 40))
    for index in range(batch_size):
        top = deprocess_image(img1_datas[index].copy())
        bottom = deprocess_image(img2_datas[index].copy())
        plt.subplot(2, batch_size, index + 1)
        plt.imshow(top)
        plt.subplot(2, batch_size, batch_size + index + 1)
        plt.imshow(bottom)
    # BUGFIX: save/show once, after the grid is complete.  The original
    # called savefig/show inside the loop, re-rendering and overwriting the
    # very same file (index_pro is loop-invariant) once per column; only the
    # final write ever survived.
    if is_save:
        plt.savefig(prefix + str(index_pro) + '.jpg')
    else:
        plt.show(block=is_block)
    return
def draw_batch_label(datas, label_pred, label_true, pasacl_train, batch_size=6):
    """
    Compare predictions against ground truth for one batch: the top row
    shows the true-label overlays, the bottom row the predicted ones.
    """
    plt.figure(figsize=(124, 124))
    for col in range(batch_size):
        datum = datas[col]
        # one-hot -> class-index maps for both prediction and reference
        pred_classes = np.argmax(label_pred[col], axis=-1)
        true_classes = np.argmax(label_true[col], axis=-1)
        pred_overlay = pasacl_train.visualize_pairs(datum, pred_classes)
        true_overlay = pasacl_train.visualize_pairs(datum, true_classes)
        plt.subplot(2, batch_size, col + 1)
        plt.imshow(true_overlay)
        plt.subplot(2, batch_size, batch_size + col + 1)
        plt.imshow(pred_overlay)
    plt.show()
    return
def draw_segment_pair(data_labels_pred, data_labels, batch_size=5):
    """
    Plot predicted segmentation maps (top row) against the reference maps
    (bottom row) for one batch, rendering raw class indices directly.
    """
    plt.figure(figsize=(124, 124))
    for col in range(batch_size):
        predicted = np.argmax(data_labels_pred[col], axis=-1)
        reference = np.argmax(data_labels[col], axis=-1)
        plt.subplot(2, batch_size, col + 1)
        plt.imshow(predicted)
        plt.subplot(2, batch_size, batch_size + col + 1)
        plt.imshow(reference)
    plt.show()
    return
| apache-2.0 |
Vignesh2208/Awlsim | awlsim/core/instructions/insn_x.py | 2 | 1431 | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_X(AwlInsn): #+cdef
	"""AWL "X" instruction: binary exclusive-or of the operand bit into VKE."""

	__slots__ = ()

	def __init__(self, cpu, rawInsn):
		AwlInsn.__init__(self, cpu, AwlInsn.TYPE_X, rawInsn)
		self.assertOpCount(1)

	def run(self):
		#@cy cdef S7StatusWord s
		s = self.cpu.statusWord
		s.STA = self.cpu.fetch(self.ops[0], {1,})
		# Within a bit chain (NER set) XOR the fetched bit into VKE;
		# at the start of a chain just load it.
		s.VKE = (s.VKE ^ s.STA) if s.NER else s.STA
		s.OR, s.NER = 0, 1
| gpl-2.0 |
srikantbmandal/ansible | test/units/mock/procenv.py | 141 | 2636 | # (c) 2016, Matt Davis <mdavis@ansible.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible.compat.tests import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
    """
    Context manager that temporarily replaces the test runner's sys.stdin
    and sys.argv with test-supplied values, restoring the real objects on
    exit regardless of errors.
    """
    saved_stdin, saved_argv = sys.stdin, sys.argv
    if PY3:
        fake_stream = StringIO(stdin_data)
        # mimic the .buffer bytes view that a real py3 text stdin exposes
        fake_stream.buffer = BytesIO(to_bytes(stdin_data))
    else:
        fake_stream = BytesIO(to_bytes(stdin_data))
    try:
        sys.stdin = fake_stream
        sys.argv = argv_data
        yield
    finally:
        sys.stdin = saved_stdin
        sys.argv = saved_argv
@contextmanager
def swap_stdout():
    """
    Context manager that temporarily redirects sys.stdout into an in-memory
    stream and yields that stream so tests can inspect captured output.
    """
    saved_stdout = sys.stdout
    fake_stream = StringIO() if PY3 else BytesIO()
    try:
        sys.stdout = fake_stream
        yield fake_stream
    finally:
        sys.stdout = saved_stdout
class ModuleTestCase(unittest.TestCase):
    """
    TestCase base class that serialises module args as JSON onto a fake
    stdin before each test and restores the real stdin afterwards.
    """

    def setUp(self, module_args=None):
        module_args = {} if module_args is None else module_args
        stdin_payload = json.dumps({'ANSIBLE_MODULE_ARGS': module_args})
        # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
        self.stdin_swap = swap_stdin_and_argv(stdin_data=stdin_payload)
        self.stdin_swap.__enter__()

    def tearDown(self):
        # manually exit the swap entered in setUp (see note there)
        self.stdin_swap.__exit__(None, None, None)
gustavoanatoly/flink | flink-libraries/flink-python/src/main/python/org/apache/flink/python/api/flink/functions/FilterFunction.py | 22 | 1482 | # ###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from flink.functions import Function
class FilterFunction(Function.Function):
    """
    Base class for user-defined filter functions: subclasses override
    filter() to decide which records pass through.
    """

    def __init__(self):
        super(FilterFunction, self).__init__()

    def _run(self):
        # bind lookups once outside the per-record loop
        emit = self._collector.collect
        keep = self.filter
        for record in self._iterator:
            if keep(record):
                emit(record)
        self._collector._close()

    def collect(self, value):
        # chained-operator path: forward only the accepted values
        if self.filter(value):
            self._collector.collect(value)

    def filter(self, value):
        # to be overridden by the user's predicate
        pass
alex/tablib | tablib/packages/xlrd3/formatting.py | 54 | 45677 | # Module for formatting information.
#
# Copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd
# Copyright © 2005-2009 Stephen John Machin, Lingfo Pty Ltd
# This module is part of the xlrd3 package, which is released under
# a BSD-style licence.
# No part of the content of this file was derived from the works of David Giffin.
# 2009-05-31 SJM Fixed problem with non-zero reserved bits in some STYLE records in Mac Excel files
# 2008-08-03 SJM Ignore PALETTE record when Book.formatting_info is false
# 2008-08-03 SJM Tolerate up to 4 bytes trailing junk on PALETTE record
# 2008-05-10 SJM Do some XF checks only when Book.formatting_info is true
# 2008-02-08 SJM Preparation for Excel 2.0 support
# 2008-02-03 SJM Another tweak to is_date_format_string()
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-13 SJM Warning: style XF whose parent XF index != 0xFFF
# 2007-09-08 SJM Work around corrupt STYLE record
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
DEBUG = False
import copy
import re
from struct import unpack
from .biffh import BaseObject, unpack_unicode, unpack_string, \
upkbits, upkbitsL, fprintf, \
FUN, FDT, FNU, FGE, FTX, XL_CELL_NUMBER, XL_CELL_DATE, \
XL_FORMAT, XL_FORMAT2, \
XLRDError
excel_default_palette_b5 = (
( 0, 0, 0), (255, 255, 255), (255, 0, 0), ( 0, 255, 0),
( 0, 0, 255), (255, 255, 0), (255, 0, 255), ( 0, 255, 255),
(128, 0, 0), ( 0, 128, 0), ( 0, 0, 128), (128, 128, 0),
(128, 0, 128), ( 0, 128, 128), (192, 192, 192), (128, 128, 128),
(153, 153, 255), (153, 51, 102), (255, 255, 204), (204, 255, 255),
(102, 0, 102), (255, 128, 128), ( 0, 102, 204), (204, 204, 255),
( 0, 0, 128), (255, 0, 255), (255, 255, 0), ( 0, 255, 255),
(128, 0, 128), (128, 0, 0), ( 0, 128, 128), ( 0, 0, 255),
( 0, 204, 255), (204, 255, 255), (204, 255, 204), (255, 255, 153),
(153, 204, 255), (255, 153, 204), (204, 153, 255), (227, 227, 227),
( 51, 102, 255), ( 51, 204, 204), (153, 204, 0), (255, 204, 0),
(255, 153, 0), (255, 102, 0), (102, 102, 153), (150, 150, 150),
( 0, 51, 102), ( 51, 153, 102), ( 0, 51, 0), ( 51, 51, 0),
(153, 51, 0), (153, 51, 102), ( 51, 51, 153), ( 51, 51, 51),
)
excel_default_palette_b2 = excel_default_palette_b5[:16]
# Following two tables borrowed from Gnumeric 1.4 source.
excel_default_palette_b5_gnumeric_14 = (
#### dodgy; didn't match Excel results
( 0, 0, 0), (255,255,255), (255, 0, 0), ( 0,255, 0),
( 0, 0,255), (255,255, 0), (255, 0,255), ( 0,255,255),
(128, 0, 0), ( 0,128, 0), ( 0, 0,128), (128,128, 0),
(128, 0,128), ( 0,128,128), (192,192,192), (128,128,128),
(128,128,255), (128, 32, 96), (255,255,192), (160,224,224),
( 96, 0,128), (255,128,128), ( 0,128,192), (192,192,255),
( 0, 0,128), (255, 0,255), (255,255, 0), ( 0,255,255),
(128, 0,128), (128, 0, 0), ( 0,128,128), ( 0, 0,255),
( 0,204,255), (105,255,255), (204,255,204), (255,255,153),
(166,202,240), (204,156,204), (204,153,255), (227,227,227),
( 51,102,255), ( 51,204,204), ( 51,153, 51), (153,153, 51),
(153,102, 51), (153,102,102), (102,102,153), (150,150,150),
( 51, 51,204), ( 51,102,102), ( 0, 51, 0), ( 51, 51, 0),
(102, 51, 0), (153, 51,102), ( 51, 51,153), ( 66, 66, 66),
)
excel_default_palette_b8 = ( # (red, green, blue)
( 0, 0, 0), (255,255,255), (255, 0, 0), ( 0,255, 0),
( 0, 0,255), (255,255, 0), (255, 0,255), ( 0,255,255),
(128, 0, 0), ( 0,128, 0), ( 0, 0,128), (128,128, 0),
(128, 0,128), ( 0,128,128), (192,192,192), (128,128,128),
(153,153,255), (153, 51,102), (255,255,204), (204,255,255),
(102, 0,102), (255,128,128), ( 0,102,204), (204,204,255),
( 0, 0,128), (255, 0,255), (255,255, 0), ( 0,255,255),
(128, 0,128), (128, 0, 0), ( 0,128,128), ( 0, 0,255),
( 0,204,255), (204,255,255), (204,255,204), (255,255,153),
(153,204,255), (255,153,204), (204,153,255), (255,204,153),
( 51,102,255), ( 51,204,204), (153,204, 0), (255,204, 0),
(255,153, 0), (255,102, 0), (102,102,153), (150,150,150),
( 0, 51,102), ( 51,153,102), ( 0, 51, 0), ( 51, 51, 0),
(153, 51, 0), (153, 51,102), ( 51, 51,153), ( 51, 51, 51),
)
default_palette = {
80: excel_default_palette_b8,
70: excel_default_palette_b5,
50: excel_default_palette_b5,
45: excel_default_palette_b2,
40: excel_default_palette_b2,
30: excel_default_palette_b2,
21: excel_default_palette_b2,
20: excel_default_palette_b2,
}
"""
00H = Normal
01H = RowLevel_lv (see next field)
02H = ColLevel_lv (see next field)
03H = Comma
04H = Currency
05H = Percent
06H = Comma [0] (BIFF4-BIFF8)
07H = Currency [0] (BIFF4-BIFF8)
08H = Hyperlink (BIFF8)
09H = Followed Hyperlink (BIFF8)
"""
built_in_style_names = [
"Normal",
"RowLevel_",
"ColLevel_",
"Comma",
"Currency",
"Percent",
"Comma [0]",
"Currency [0]",
"Hyperlink",
"Followed Hyperlink",
]
def initialise_colour_map(book):
    """
    Reset book.colour_map / book.colour_indexes_used and, when formatting
    info is wanted, pre-load the map with the 8 invariant colours, the
    BIFF-version default palette, and the "special" indexes whose RGB is
    unknown (stored as None).
    """
    book.colour_map = {}
    book.colour_indexes_used = {}
    if not book.formatting_info:
        return
    # The first 8 colours are identical across BIFF versions.
    book.colour_map.update(
        (ix, excel_default_palette_b8[ix]) for ix in range(8))
    # Default palette for this file's BIFF version, offset past the 8 fixed ones.
    dpal = default_palette[book.biff_version]
    for offset, rgb in enumerate(dpal):
        book.colour_map[offset + 8] = rgb
    ndpal = len(dpal)
    # Specials -- None means the RGB value is not known.
    book.colour_map[ndpal + 8] = None      # system window text colour (border lines)
    book.colour_map[ndpal + 8 + 1] = None  # system window background colour (pattern background)
    book.colour_map[0x51] = None           # system ToolTip text colour (note objects)
    book.colour_map[0x7FFF] = None         # 32767, system window text colour for fonts
def nearest_colour_index(colour_map, rgb, debug=0):
    """
    Return the key in colour_map whose RGB triple is closest to ``rgb`` by
    squared Euclidean distance, skipping entries whose RGB is unknown
    (None).  Falls back to 0 when no candidate beats the initial bound.
    General purpose; so far used only for pre-BIFF8 WINDOW2 records, so it
    favours simplicity over speed.
    """
    # 3 * 256 * 256 exceeds the worst possible metric (3 * 255**2), so any
    # real candidate wins.
    best_metric = 3 * 256 * 256
    best_colourx = 0
    for candidate_index, candidate_rgb in list(colour_map.items()):
        if candidate_rgb is None:
            continue
        metric = sum((a - b) * (a - b) for a, b in zip(rgb, candidate_rgb))
        if metric < best_metric:
            best_metric, best_colourx = metric, candidate_index
            if metric == 0:
                # exact match; no need to look further
                break
    if debug:
        print("nearest_colour_index for %r is %r -> %r; best_metric is %d" \
            % (rgb, best_colourx, colour_map[best_colourx], best_metric))
    return best_colourx
# This mixin class exists solely so that Format, Font, and XF.... objects
# can be compared by value of their attributes.
class EqNeAttrs(object):
    """Mixin giving value-based equality: two instances compare equal when
    their instance attribute dicts are equal."""

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self.__dict__ == other.__dict__)
# An Excel "font" contains the details of not only what is normally
# considered a font, but also several other display attributes.
# Items correspond to those in the Excel UI's Format/Cells/Font tab.
# - New in version 0.6.1
class Font(BaseObject, EqNeAttrs):
    """
    An Excel "font" record: the usual font attributes plus several other
    display attributes, matching the Excel UI's Format/Cells/Font tab.
    Pure data holder, populated by handle_font(); no methods.
    """
    # 1 = Characters are bold. Redundant; see "weight" attribute.
    bold = 0
    # Values: 0 = ANSI Latin, 1 = System default, 2 = Symbol,
    # 77 = Apple Roman,
    # 128 = ANSI Japanese Shift-JIS,
    # 129 = ANSI Korean (Hangul),
    # 130 = ANSI Korean (Johab),
    # 134 = ANSI Chinese Simplified GBK,
    # 136 = ANSI Chinese Traditional BIG5,
    # 161 = ANSI Greek,
    # 162 = ANSI Turkish,
    # 163 = ANSI Vietnamese,
    # 177 = ANSI Hebrew,
    # 178 = ANSI Arabic,
    # 186 = ANSI Baltic,
    # 204 = ANSI Cyrillic,
    # 222 = ANSI Thai,
    # 238 = ANSI Latin II (Central European),
    # 255 = OEM Latin I
    character_set = 0
    # An explanation of "colour index" is given in the Formatting
    # section at the start of this document.
    colour_index = 0
    # 1 = Superscript, 2 = Subscript.
    # NOTE(review): handle_font() assigns f.escapement_type, not f.escapement,
    # so this class-level default appears out of sync -- confirm upstream.
    escapement = 0
    # 0 = None (unknown or don't care)<br />
    # 1 = Roman (variable width, serifed)<br />
    # 2 = Swiss (variable width, sans-serifed)<br />
    # 3 = Modern (fixed width, serifed or sans-serifed)<br />
    # 4 = Script (cursive)<br />
    # 5 = Decorative (specialised, for example Old English, Fraktur)
    family = 0
    # The 0-based index used to refer to this Font() instance.
    # Note that index 4 is never used; xlrd supplies a dummy place-holder.
    font_index = 0
    # Height of the font (in twips). A twip = 1/20 of a point.
    height = 0
    # 1 = Characters are italic.
    italic = 0
    # The name of the font. Example: u"Arial"
    name = ""
    # 1 = Characters are struck out.
    struck_out = 0
    # 0 = None<br />
    # 1 = Single; 0x21 (33) = Single accounting<br />
    # 2 = Double; 0x22 (34) = Double accounting
    underline_type = 0
    # 1 = Characters are underlined. Redundant; see "underline_type" attribute.
    underlined = 0
    # Font weight (100-1000). Standard values are 400 for normal text
    # and 700 for bold text.
    weight = 400
    # 1 = Font is outline style (Macintosh only)
    outline = 0
    # 1 = Font is shadow style (Macintosh only)
    shadow = 0
    # No methods ...
def handle_efont(book, data): # BIFF2 only
    """Apply an EFONT record: set the colour index on the most recently
    added font (EFONT immediately follows its FONT record in BIFF2)."""
    if book.formatting_info:
        colour_index, = unpack('<H', data)
        book.font_list[-1].colour_index = colour_index
def handle_font(book, data):
    """
    Process a FONT record: decode ``data`` according to the file's BIFF
    version and append a populated Font object to book.font_list.
    Does nothing unless book.formatting_info is set.
    """
    if not book.formatting_info:
        return
    if not book.encoding:
        book.derive_encoding()
    verbose = DEBUG or book.verbosity >= 2
    bv = book.biff_version
    k = len(book.font_list)
    # Font index 4 is never used in the file format; insert a dummy
    # placeholder so subsequent indexes line up.
    if k == 4:
        f = Font()
        f.name = 'Dummy Font'
        f.font_index = k
        book.font_list.append(f)
        k += 1
    f = Font()
    f.font_index = k
    book.font_list.append(f)
    if bv >= 50:
        # BIFF5+: full fixed-size header followed by the font name.
        (
            f.height, option_flags, f.colour_index, f.weight,
            f.escapement_type, f.underline_type, f.family,
            f.character_set,
        ) = unpack('<HHHHHBBB', data[0:13])
        f.bold = option_flags & 1
        f.italic = (option_flags & 2) >> 1
        f.underlined = (option_flags & 4) >> 2
        f.struck_out = (option_flags & 8) >> 3
        f.outline = (option_flags & 16) >> 4
        f.shadow = (option_flags & 32) >> 5
        if bv >= 80:
            f.name = unpack_unicode(data, 14, lenlen=1)
        else:
            f.name = unpack_string(data, 14, book.encoding, lenlen=1)
    elif bv >= 30:
        # BIFF3/4: shorter header; remaining attributes are derived below.
        f.height, option_flags, f.colour_index = unpack('<HHH', data[0:6])
        f.bold = option_flags & 1
        f.italic = (option_flags & 2) >> 1
        f.underlined = (option_flags & 4) >> 2
        f.struck_out = (option_flags & 8) >> 3
        f.outline = (option_flags & 16) >> 4
        f.shadow = (option_flags & 32) >> 5
        f.name = unpack_string(data, 6, book.encoding, lenlen=1)
        # Now cook up the remaining attributes ...
        f.weight = [400, 700][f.bold]
        f.escapement_type = 0 # None
        f.underline_type = f.underlined # None or Single
        f.family = 0 # Unknown / don't care
        f.character_set = 1 # System default (0 means "ANSI Latin")
    else: # BIFF2
        # BIFF2 carries no colour here; an EFONT record (handle_efont)
        # may supply the colour index afterwards.
        f.height, option_flags = unpack('<HH', data[0:4])
        f.colour_index = 0x7FFF # "system window text colour"
        f.bold = option_flags & 1
        f.italic = (option_flags & 2) >> 1
        f.underlined = (option_flags & 4) >> 2
        f.struck_out = (option_flags & 8) >> 3
        f.outline = 0
        f.shadow = 0
        f.name = unpack_string(data, 4, book.encoding, lenlen=1)
        # Now cook up the remaining attributes ...
        f.weight = [400, 700][f.bold]
        f.escapement_type = 0 # None
        f.underline_type = f.underlined # None or Single
        f.family = 0 # Unknown / don't care
        f.character_set = 1 # System default (0 means "ANSI Latin")
    if verbose:
        f.dump(
            book.logfile,
            header="--- handle_font: font[%d] ---" % f.font_index,
            footer="-------------------",
        )
# === "Number formats" ===
# "Number format" information from a FORMAT record.
# - New in version 0.6.1
class Format(BaseObject, EqNeAttrs):
    """
    "Number format" information from a FORMAT record: the key into
    Book.format_map, a coarse type classification inferred from the format
    string, and the format string itself.
    """
    # The key into Book.format_map
    format_key = 0
    # A classification that has been inferred from the format string;
    # currently used only to distinguish between numbers and dates.
    # Values: FUN=0 unknown, FDT=1 date, FNU=2 number, FGE=3 general,
    # FTX=4 text.
    type = FUN
    # The format string
    format_str = ''

    def __init__(self, format_key, ty, format_str):
        self.format_str = format_str
        self.type = ty
        self.format_key = format_key
std_format_strings = {
# "std" == "standard for US English locale"
# #### TODO ... a lot of work to tailor these to the user's locale.
# See e.g. gnumeric-1.x.y/src/formats.c
0x00: "General",
0x01: "0",
0x02: "0.00",
0x03: "#,##0",
0x04: "#,##0.00",
0x05: "$#,##0_);($#,##0)",
0x06: "$#,##0_);[Red]($#,##0)",
0x07: "$#,##0.00_);($#,##0.00)",
0x08: "$#,##0.00_);[Red]($#,##0.00)",
0x09: "0%",
0x0a: "0.00%",
0x0b: "0.00E+00",
0x0c: "# ?/?",
0x0d: "# ??/??",
0x0e: "m/d/yy",
0x0f: "d-mmm-yy",
0x10: "d-mmm",
0x11: "mmm-yy",
0x12: "h:mm AM/PM",
0x13: "h:mm:ss AM/PM",
0x14: "h:mm",
0x15: "h:mm:ss",
0x16: "m/d/yy h:mm",
0x25: "#,##0_);(#,##0)",
0x26: "#,##0_);[Red](#,##0)",
0x27: "#,##0.00_);(#,##0.00)",
0x28: "#,##0.00_);[Red](#,##0.00)",
0x29: "_(* #,##0_);_(* (#,##0);_(* \"-\"_);_(@_)",
0x2a: "_($* #,##0_);_($* (#,##0);_($* \"-\"_);_(@_)",
0x2b: "_(* #,##0.00_);_(* (#,##0.00);_(* \"-\"??_);_(@_)",
0x2c: "_($* #,##0.00_);_($* (#,##0.00);_($* \"-\"??_);_(@_)",
0x2d: "mm:ss",
0x2e: "[h]:mm:ss",
0x2f: "mm:ss.0",
0x30: "##0.0E+0",
0x31: "@",
}
fmt_code_ranges = [ # both-inclusive ranges of "standard" format codes
# Source: the openoffice.org doc't
( 0, 0, FGE),
( 1, 13, FNU),
(14, 22, FDT),
#### (27, 36, FDT), # Japanese dates -- not sure of reliability of this
(37, 44, FNU),
(45, 47, FDT),
(48, 48, FNU),
(49, 49, FTX),
####(50, 58, FDT), # Japanese dates -- but Gnumeric assumes
# built-in formats finish at 49, not at 163
]
std_format_code_types = {}
for lo, hi, ty in fmt_code_ranges:
for x in range(lo, hi+1):
std_format_code_types[x] = ty
del lo, hi, ty, x
date_chars = 'ymdhs' # year, month/minute, day, hour, second
date_char_dict = {}
for _c in date_chars + date_chars.upper():
date_char_dict[_c] = 5
del _c, date_chars
#(to_py3):
skip_char_dict = frozenset('$-+/(): ')
num_char_dict = {
'0': 5,
'#': 5,
'?': 5,
}
non_date_formats = {
'0.00E+00':1,
'##0.0E+0':1,
'General' :1,
'GENERAL' :1, # OOo Calc 1.1.4 does this.
'general' :1, # pyExcelerator 0.6.3 does this.
'@' :1,
}
fmt_bracketed_sub = re.compile(r'\[[^]]*\]').sub
# Boolean format strings (actual cases)
# u'"Yes";"Yes";"No"'
# u'"True";"True";"False"'
# u'"On";"On";"Off"'
def is_date_format_string(book, fmt):
    """
    Heuristically decide whether the number-format string ``fmt`` denotes a
    date/time format (True) rather than a numeric one (False).

    Pass 1 strips quoted "text", backslash/underscore/asterisk escapes and
    ignorable punctuation; then [bracketed] chunks are removed and known
    non-date formats short-circuit to False.  Pass 2 scores the remainder:
    date characters (ymdhs) vs numeric placeholders (0 # ?); the larger
    score wins, with ambiguous cases logged as warnings.
    """
    # Heuristics:
    # Ignore "text" and [stuff in square brackets (aarrgghh -- see below)].
    # Handle backslashed-escaped chars properly.
    # E.g. hh\hmm\mss\s should produce a display like 23h59m59s
    # Date formats have one or more of ymdhs (caseless) in them.
    # Numeric formats have # and 0.
    # N.B. u'General"."' hence get rid of "text" first.
    # TODO: Find where formats are interpreted in Gnumeric
    # TODO: u'[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23)
    # state: 0 = normal, 1 = inside "quoted text", 2 = just saw an escape char
    state = 0
    s = ''
    # (to_py3): ignorable = skip_char_dict.has_key
    for c in fmt:
        if state == 0:
            if c == '"':
                state = 1
            elif c in r"\_*":
                state = 2
            elif c in skip_char_dict: # (to_py3):
                pass
            else:
                s += c
        elif state == 1:
            if c == '"':
                state = 0
        elif state == 2:
            # Ignore char after backslash, underscore or asterisk
            state = 0
        assert 0 <= state <= 2
    if book.verbosity >= 4:
        print("is_date_format_string: reduced format is %r" % s)
    s = fmt_bracketed_sub('', s)
    if s in non_date_formats:
        return False
    state = 0
    separator = ";"
    got_sep = 0
    date_count = num_count = 0
    for c in s:
        if c in date_char_dict:
            date_count += date_char_dict[c]
        elif c in num_char_dict:
            num_count += num_char_dict[c]
        elif c == separator:
            got_sep = 1
    # print num_count, date_count, repr(fmt)
    if date_count and not num_count:
        return True
    if num_count and not date_count:
        return False
    if date_count:
        # Both kinds of characters present: ambiguous, warn and fall through.
        fprintf(book.logfile,
            'WARNING *** is_date_format: ambiguous d=%d n=%d fmt=%r\n',
            date_count, num_count, fmt)
    elif not got_sep:
        # Neither kind present and no section separator: constant output.
        fprintf(book.logfile,
            "WARNING *** format %r produces constant result\n",
            fmt)
    return date_count > num_count
def handle_format(self, data, rectype=XL_FORMAT):
    """
    Process a FORMAT (or BIFF2/3-style FORMAT2) record on a Book (``self``):
    decode the format key and string per BIFF version, classify the string
    as date vs general via is_date_format_string(), and store the resulting
    Format object in self.format_map / self.format_list.
    """
    DEBUG = 0  # local flag; deliberately shadows the module-level DEBUG
    bv = self.biff_version
    if rectype == XL_FORMAT2:
        # BIFF2/3-style record can appear even in BIFF4/8 files; treat as old.
        bv = min(bv, 30)
    if not self.encoding:
        self.derive_encoding()
    strpos = 2
    if bv >= 50:
        fmtkey = unpack('<H', data[0:2])[0]
    else:
        # Before BIFF5 there is no explicit key; keys are sequential.
        fmtkey = self.actualfmtcount
        if bv <= 30:
            strpos = 0
    self.actualfmtcount += 1
    if bv >= 80:
        unistrg = unpack_unicode(data, 2)
    else:
        unistrg = unpack_string(data, strpos, self.encoding, lenlen=1)
    verbose = DEBUG or self.verbosity >= 3
    if verbose:
        fprintf(self.logfile,
            "FORMAT: count=%d fmtkey=0x%04x (%d) s=%r\n",
            self.actualfmtcount, fmtkey, fmtkey, unistrg)
    is_date_s = self.is_date_format_string(unistrg)
    ty = [FGE, FDT][is_date_s]
    if not(fmtkey > 163 or bv < 50):
        # user_defined if fmtkey > 163
        # N.B. Gnumeric incorrectly starts these at 50 instead of 164 :-(
        # if earlier than BIFF 5, standard info is useless
        std_ty = std_format_code_types.get(fmtkey, FUN)
        # print "std ty", std_ty
        is_date_c = std_ty == FDT
        if 0 < fmtkey < 50 and (is_date_c ^ is_date_s):
            # Standard classification disagrees with the string heuristic;
            # warn and enable the extra dump below.
            DEBUG = 2
            fprintf(self.logfile,
                "WARNING *** Conflict between "
                "std format key %d and its format string %r\n",
                fmtkey, unistrg)
    if DEBUG == 2:
        fprintf(self.logfile,
            "ty: %d; is_date_c: %r; is_date_s: %r; fmt_strg: %r",
            ty, is_date_c, is_date_s, unistrg)
    fmtobj = Format(fmtkey, ty, unistrg)
    if verbose:
        fmtobj.dump(self.logfile,
            header="--- handle_format [%d] ---" % (self.actualfmtcount-1, ))
    self.format_map[fmtkey] = fmtobj
    self.format_list.append(fmtobj)
# =============================================================================
def handle_palette(book, data):
    """
    Process a PALETTE record: replace the default colours in
    book.colour_map (starting at index 8) with the custom RGB values from
    the file, and record them in book.palette_record.  Tolerates up to 4
    bytes of trailing junk; ignored when formatting_info is off.
    """
    if not book.formatting_info:
        return
    verbose = DEBUG or book.verbosity >= 2
    n_colours, = unpack('<H', data[:2])
    expected_n_colours = (16, 56)[book.biff_version >= 50]
    if ((DEBUG or book.verbosity >= 1)
        and n_colours != expected_n_colours):
        fprintf(book.logfile,
            "NOTE *** Expected %d colours in PALETTE record, found %d\n",
            expected_n_colours, n_colours)
    elif verbose:
        fprintf(book.logfile,
            "PALETTE record with %d colours\n", n_colours)
    fmt = '<xx%di' % n_colours # use i to avoid long integers
    expected_size = 4 * n_colours + 2
    actual_size = len(data)
    # Tolerate up to 4 bytes of trailing junk on the record.
    tolerance = 4
    if not expected_size <= actual_size <= expected_size + tolerance:
        raise XLRDError('PALETTE record: expected size %d, actual size %d' % (expected_size, actual_size))
    colours = unpack(fmt, data[:expected_size])
    assert book.palette_record == [] # There should be only 1 PALETTE record
    # a colour will be 0xbbggrr
    # IOW, red is at the little end
    for i in range(n_colours):
        c = colours[i]
        red = c & 0xff
        green = (c >> 8) & 0xff
        blue = (c >> 16) & 0xff
        # Custom colours overwrite map entries 8..8+n-1 (0-7 are invariant).
        old_rgb = book.colour_map[8+i]
        new_rgb = (red, green, blue)
        book.palette_record.append(new_rgb)
        book.colour_map[8+i] = new_rgb
        if verbose:
            if new_rgb != old_rgb:
                print("%2d: %r -> %r" % (i, old_rgb, new_rgb), file=book.logfile)
def palette_epilogue(book):
    """
    After all records are read, check each font's colour index against the
    final colour map, recording used indexes in book.colour_indexes_used
    and reporting unknown ones.  Deferred to here because FONT records
    arrive *before* the PALETTE record.
    """
    for font in book.font_list:
        if font.font_index == 4:
            # the dummy placeholder font record -- never referenced
            continue
        colour_ix = font.colour_index
        if colour_ix == 0x7fff:
            # system window text colour -- always legal
            continue
        if colour_ix not in book.colour_map:
            print("Size of colour table:", len(book.colour_map))
            print("*** Font #%d (%r): colour index 0x%04x is unknown" \
                % (font.font_index, font.name, colour_ix), file=book.logfile)
            continue
        book.colour_indexes_used[colour_ix] = 1
    if book.verbosity >= 1:
        used = sorted(book.colour_indexes_used.keys())
        print("\nColour indexes used:\n%r\n" % used, file=book.logfile)
def handle_style(book, data):
    """
    Process a STYLE record: determine the style's name (built-in,
    user-defined, or the "Normal" fallback for a known corrupt-record
    shape) and map it to (built_in, xf_index) in book.style_name_map.
    """
    verbose = DEBUG or book.verbosity >= 2
    bv = book.biff_version
    flag_and_xfx, built_in_id, level = unpack('<HBB', data[:4])
    xf_index = flag_and_xfx & 0x0fff
    # BUGFIX: `data` is a bytes object, so it must be compared against a
    # bytes literal.  The original compared against the str "\0\0\0\0",
    # which is never equal to bytes on Python 3, silently disabling this
    # corrupt-record workaround.
    if (data == b"\0\0\0\0"
            and "Normal" not in book.style_name_map):
        # Erroneous record (doesn't have built-in bit set).
        # Example file supplied by Jeff Bell.
        built_in = 1
        built_in_id = 0
        xf_index = 0
        name = "Normal"
        level = 255
    elif flag_and_xfx & 0x8000:
        # built-in style
        built_in = 1
        name = built_in_style_names[built_in_id]
        if 1 <= built_in_id <= 2:
            # RowLevel_/ColLevel_ styles carry the outline level in the name
            name += str(level + 1)
    else:
        # user-defined style
        if bv >= 80:
            name = unpack_unicode(data, 2, lenlen=2)
        else:
            name = unpack_string(data, 2, book.encoding, lenlen=1)
        if verbose and not name:
            print("WARNING *** A user-defined style has a zero-length name", file=book.logfile)
        built_in = 0
        built_in_id = 0
        level = 0
    book.style_name_map[name] = (built_in, xf_index)
    if verbose:
        print("STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d name=%r" \
            % (built_in, xf_index, built_in_id, level, name), file=book.logfile)
def check_colour_indexes_in_obj(book, obj, orig_index):
    """
    Recursively walk the attributes of ``obj`` (descending into anything
    with a .dump method); mark every *colour_index* attribute found in
    book.colour_indexes_used, or report it as unknown to book.logfile.
    """
    for attr, nested in sorted(obj.__dict__.items()):
        if hasattr(nested, 'dump'):
            # nested formatting sub-object: recurse with the same XF index
            check_colour_indexes_in_obj(book, nested, orig_index)
        elif attr.find('colour_index') >= 0:
            if nested in book.colour_map:
                book.colour_indexes_used[nested] = 1
            else:
                oname = obj.__class__.__name__
                print("*** xf #%d : %s.%s = 0x%04x (unknown)" \
                    % (orig_index, oname, attr, nested), file=book.logfile)
def handle_xf(self, data):
    """Handle an XF (extended format) record; `self` is a Book instance.

    Unpacks the BIFF-version-specific packed payload into an XF object
    (font/format keys plus XFAlignment, XFBorder, XFBackground and
    XFProtection sub-objects) and appends it to self.xf_list.
    Field layouts per OOo docs s5.115 (XF record).
    """
    ### self is a Book instance
    # DEBUG = 0
    verbose = DEBUG or self.verbosity >= 3
    bv = self.biff_version
    xf = XF()
    # Pre-initialise sub-objects with defaults; older BIFF versions do not
    # carry all of these fields, so the defaults survive for them.
    xf.alignment = XFAlignment()
    xf.alignment.indent_level = 0
    xf.alignment.shrink_to_fit = 0
    xf.alignment.text_direction = 0
    xf.border = XFBorder()
    xf.border.diag_up = 0
    xf.border.diag_down = 0
    xf.border.diag_colour_index = 0
    xf.border.diag_line_style = 0 # no line
    xf.background = XFBackground()
    xf.protection = XFProtection()
    # fill in the known standard formats
    if bv >= 50 and not self.xfcount:
        # i.e. do this once before we process the first XF record
        for x in list(std_format_code_types.keys()):
            if x not in self.format_map:
                ty = std_format_code_types[x]
                fmt_str = std_format_strings[x]
                fmtobj = Format(x, ty, fmt_str)
                self.format_map[x] = fmtobj
    if bv >= 80: # BIFF8 (Excel 97+): 20-byte fixed layout
        unpack_fmt = '<HHHBBBBIiH'
        (xf.font_index, xf.format_key, pkd_type_par,
        pkd_align1, xf.alignment.rotation, pkd_align2,
        pkd_used, pkd_brdbkg1, pkd_brdbkg2, pkd_brdbkg3,
        ) = unpack(unpack_fmt, data[0:20])
        upkbits(xf.protection, pkd_type_par, (
            (0, 0x01, 'cell_locked'),
            (1, 0x02, 'formula_hidden'),
            ))
        upkbits(xf, pkd_type_par, (
            (2, 0x0004, 'is_style'),
            # Following is not in OOo docs, but is mentioned
            # in Gnumeric source and also in (deep breath)
            # org.apache.poi.hssf.record.ExtendedFormatRecord.java
            (3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
            (4, 0xFFF0, 'parent_style_index'),
            ))
        upkbits(xf.alignment, pkd_align1, (
            (0, 0x07, 'hor_align'),
            (3, 0x08, 'text_wrapped'),
            (4, 0x70, 'vert_align'),
            ))
        upkbits(xf.alignment, pkd_align2, (
            (0, 0x0f, 'indent_level'),
            (4, 0x10, 'shrink_to_fit'),
            (6, 0xC0, 'text_direction'),
            ))
        # One "attribute group used" flag bit per sub-group, from bit 2 up.
        reg = pkd_used >> 2
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, reg & 1)
            reg >>= 1
        upkbitsL(xf.border, pkd_brdbkg1, (
            (0, 0x0000000f, 'left_line_style'),
            (4, 0x000000f0, 'right_line_style'),
            (8, 0x00000f00, 'top_line_style'),
            (12, 0x0000f000, 'bottom_line_style'),
            (16, 0x007f0000, 'left_colour_index'),
            (23, 0x3f800000, 'right_colour_index'),
            (30, 0x40000000, 'diag_down'),
            (31, 0x80000000, 'diag_up'),
            ))
        upkbits(xf.border, pkd_brdbkg2, (
            (0, 0x0000007F, 'top_colour_index'),
            (7, 0x00003F80, 'bottom_colour_index'),
            (14, 0x001FC000, 'diag_colour_index'),
            (21, 0x01E00000, 'diag_line_style'),
            ))
        upkbitsL(xf.background, pkd_brdbkg2, (
            (26, 0xFC000000, 'fill_pattern'),
            ))
        upkbits(xf.background, pkd_brdbkg3, (
            (0, 0x007F, 'pattern_colour_index'),
            (7, 0x3F80, 'background_colour_index'),
            ))
    elif bv >= 50: # BIFF5/BIFF7 (Excel 5.0 / 95): 16-byte layout
        unpack_fmt = '<HHHBBIi'
        (xf.font_index, xf.format_key, pkd_type_par,
        pkd_align1, pkd_orient_used,
        pkd_brdbkg1, pkd_brdbkg2,
        ) = unpack(unpack_fmt, data[0:16])
        upkbits(xf.protection, pkd_type_par, (
            (0, 0x01, 'cell_locked'),
            (1, 0x02, 'formula_hidden'),
            ))
        upkbits(xf, pkd_type_par, (
            (2, 0x0004, 'is_style'),
            (3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
            (4, 0xFFF0, 'parent_style_index'),
            ))
        upkbits(xf.alignment, pkd_align1, (
            (0, 0x07, 'hor_align'),
            (3, 0x08, 'text_wrapped'),
            (4, 0x70, 'vert_align'),
            ))
        # Pre-BIFF8 "orientation" (0-3) is mapped losslessly onto the
        # BIFF8-style rotation value.
        orientation = pkd_orient_used & 0x03
        xf.alignment.rotation = [0, 255, 90, 180][orientation]
        reg = pkd_orient_used >> 2
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, reg & 1)
            reg >>= 1
        upkbitsL(xf.background, pkd_brdbkg1, (
            ( 0, 0x0000007F, 'pattern_colour_index'),
            ( 7, 0x00003F80, 'background_colour_index'),
            (16, 0x003F0000, 'fill_pattern'),
            ))
        upkbitsL(xf.border, pkd_brdbkg1, (
            (22, 0x01C00000, 'bottom_line_style'),
            (25, 0xFE000000, 'bottom_colour_index'),
            ))
        upkbits(xf.border, pkd_brdbkg2, (
            ( 0, 0x00000007, 'top_line_style'),
            ( 3, 0x00000038, 'left_line_style'),
            ( 6, 0x000001C0, 'right_line_style'),
            ( 9, 0x0000FE00, 'top_colour_index'),
            (16, 0x007F0000, 'left_colour_index'),
            (23, 0x3F800000, 'right_colour_index'),
            ))
    elif bv >= 40: # BIFF4 (Excel 4.0): 12-byte layout
        unpack_fmt = '<BBHBBHI'
        (xf.font_index, xf.format_key, pkd_type_par,
        pkd_align_orient, pkd_used,
        pkd_bkg_34, pkd_brd_34,
        ) = unpack(unpack_fmt, data[0:12])
        upkbits(xf.protection, pkd_type_par, (
            (0, 0x01, 'cell_locked'),
            (1, 0x02, 'formula_hidden'),
            ))
        upkbits(xf, pkd_type_par, (
            (2, 0x0004, 'is_style'),
            (3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
            (4, 0xFFF0, 'parent_style_index'),
            ))
        upkbits(xf.alignment, pkd_align_orient, (
            (0, 0x07, 'hor_align'),
            (3, 0x08, 'text_wrapped'),
            (4, 0x30, 'vert_align'),
            ))
        orientation = (pkd_align_orient & 0xC0) >> 6
        xf.alignment.rotation = [0, 255, 90, 180][orientation]
        reg = pkd_used >> 2
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, reg & 1)
            reg >>= 1
        upkbits(xf.background, pkd_bkg_34, (
            ( 0, 0x003F, 'fill_pattern'),
            ( 6, 0x07C0, 'pattern_colour_index'),
            (11, 0xF800, 'background_colour_index'),
            ))
        upkbitsL(xf.border, pkd_brd_34, (
            ( 0, 0x00000007, 'top_line_style'),
            ( 3, 0x000000F8, 'top_colour_index'),
            ( 8, 0x00000700, 'left_line_style'),
            (11, 0x0000F800, 'left_colour_index'),
            (16, 0x00070000, 'bottom_line_style'),
            (19, 0x00F80000, 'bottom_colour_index'),
            (24, 0x07000000, 'right_line_style'),
            (27, 0xF8000000, 'right_colour_index'),
            ))
    elif bv == 30: # BIFF3 (Excel 3.0): 12-byte layout, no vert_align/rotation
        unpack_fmt = '<BBBBHHI'
        (xf.font_index, xf.format_key, pkd_type_prot,
        pkd_used, pkd_align_par,
        pkd_bkg_34, pkd_brd_34,
        ) = unpack(unpack_fmt, data[0:12])
        upkbits(xf.protection, pkd_type_prot, (
            (0, 0x01, 'cell_locked'),
            (1, 0x02, 'formula_hidden'),
            ))
        upkbits(xf, pkd_type_prot, (
            (2, 0x0004, 'is_style'),
            (3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
            ))
        upkbits(xf.alignment, pkd_align_par, (
            (0, 0x07, 'hor_align'),
            (3, 0x08, 'text_wrapped'),
            ))
        upkbits(xf, pkd_align_par, (
            (4, 0xFFF0, 'parent_style_index'),
            ))
        reg = pkd_used >> 2
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, reg & 1)
            reg >>= 1
        upkbits(xf.background, pkd_bkg_34, (
            ( 0, 0x003F, 'fill_pattern'),
            ( 6, 0x07C0, 'pattern_colour_index'),
            (11, 0xF800, 'background_colour_index'),
            ))
        upkbitsL(xf.border, pkd_brd_34, (
            ( 0, 0x00000007, 'top_line_style'),
            ( 3, 0x000000F8, 'top_colour_index'),
            ( 8, 0x00000700, 'left_line_style'),
            (11, 0x0000F800, 'left_colour_index'),
            (16, 0x00070000, 'bottom_line_style'),
            (19, 0x00F80000, 'bottom_colour_index'),
            (24, 0x07000000, 'right_line_style'),
            (27, 0xF8000000, 'right_colour_index'),
            ))
        # BIFF3 has no vertical alignment / rotation fields; use defaults.
        xf.alignment.vert_align = 2 # bottom
        xf.alignment.rotation = 0
    elif bv == 21: # BIFF2 (Excel 2.x): 4-byte layout
        #### Warning: incomplete treatment; formatting_info not fully supported.
        #### Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16],
        #### and create XF[0:16] like the standard ones in BIFF8
        #### *AND* add 16 to all XF references in cell records :-(
        (xf.font_index, format_etc, halign_etc) = unpack('<BxBB', data)
        xf.format_key = format_etc & 0x3F
        upkbits(xf.protection, format_etc, (
            (6, 0x40, 'cell_locked'),
            (7, 0x80, 'formula_hidden'),
            ))
        upkbits(xf.alignment, halign_etc, (
            (0, 0x07, 'hor_align'),
            ))
        # BIFF2 has only on/off bits per border side; synthesise the
        # equivalent colour/style pairs.
        for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
            if halign_etc & mask:
                colour_index, line_style = 8, 1 # black, thin
            else:
                colour_index, line_style = 0, 0 # none, none
            setattr(xf.border, side + '_colour_index', colour_index)
            setattr(xf.border, side + '_line_style', line_style)
        bg = xf.background
        if halign_etc & 0x80:
            bg.fill_pattern = 17
        else:
            bg.fill_pattern = 0
        bg.background_colour_index = 9 # white
        bg.pattern_colour_index = 8 # black
        xf.parent_style_index = 0 # ???????????
        xf.alignment.vert_align = 2 # bottom
        xf.alignment.rotation = 0
        # BIFF2 records don't carry "group used" flags; mark all as used.
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, 1)
    else:
        raise XLRDError('programmer stuff-up: bv=%d' % bv)
    xf.xf_index = len(self.xf_list)
    self.xf_list.append(xf)
    self.xfcount += 1
    if verbose:
        xf.dump(
            self.logfile,
            header="--- handle_xf: xf[%d] ---" % xf.xf_index,
            footer=" ",
            )
    # Now for some assertions ...
    if self.formatting_info:
        if xf.is_style and xf.parent_style_index != 0x0FFF:
            msg = "WARNING *** XF[%d] is a style XF but parent_style_index is 0x%04x, not 0x0fff\n"
            fprintf(self.logfile, msg, xf.xf_index, xf.parent_style_index)
        check_colour_indexes_in_obj(self, xf, xf.xf_index)
    if xf.format_key not in self.format_map:
        msg = "WARNING *** XF[%d] unknown (raw) format key (%d, 0x%04x)\n"
        fprintf(self.logfile, msg,
            xf.xf_index, xf.format_key, xf.format_key)
        xf.format_key = 0
def xf_epilogue(self):
    """Post-process the XF list once all XF records have been read.

    Maps each XF's format to a cell type (number vs date) for fast cell
    typing, and — when formatting_info is on — sanity-checks the
    cell-XF / style-XF parent relationships.
    """
    # self is a Book instance.
    self._xf_epilogue_done = 1
    num_xfs = len(self.xf_list)
    verbose = DEBUG or self.verbosity >= 3
    verbose1 = DEBUG or self.verbosity >= 1
    if verbose:
        fprintf(self.logfile, "xf_epilogue called ...\n")
    def check_same(book_arg, xf_arg, parent_arg, attr):
        # Report when a cell XF and its parent style XF disagree on an
        # attribute group that neither claims to override.
        # the _arg caper is to avoid a Warning msg from Python 2.1 :-(
        if getattr(xf_arg, attr) != getattr(parent_arg, attr):
            fprintf(book_arg.logfile,
                "NOTE !!! XF[%d] parent[%d] %s different\n",
                xf_arg.xf_index, parent_arg.xf_index, attr)
    for xfx in range(num_xfs):
        xf = self.xf_list[xfx]
        if xf.format_key not in self.format_map:
            msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
            fprintf(self.logfile, msg,
                    xf.xf_index, xf.format_key, xf.format_key)
            xf.format_key = 0
        # Map format type to the cell type reported to API users.
        cellty_from_fmtty = {
            FNU: XL_CELL_NUMBER,
            FUN: XL_CELL_NUMBER,
            FGE: XL_CELL_NUMBER,
            FDT: XL_CELL_DATE,
            FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
            }
        fmt = self.format_map[xf.format_key]
        cellty = cellty_from_fmtty[fmt.type]
        self._xf_index_to_xl_type_map[xf.xf_index] = cellty
        # Now for some assertions etc
        if not self.formatting_info:
            continue
        if xf.is_style:
            continue
        if not(0 <= xf.parent_style_index < num_xfs):
            fprintf(self.logfile,
                "WARNING *** XF[%d]: is_style=%d but parent_style_index=%d\n",
                xf.xf_index, xf.is_style, xf.parent_style_index)
            # make it conform
            xf.parent_style_index = 0
        if self.biff_version >= 30:
            assert xf.parent_style_index != xf.xf_index
            assert self.xf_list[xf.parent_style_index].is_style
            if verbose1 and xf.parent_style_index > xf.xf_index:
                fprintf(self.logfile,
                    "NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n",
                    xf.xf_index, xf.parent_style_index)
            parent = self.xf_list[xf.parent_style_index]
            if not xf._alignment_flag and not parent._alignment_flag:
                if verbose1: check_same(self, xf, parent, 'alignment')
            if not xf._background_flag and not parent._background_flag:
                if verbose1: check_same(self, xf, parent, 'background')
            if not xf._border_flag and not parent._border_flag:
                if verbose1: check_same(self, xf, parent, 'border')
            if not xf._protection_flag and not parent._protection_flag:
                if verbose1: check_same(self, xf, parent, 'protection')
            if not xf._format_flag and not parent._format_flag:
                if verbose1 and xf.format_key != parent.format_key:
                    fprintf(self.logfile,
                        "NOTE !!! XF[%d] fmtk=%d, parent[%d] fmtk=%r\n%r / %r\n",
                        xf.xf_index, xf.format_key, parent.xf_index, parent.format_key,
                        self.format_map[xf.format_key].format_str,
                        self.format_map[parent.format_key].format_str)
            if not xf._font_flag and not parent._font_flag:
                if verbose1 and xf.font_index != parent.font_index:
                    fprintf(self.logfile,
                        "NOTE !!! XF[%d] fontx=%d, parent[%d] fontx=%r\n",
                        xf.xf_index, xf.font_index, parent.xf_index, parent.font_index)
def initialise_book(book):
    """Prepare a Book instance for formatting work.

    Sets up the colour map and attaches all the formatting handler
    functions defined in this module as methods of the book's class.
    """
    initialise_colour_map(book)
    book._xf_epilogue_done = 0
    # Graft each record handler onto the Book class under its own name.
    for handler in (
            handle_font,
            handle_efont,
            handle_format,
            is_date_format_string,
            handle_palette,
            palette_epilogue,
            handle_style,
            handle_xf,
            xf_epilogue,
            ):
        setattr(book.__class__, handler.__name__, handler)
class XFBorder(BaseObject, EqNeAttrs):
    """A collection of the border-related attributes of an XF record.

    Items correspond to those in the Excel UI's Format/Cells/Border tab.
    An explanation of "colour index" is given in the Formatting
    section at the start of this document.
    There are five line style attributes; possible values and the
    associated meanings are:
    0 = No line,
    1 = Thin,
    2 = Medium,
    3 = Dashed,
    4 = Dotted,
    5 = Thick,
    6 = Double,
    7 = Hair,
    8 = Medium dashed,
    9 = Thin dash-dotted,
    10 = Medium dash-dotted,
    11 = Thin dash-dot-dotted,
    12 = Medium dash-dot-dotted,
    13 = Slanted medium dash-dotted.
    The line styles 8 to 13 appear in BIFF8 files (Excel 97 and later) only.
    For pictures of the line styles, refer to OOo docs s3.10 (p22)
    "Line Styles for Cell Borders (BIFF3-BIFF8)".
    New in version 0.6.1
    """
    # The colour index for the cell's top line
    top_colour_index = 0
    # The colour index for the cell's bottom line
    bottom_colour_index = 0
    # The colour index for the cell's left line
    left_colour_index = 0
    # The colour index for the cell's right line
    right_colour_index = 0
    # The colour index for the cell's diagonal lines, if any
    diag_colour_index = 0
    # The line style for the cell's top line
    top_line_style = 0
    # The line style for the cell's bottom line
    bottom_line_style = 0
    # The line style for the cell's left line
    left_line_style = 0
    # The line style for the cell's right line
    right_line_style = 0
    # The line style for the cell's diagonal lines, if any
    diag_line_style = 0
    # 1 = draw a diagonal from top left to bottom right
    diag_down = 0
    # 1 = draw a diagonal from bottom left to top right
    diag_up = 0
class XFBackground(BaseObject, EqNeAttrs):
    """A collection of the background-related attributes of an XF record.

    Items correspond to those in the Excel UI's Format/Cells/Patterns tab.
    An explanation of "colour index" is given in the Formatting
    section at the start of this document.
    New in version 0.6.1
    """
    # Fill pattern; see section 3.11 of the OOo docs.
    fill_pattern = 0
    # Background colour index; see section 3.11 of the OOo docs.
    background_colour_index = 0
    # Pattern colour index; see section 3.11 of the OOo docs.
    pattern_colour_index = 0
class XFAlignment(BaseObject, EqNeAttrs):
    """A collection of the alignment and similar attributes of an XF record.

    Items correspond to those in the Excel UI's Format/Cells/Alignment tab.
    New in version 0.6.1
    """
    # Horizontal alignment; values: section 5.115 (p 219) of OOo docs
    hor_align = 0
    # Vertical alignment; values: section 5.115 (p 220) of OOo docs
    vert_align = 0
    # Values: section 5.115 (p 220) of OOo docs.
    # Note: file versions BIFF7 and earlier use the documented
    # "orientation" attribute; this will be mapped (without loss)
    # into "rotation".
    rotation = 0
    # 1 = text is wrapped at right margin
    text_wrapped = 0
    # A number in range(15).
    indent_level = 0
    # 1 = shrink font size to fit text into cell.
    shrink_to_fit = 0
    # 0 = according to context; 1 = left-to-right; 2 = right-to-left
    text_direction = 0
class XFProtection(BaseObject, EqNeAttrs):
    """A collection of the protection-related attributes of an XF record.

    Items correspond to those in the Excel UI's Format/Cells/Protection tab.
    Note the OOo docs include the "cell or style" bit
    in this bundle of attributes.
    This is incorrect; the bit is used in determining which bundles to use.
    New in version 0.6.1
    """
    # 1 = Cell is prevented from being changed, moved, resized, or deleted
    # (only if the sheet is protected).
    cell_locked = 0
    # 1 = Hide formula so that it doesn't appear in the formula bar when
    # the cell is selected (only if the sheet is protected).
    formula_hidden = 0
class XF(BaseObject):
    """eXtended Formatting information for cells, rows, columns and styles.

    Each of the 6 flags below describes the validity of
    a specific group of attributes.
    In cell XFs, flag==0 means the attributes of the parent style XF are used,
    (but only if the attributes are valid there); flag==1 means the attributes
    of this XF are used.
    In style XFs, flag==0 means the attribute setting is valid; flag==1 means
    the attribute should be ignored.
    Note that the API
    provides both "raw" XFs and "computed" XFs -- in the latter case, cell XFs
    have had the above inheritance mechanism applied.
    """
    # 0 = cell XF, 1 = style XF
    is_style = 0
    # cell XF: Index into Book.xf_list
    # of this XF's style XF
    # style XF: 0xFFF
    parent_style_index = 0
    # "Attribute group used" flags; see the class docstring for semantics.
    _format_flag = 0
    _font_flag = 0
    _alignment_flag = 0
    _border_flag = 0
    _background_flag = 0
    _protection_flag = 0
    # Index into Book.xf_list
    xf_index = 0
    # Index into Book.font_list
    font_index = 0
    # Key into Book.format_map
    #
    # Warning: OOo docs on the XF record call this "Index to FORMAT record".
    # It is not an index in the Python sense. It is a key to a map.
    # It is true **only** for Excel 4.0 and earlier files
    # that the key into format_map from an XF instance
    # is the same as the index into format_list, and **only**
    # if the index is less than 164.
    #
    format_key = 0
    # An instance of an XFProtection object.
    protection = None
    # An instance of an XFBackground object.
    background = None
    # An instance of an XFAlignment object.
    alignment = None
    # An instance of an XFBorder object.
    border = None
| mit |
pengdai2/sudoku | strategy/wxyz_wing.py | 1 | 2314 | #
# WXYZ-Wing strategy module
#
import itertools
from logger import *
from playbook import *
from sudoku import *
from almost_locked_set import *
class WXYZWing(AlmostLockedSet):
    """
    WXYZ-WING can be considered as a group of 4 nodes and 4 hints,
    that has exactly one non-restricted common hint. We use that
    hint (Z) to eliminate since at least one of Z will be the solution.

    WXYZ-WING consists of a hinge, or the subset of nodes that can see
    all other nodes. If the hinge contains Z, Z may be eliminated from
    an area visible to all nodes in the WXYZ-WING. Otherwise, the area
    expands to one that is visible to all nodes but the hinge.

    WXYZ-WING is actually a special case of ALS, where the wing is made
    up of a 1-node and a 3-node ALS, respectively. The two ALS's share
    a restricted common hint, W, and a unrestricted common hint, Z. As
    such, the total number of hints remains 4 across the two ALS's.
    """

    __metaclass__ = StrategyMeta

    def __init__(self):
        AlmostLockedSet.__init__(self, "WXYZ-WING")

    def wxyz_wing(self, plan, als1, als3):
        """Validate the WXYZ-WING pattern and process it if found.

        Returns True if the pattern matched and hints were purged.
        """
        # Exactly one restricted (W) and one unrestricted (Z) common hint.
        rcs, ucs = self.als_urc_hints(als1, als3)
        if len(ucs) != 1 or len(rcs) != 1:
            return False
        hinge = self.als_hinge(als1, als3)
        wing = (als1 | als3) - hinge
        reason = {"hint": ucs, "hinge": hinge, "wing": wing}
        # The single unrestricted common hint (Z).
        hint = next(iter(ucs))
        # Z can be purged from the area visible to both ALS's.
        overlap = self.als_related(als1, hint) & self.als_related(als3, hint)
        if self.test_purge(overlap, set(ucs)):
            self.purge_hints(plan, overlap, set(ucs), reason)
            return True
        return False

    def run(self, plan):
        """WXYZ-WING strategy: try every (1-node ALS, 3-node ALS) pair."""
        status = False
        alsets1 = self.als_find_in_nodes(plan.get_sudoku().get_incomplete(), [1])
        alsets3 = self.als_find_in_lots(plan.get_sudoku().get_lots(), [3])
        for als1 in alsets1:
            if any([x.is_complete() for x in als1]):
                continue
            for als3 in alsets3:
                if any([x.is_complete() for x in als3]):
                    continue
                if self.wxyz_wing(plan, als1, als3):
                    status = True
        return status
| apache-2.0 |
zero-ui/miniblink49 | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/results.py | 64 | 8758 | """Results of coverage measurement."""
import os
from coverage.backward import set, sorted # pylint: disable=W0622
from coverage.misc import format_lines, join_regex, NoSource
from coverage.parser import CodeParser
class Analysis(object):
    """The results of analyzing a code unit."""
    def __init__(self, cov, code_unit):
        # `cov` is the coverage() instance; `code_unit` identifies the file.
        self.coverage = cov
        self.code_unit = code_unit
        self.filename = self.code_unit.filename
        ext = os.path.splitext(self.filename)[1]
        source = None
        if ext == '.py':
            if not os.path.exists(self.filename):
                # The source may live inside a zip/egg on sys.path.
                source = self.coverage.file_locator.get_zip_data(self.filename)
                if not source:
                    raise NoSource("No source for code: %r" % self.filename)
        self.parser = CodeParser(
            text=source, filename=self.filename,
            exclude=self.coverage._exclude_regex('exclude')
            )
        self.statements, self.excluded = self.parser.parse_source()
        # Identify missing statements.
        executed = self.coverage.data.executed_lines(self.filename)
        exec1 = self.parser.first_lines(executed)
        self.missing = sorted(set(self.statements) - set(exec1))
        if self.coverage.data.has_arcs():
            # Lines excluded from branch analysis (partial-branch pragmas).
            self.no_branch = self.parser.lines_matching(
                join_regex(self.coverage.config.partial_list),
                join_regex(self.coverage.config.partial_always_list)
                )
            n_branches = self.total_branches()
            mba = self.missing_branch_arcs()
            # Don't double-count branches on lines that are entirely missing.
            n_missing_branches = sum(
                [len(v) for k,v in mba.items() if k not in self.missing]
                )
        else:
            n_branches = n_missing_branches = 0
            self.no_branch = set()
        # Roll-up statistics for this file.
        self.numbers = Numbers(
            n_files=1,
            n_statements=len(self.statements),
            n_excluded=len(self.excluded),
            n_missing=len(self.missing),
            n_branches=n_branches,
            n_missing_branches=n_missing_branches,
            )
    def missing_formatted(self):
        """The missing line numbers, formatted nicely.

        Returns a string like "1-2, 5-11, 13-14".
        """
        return format_lines(self.statements, self.missing)
    def has_arcs(self):
        """Were arcs measured in this result?"""
        return self.coverage.data.has_arcs()
    def arc_possibilities(self):
        """Returns a sorted list of the arcs in the code."""
        arcs = self.parser.arcs()
        return arcs
    def arcs_executed(self):
        """Returns a sorted list of the arcs actually executed in the code."""
        executed = self.coverage.data.executed_arcs(self.filename)
        # Normalize arc endpoints to the first line of their statement.
        m2fl = self.parser.first_line
        executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed]
        return sorted(executed)
    def arcs_missing(self):
        """Returns a sorted list of the arcs in the code not executed."""
        possible = self.arc_possibilities()
        executed = self.arcs_executed()
        missing = [
            p for p in possible
            if p not in executed
            and p[0] not in self.no_branch
            ]
        return sorted(missing)
    def arcs_unpredicted(self):
        """Returns a sorted list of the executed arcs missing from the code."""
        possible = self.arc_possibilities()
        executed = self.arcs_executed()
        # Exclude arcs here which connect a line to itself.  They can occur
        # in executed data in some cases.  This is where they can cause
        # trouble, and here is where it's the least burden to remove them.
        unpredicted = [
            e for e in executed
            if e not in possible
            and e[0] != e[1]
            ]
        return sorted(unpredicted)
    def branch_lines(self):
        """Returns a list of line numbers that have more than one exit."""
        exit_counts = self.parser.exit_counts()
        return [l1 for l1,count in exit_counts.items() if count > 1]
    def total_branches(self):
        """How many total branches are there?"""
        exit_counts = self.parser.exit_counts()
        return sum([count for count in exit_counts.values() if count > 1])
    def missing_branch_arcs(self):
        """Return arcs that weren't executed from branch lines.

        Returns {l1:[l2a,l2b,...], ...}
        """
        missing = self.arcs_missing()
        branch_lines = set(self.branch_lines())
        mba = {}
        for l1, l2 in missing:
            if l1 in branch_lines:
                if l1 not in mba:
                    mba[l1] = []
                mba[l1].append(l2)
        return mba
    def branch_stats(self):
        """Get stats about branches.

        Returns a dict mapping line numbers to a tuple:
        (total_exits, taken_exits).
        """
        exit_counts = self.parser.exit_counts()
        missing_arcs = self.missing_branch_arcs()
        stats = {}
        for lnum in self.branch_lines():
            exits = exit_counts[lnum]
            try:
                missing = len(missing_arcs[lnum])
            except KeyError:
                # No missing arcs from this line: all exits were taken.
                missing = 0
            stats[lnum] = (exits, exits - missing)
        return stats
class Numbers(object):
    """The numerical results of measuring coverage.

    This holds the basic statistics from `Analysis`, and is used to roll
    up statistics across files.
    """
    # Class-wide precision: number of decimal places in reported
    # percentages.  _near0/_near100 are the clamping thresholds and are
    # recomputed whenever set_precision() changes the precision.
    _precision = 0
    _near0 = 1.0
    _near100 = 99.0

    def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
                 n_branches=0, n_missing_branches=0
                 ):
        self.n_files = n_files
        self.n_statements = n_statements
        self.n_excluded = n_excluded
        self.n_missing = n_missing
        self.n_branches = n_branches
        self.n_missing_branches = n_missing_branches

    def set_precision(cls, precision):
        """Set the number of decimal places used to report percentages."""
        assert 0 <= precision < 10
        cls._precision = precision
        cls._near0 = 1.0 / 10**precision
        cls._near100 = 100.0 - cls._near0
    set_precision = classmethod(set_precision)

    def _get_n_executed(self):
        """Returns the number of executed statements."""
        return self.n_statements - self.n_missing
    n_executed = property(_get_n_executed)

    def _get_n_executed_branches(self):
        """Returns the number of executed branches."""
        return self.n_branches - self.n_missing_branches
    n_executed_branches = property(_get_n_executed_branches)

    def _get_pc_covered(self):
        """Returns a single percentage value for coverage."""
        if not self.n_statements:
            # Nothing to cover counts as fully covered.
            return 100.0
        covered = self.n_executed + self.n_executed_branches
        possible = self.n_statements + self.n_branches
        return 100.0 * covered / possible
    pc_covered = property(_get_pc_covered)

    def _get_pc_covered_str(self):
        """Returns the percent covered, as a string, without a percent sign.

        Note that "0" is only returned when the value is truly zero, and "100"
        is only returned when the value is truly 100.  Rounding can never
        result in either "0" or "100".
        """
        pc = self.pc_covered
        if 0 < pc < self._near0:
            pc = self._near0        # never round partial coverage down to 0
        elif self._near100 < pc < 100:
            pc = self._near100      # never round partial coverage up to 100
        else:
            pc = round(pc, self._precision)
        return "%.*f" % (self._precision, pc)
    pc_covered_str = property(_get_pc_covered_str)

    def pc_str_width(cls):
        """How many characters wide can pc_covered_str be?"""
        width = 3   # room for "100"
        if cls._precision > 0:
            width += 1 + cls._precision     # decimal point plus digits
        return width
    pc_str_width = classmethod(pc_str_width)

    def __add__(self, other):
        """Combine two `Numbers` by summing every statistic."""
        total = Numbers()
        total.n_files = self.n_files + other.n_files
        total.n_statements = self.n_statements + other.n_statements
        total.n_excluded = self.n_excluded + other.n_excluded
        total.n_missing = self.n_missing + other.n_missing
        total.n_branches = self.n_branches + other.n_branches
        total.n_missing_branches = (self.n_missing_branches +
                                    other.n_missing_branches)
        return total

    def __radd__(self, other):
        # Implementing 0+Numbers allows us to sum() a list of Numbers.
        if other == 0:
            return self
        return NotImplemented
| gpl-3.0 |
DirkHoffmann/indico | indico/core/oauth/__init__.py | 4 | 1247 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
from indico.core import signals
from indico.core.db import db
from .logger import logger
from .oauth2 import require_oauth
__all__ = ['require_oauth']
@signals.app_created.connect
def _no_ssl_required_on_debug(app, **kwargs):
    """Allow OAuth over plain HTTP when running in debug or test mode."""
    if app.debug or app.testing:
        # Authlib refuses insecure transport unless this env var is set.
        os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
@signals.users.merged.connect
def _delete_merged_user_tokens(target, source, **kwargs):
    """Move the merged-away user's OAuth app links onto the target user.

    Links for apps the target does not use yet are reassigned as-is;
    links for apps both users have are merged (scopes united, tokens
    moved) and the source link is deleted.
    """
    target_app_links = {link.application: link for link in target.oauth_app_links}
    for source_link in source.oauth_app_links.all():
        try:
            target_link = target_app_links[source_link.application]
        except KeyError:
            # Target has no link for this application yet -> just reassign.
            logger.info('merge: reassigning %r to %r', source_link, target)
            source_link.user = target
        else:
            # Both users linked the same application -> merge into target's.
            logger.info('merge: merging %r into %r', source_link, target_link)
            target_link.update_scopes(set(source_link.scopes))
            target_link.tokens.extend(source_link.tokens)
            db.session.delete(source_link)
| gpl-3.0 |
psav/cfme_tests | cfme/services/service_catalogs/__init__.py | 3 | 4282 | import importscan
import sentaku
from widgetastic.widget import ParametrizedView, Select, Text, View
from widgetastic_patternfly import Button, Input, BootstrapSelect
from widgetastic.utils import deflatten_dict, Parameter, ParametrizedString, VersionPick
from cfme.common import Taggable
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance import Navigatable
from cfme.utils.update import Updateable
from cfme.utils.version import Version
class ServiceCatalogs(Navigatable, Taggable, Updateable, sentaku.modeling.ElementMixin):
    """
    Service Catalogs main class, used to context-switch between the UI
    and the SSUI.  All the methods below are implemented on both the UI
    and the SSUI side.
    """
    # Contextual methods dispatch to the ui/ssui implementation that is
    # active in the current appliance context.
    order = sentaku.ContextualMethod()
    add_to_shopping_cart = sentaku.ContextualMethod()
    def __init__(self, appliance, catalog=None, name=None, stack_data=None,
                 dialog_values=None, ansible_dialog_values=None):
        # stack_data / dialog_values / ansible_dialog_values hold the
        # order-form values for the respective catalog item types.
        Navigatable.__init__(self, appliance=appliance)
        self.catalog = catalog
        self.name = name
        self.stack_data = stack_data
        self.dialog_values = dialog_values
        self.ansible_dialog_values = ansible_dialog_values
        self.parent = self.appliance.context
class BaseOrderForm(View):
    """Represents the order form of a service.

    This form doesn't have a static set of elements apart from titles and buttons. In the most cases
    the fields can be either regular inputs or dropdowns. Their locators depend on field names. In
    order to find and fill required fields a parametrized view is used here. The keys of a fill
    dictionary should match ids of the fields. For instance there is a field with such html
    <input id="some_key"></input>, so a fill dictionary should look like that:
    {"some_key": "some_value"}
    """
    title = Text('#explorer_title_text')
    dialog_title = Text(
        VersionPick({
            Version.lowest(): ".//div[@id='main_div']//h3",
            "5.9": ".//div[@id='main_div']//h2"
        })
    )
    @ParametrizedView.nested
    class fields(ParametrizedView):  # noqa
        # Parametrized by the field id; each widget variant below targets
        # the same id through a different element type.
        PARAMETERS = ("key",)
        input = Input(id=Parameter("key"))
        select = Select(id=Parameter("key"))
        param_input = Input(id=ParametrizedString("param_{key}"))
        dropdown = VersionPick({
            Version.lowest(): BootstrapSelect(Parameter("key")),
            "5.9": BootstrapSelect(locator=ParametrizedString(
                ".//div[contains(@class, 'bootstrap-select') and "
                "select[@id={key|quote}]]"))
        })
        param_dropdown = VersionPick({
            Version.lowest(): BootstrapSelect(ParametrizedString("param_{key}")),
            "5.9": BootstrapSelect(locator=ParametrizedString(
                ".//div[contains(@class, 'bootstrap-select') and "
                "select[@id='param_{key}']]"))
        })
        @property
        def visible_widget(self):
            # Pick whichever widget variant is actually rendered for this
            # field id.
            if self.input.is_displayed:
                return self.input
            elif self.dropdown.is_displayed:
                return self.dropdown
            elif self.param_input.is_displayed:
                return self.param_input
            elif self.param_dropdown.is_displayed:
                return self.param_dropdown
            elif self.select.is_displayed:
                return self.select
            else:
                raise ItemNotFound("Visible widget is not found")
        def read(self):
            """Read the current value of the visible widget."""
            return self.visible_widget.read()
        def fill(self, value):
            """Fill the visible widget; returns True if the value changed."""
            return self.visible_widget.fill(value)
    def fill(self, fill_data):
        """Fill the form from a {field_id: value} mapping.

        Returns True if any field's value actually changed.
        """
        values = deflatten_dict(fill_data)
        was_change = False
        self.before_fill(values)
        for key, value in values.items():
            widget = self.fields(key)
            if value is None:
                self.logger.debug('Skipping fill of %r because value was None', key)
                continue
            try:
                if widget.fill(value):
                    was_change = True
            except NotImplementedError:
                # Widget type doesn't support filling this value; skip it.
                continue
        self.after_fill(was_change)
        return was_change
# Imported at the bottom of the module (and then scanned to register
# navigation destinations) to avoid circular imports between this module
# and the UI/SSUI implementations.
from . import ui, ssui  # NOQA last for import cycles
importscan.scan(ui)
importscan.scan(ssui)
| gpl-2.0 |
UltronAI/Deep-Learning | Pattern-Recognition/hw1-Linear-Classifier/scripts/svm/svm-20-2.py | 1 | 1329 | import numpy as np
import random
from sklearn import svm
# Train a linear SVM on 10 randomly sampled examples per class (features
# 3 and 4 only), repeat 10 times, and report the mean and best test-set
# error along with the sample indexes that produced the best run.
trainDir = "../../traindata.txt"
testDir = "../../testdata.txt"
# Column indexes of the two features used; column 10 holds the label
# (1 = male, 0 = female -- presumably; TODO confirm against the data).
feature = [3, 4]
Xtrain = np.loadtxt(trainDir)[:, feature]
Ytrain = np.loadtxt(trainDir)[:, 10]
Xtest = np.loadtxt(testDir)[:, feature]
Ytest = np.loadtxt(testDir)[:, 10]
# Row indexes of each class in the training set.
MIndex = np.where(Ytrain==1)[0]
FIndex = np.where(Ytrain==0)[0]
# Number of training examples sampled per class in each run.
subN = 10
minErr = 1
mean = 0
for n in range(10):
    # Draw a balanced random subset: subN rows from each class.
    MIndexSub = MIndex[np.array(random.sample(range(MIndex.shape[0]), subN))]
    FIndexSub = FIndex[np.array(random.sample(range(FIndex.shape[0]), subN))]
    indexSub = np.concatenate((MIndexSub, FIndexSub))
    XtrainSub = Xtrain[indexSub]
    YtrainSub = Ytrain[indexSub]
    clf = svm.SVC(kernel='linear', C=3)
    clf.fit(XtrainSub, YtrainSub)
    Ypred = clf.predict(Xtest)
    # Fraction of misclassified test examples (labels are 0/1).
    err = np.sum(np.abs(Ypred-Ytest))/Ytest.shape[0]
    mean = mean + err
    if minErr > err:
        # Remember the best run and the subset that produced it.
        minErr = err
        minMIndex = MIndexSub
        minFIndex = FIndexSub
    else:
        pass
print('Mean Error Ratio:', mean/10) # e.g. 0.132926829268
print('Min Error Ratio:', minErr) # e.g. 0.0792682926829
print('Male Index:', minMIndex) # e.g. [756 770 496 666 519 540 506 635 818 867]
print('Female Index:', minFIndex) # e.g. [278 328 434 51 209 466 270 93 439 213]
2014c2g12/c2g12 | c2wp/exts/w2/static/Brython2.0.0-20140209-164925/Lib/external_import.py | 742 | 2985 | import os
from browser import doc
import urllib.request
## this module is able to download modules that are external to
## localhost/src
## so we could download from any URL
class ModuleFinder:
    """PEP 302-style finder that fetches modules over HTTP (Brython).

    Only applies to path entries starting with 'http://'; for anything
    else the constructor raises ImportError so the import machinery
    moves on to the next hook.
    """
    def __init__(self, path_entry):
        print("external_import here..")
        #print(path_entry)
        self._module=None
        if path_entry.startswith('http://'):
            self.path_entry=path_entry
        else:
            # Not an HTTP path entry -> this finder does not apply.
            raise ImportError()
    def __str__(self):
        return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)
    def find_module(self, fullname, path=None):
        """Try to fetch `fullname` as a .js/.pyj/.py file under `path`."""
        path = path or self.path_entry
        #print('looking for "%s" in %s ...' % (fullname, path))
        for _ext in ['js', 'pyj', 'py']:
            _fp,_url,_headers=urllib.request.urlopen(path + '/' + '%s.%s' % (fullname, _ext))
            self._module=_fp.read()
            _fp.close()
            # NOTE(review): read() rarely returns None, so this appears to
            # accept the first extension unconditionally -- presumably the
            # fetch raises on a missing file in Brython; verify.
            if self._module is not None:
                print("module found at %s:%s" % (path, fullname))
                return ModuleLoader(path, fullname, self._module)
        print('module %s not found' % fullname)
        raise ImportError()
        # NOTE(review): unreachable -- the raise above always fires first.
        return None
class ModuleLoader:
    """Load source for modules"""

    # PEP 302 loader companion to ModuleFinder: holds the already-fetched
    # module source and materialises a module object in load_module().

    def __init__(self, filepath, name, module_source):
        self._filepath=filepath
        self._name=name
        self._module_source=module_source

    def get_source(self):
        # Raw source text (Python or JavaScript) fetched by the finder.
        return self._module_source

    def is_package(self):
        # NOTE(review): a dotted name means *submodule*, not necessarily a
        # package; this treats every dotted import as a package -- verify.
        return '.' in self._name

    def load_module(self):
        """Create (or reuse from sys.modules) the module object."""
        # NOTE(review): `sys` is referenced here but never imported in this
        # file; presumably Brython injects it as a builtin -- confirm.
        if self._name in sys.modules:
            #print('reusing existing module from previous import of "%s"' % fullname)
            mod = sys.modules[self._name]
            return mod
        _src=self.get_source()
        # JSObject / import_*_module are Brython runtime globals that turn
        # source text into a module object, dispatched on file extension.
        # NOTE(review): the finder passes the directory path entry as
        # `filepath`, not the fetched file URL, so these endswith() checks
        # look like they can never match -- verify against the caller.
        if self._filepath.endswith('.js'):
            mod=JSObject(import_js_module(_src, self._filepath, self._name))
        elif self._filepath.endswith('.py'):
            mod=JSObject(import_py_module(_src, self._filepath, self._name))
        elif self._filepath.endswith('.pyj'):
            mod=JSObject(import_pyj_module(_src, self._filepath, self._name))
        else:
            raise ImportError('Invalid Module: %s' % self._filepath)
        # Set a few properties required by PEP 302
        mod.__file__ = self._filepath
        mod.__name__ = self._name
        mod.__path__ = os.path.abspath(self._filepath)
        mod.__loader__ = self
        mod.__package__ = '.'.join(self._name.split('.')[:-1])
        if self.is_package():
            print('adding path for package')
            # Set __path__ for packages
            # so we can find the sub-modules.
            mod.__path__ = [ self._filepath ]
        else:
            print('imported as regular module')
        print('creating a new module object for "%s"' % self._name)
        # setdefault keeps any module registered concurrently under this name.
        sys.modules.setdefault(self._name, mod)
        JSObject(__BRYTHON__.imported)[self._name]=mod
        return mod
| gpl-2.0 |
freedomtan/tensorflow | tensorflow/python/tpu/tpu_system_metadata.py | 15 | 8880 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
from tensorflow.python.util.tf_export import tf_export
# Timeouts/retries for querying the TPU master; timeout values are in
# milliseconds (they feed ConfigProto.operation_timeout_in_ms).
_PINGING_MASTER_TIMEOUT_IN_MS = 5 * 60 * 1000  # 5 min per attempt
_RETRY_TIMES = 12 * 24  # 288 attempts x 5 min = 1 day
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000  # 5 mins
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
@tf_export('tpu.experimental.TPUSystemMetadata')
class TPUSystemMetadata(
    collections.namedtuple('TPUSystemMetadata', [
        'num_cores',
        'num_hosts',
        'num_of_cores_per_host',
        'topology',
        'devices',
    ])):
  """Describes some metadata about the TPU system.

  Attributes:
    num_cores: integer. Total number of TPU cores in the TPU system.
    num_hosts: integer. Total number of hosts (TPU workers) in the TPU system.
    num_of_cores_per_host: integer. Number of TPU cores per host (TPU worker).
    topology: an instance of `tf.tpu.experimental.Topology`, which describes
      the physical topology of TPU system.
    devices: a tuple of strings, which describes all the TPU devices in the
      system.
  """

  # __new__ only forwards to the namedtuple constructor; it is kept so the
  # public argument order is pinned explicitly.
  def __new__(cls, num_cores, num_hosts, num_of_cores_per_host, topology,
              devices):
    return super(TPUSystemMetadata,
                 cls).__new__(cls, num_cores, num_hosts, num_of_cores_per_host,
                              topology, devices)
def _query_tpu_system_metadata(master_address, cluster_def=None,
                               query_topology=False):
  """Automatically detects the TPU system metadata in the system.

  Args:
    master_address: string address of the TensorFlow master to query.
    cluster_def: optional ClusterDef proto describing the TPU cluster.
    query_topology: if True, additionally initialize the TPU system to fetch
      its fabric topology (requires at least one visible TPU core).

  Returns:
    A `TPUSystemMetadata` namedtuple with core/host counts, the (optional)
    topology, and a sorted tuple of device attributes.

  Raises:
    ValueError: if the master cannot be reached after all retries.
    RuntimeError: if hosts expose differing TPU core counts, or if
      `query_topology` is True but no TPU cores were found.
  """
  tpu_core_count = 0
  devices = []
  device_dict = collections.defaultdict(list)

  if context.executing_eagerly():
    logical_devices = config.list_logical_devices()

    # We want the output type to match in both eager and session mode
    devices = [session_lib._DeviceAttributes(device_util.canonicalize(d.name),  # pylint: disable=protected-access
                                             d.device_type, 0, 0)
               for d in logical_devices]
  else:
    # Graph mode: poll the master until it answers (it may still be
    # scheduling), bounded by _RETRY_TIMES attempts.
    # TODO(b/120564445): Replace with standard library for retries.
    retry_count = 1
    while True:
      logging.info('Querying Tensorflow master (%s) for TPU system metadata.',
                   master_address)
      try:
        with ops.Graph().as_default():
          with session_lib.Session(
              master_address,
              config=get_session_config_with_timeout(
                  _PINGING_MASTER_TIMEOUT_IN_MS,
                  cluster_def)) as sess:
            devices = sess.list_devices()
            break
      except errors.DeadlineExceededError:
        msg = ('Failed to connect to the Tensorflow master. The TPU worker may '
               'not be ready (still scheduling) or the Tensorflow master '
               'address is incorrect: got (%s).' %
               (master_address))

        # TODO(xiejw): For local or grpc master we might not need retry logic
        # here.
        if retry_count <= _RETRY_TIMES:
          logging.warning('%s', msg)
          logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
          retry_count += 1
        else:
          raise ValueError(msg)

  # Bucket TPU core indices by host (task id) to derive per-host counts.
  for device in devices:
    spec = tf_device.DeviceSpec.from_string(device.name)
    if spec.device_type == 'TPU':
      device_dict[spec.task].append(spec.device_index)
      tpu_core_count += 1

  num_of_cores_per_host = 0
  if tpu_core_count:
    num_cores_per_host_set = set(
        [len(core_ids) for core_ids in device_dict.values()])
    if len(num_cores_per_host_set) != 1:
      raise RuntimeError(
          'TPU cores on each host is not same. This should not happen!. '
          'devices: {}'.format(devices))
    num_of_cores_per_host = num_cores_per_host_set.pop()

  topology = None
  if query_topology:
    if not tpu_core_count:
      raise RuntimeError(
          'Cannot find any TPU cores in the system (master address {}). '
          'This usually means the master address is incorrect or the '
          'TPU worker has some problems. Available devices: {}'.format(
              master_address, devices))

    topology = _obtain_topology(master_address, cluster_def)

  # We sort the metadata devices so that downstream users get a sorted list
  # for creating mirrored variables correctly.
  def _sort_key(device):
    spec = tf_device.DeviceSpec.from_string(device.name)
    return (spec.job, spec.replica, spec.task, spec.device_type,
            spec.device_index)
  devices = tuple(sorted(devices, key=_sort_key))

  metadata = TPUSystemMetadata(
      num_cores=tpu_core_count,
      num_hosts=len(device_dict),
      num_of_cores_per_host=num_of_cores_per_host,
      topology=topology,
      devices=devices)

  if tpu_core_count:
    logging.info('Found TPU system:')
    logging.info('*** Num TPU Cores: %d', metadata.num_cores)
    logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
    logging.info('*** Num TPU Cores Per Worker: %d',
                 metadata.num_of_cores_per_host)
    for device in metadata.devices:
      logging.info('*** Available Device: %s', device)
  else:
    logging.info('Failed to find TPU: %s', metadata)
  return metadata
def _obtain_topology(master_address, cluster_def):
  """Initializes the TPU system and returns its fabric topology.

  Args:
    master_address: address of the TensorFlow master fronting the TPU system.
    cluster_def: ClusterDef proto describing the TPU cluster, or None.

  Returns:
    The serialized topology produced by running `tpu.initialize_system()`.

  Raises:
    ValueError: if the TPU system does not answer within the initial
      system timeout.
  """
  try:
    logging.info('Initializing TPU system (master: %s) to fetch topology '
                 'for model parallelism. This might take a while.',
                 master_address)
    timeout_config = get_session_config_with_timeout(
        _INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def)
    # Run the initialization op in a throwaway graph/session so nothing
    # leaks into the caller's default graph.
    with ops.Graph().as_default(), session_lib.Session(
        master_address, config=timeout_config) as sess:
      return sess.run(tpu.initialize_system())
  except errors.DeadlineExceededError:
    raise ValueError(
        'Fail to initialize TPU system with master (%s). '
        'Please double check the TPU system is functional.' % (
            master_address))
def get_session_config_with_timeout(timeout_in_secs, cluster_def):
  """Builds a session ConfigProto carrying an operation timeout.

  Args:
    timeout_in_secs: operation timeout value. NOTE(review): despite the
      parameter name, the value is forwarded verbatim to
      `operation_timeout_in_ms` and every caller in this module passes
      milliseconds -- the name looks wrong, not the logic; confirm before
      renaming.
    cluster_def: ClusterDef proto for the cluster, or None.

  Returns:
    A `config_pb2.ConfigProto` (not a live session) with the timeout and
    cluster definition set.
  """
  return config_pb2.ConfigProto(
      operation_timeout_in_ms=timeout_in_secs, cluster_def=cluster_def)
def master_job(master, cluster_def):
  """Returns the canonical job name to use to place TPU computations on.

  Args:
    master: A `string` representing the TensorFlow master to use.
    cluster_def: A ClusterDef object describing the TPU cluster.

  Returns:
    A string containing the job name, or None if no job should be specified.

  Raises:
    ValueError: If the user needs to specify a tpu_job_name, because we are
      unable to infer the job name automatically, or if the user-specified job
      names are inappropriate.
  """
  # Local or in-process masters run everything in one process, so no job
  # name placement is needed.
  if master in _LOCAL_MASTERS:
    return None

  if not (cluster_def and cluster_def.job):
    return _DEFAULT_JOB_NAME

  job_names = {job.name for job in cluster_def.job}
  if _DEFAULT_JOB_NAME in job_names:
    # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
    raise ValueError('Currently, tpu_worker is not an allowed job name.')

  if len(job_names) == 1:
    return cluster_def.job[0].name

  if len(job_names) == 2 and _DEFAULT_COORDINATOR_JOB_NAME in job_names:
    # Exactly one non-coordinator job remains: that is the TPU job.
    job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
    return job_names.pop()

  # TODO(b/67716447): Include more sophisticated heuristics.
  raise ValueError('Could not infer TPU job name.')
| apache-2.0 |
hdznrrd/parseiq | piq.py | 1 | 2070 | """\
Usage: piq.py dump [-o OFFSET] [-f FRAMES] FILE
piq.py findreftick [-o OFFSET] [-f FRAMES] FILE
piq.py findpattern [-o OFFSET] [-f FRAMES] PATTERN FILE
Arguments:
FILE Input file in complex64 NPY format
PATTERN Pattern in complex64 NPY format to be searched in FILE
Options:
-h --help Show this help message and exit
-o OFFSET Number of frames to skip before processing [default: 0]
-f FRAMES Limit number of frames to process (at most)
"""
from docopt import docopt
import wave
import struct
import numpy as np
class Piq(object):
    """Application class for piq.

    Wraps the parsed docopt argument dict and dispatches to the
    dump / findreftick / findpattern sub-commands.  Input files are mapped
    as read-only complex64 arrays via ``np.memmap`` so large captures can
    be processed without loading them fully into memory.
    """

    def __init__(self, arguments):
        """Store the docopt arguments and initialize empty data slots."""
        self.arguments = arguments
        # 'data' holds an np.memmap once a file is opened; 'offset' is the
        # number of frames to skip before processing.
        self.haystack = {}
        self.needle = {}
        self.haystack['data'] = None
        self.haystack['offset'] = 0
        self.needle['data'] = None
        self.needle['offset'] = 0

    def _open_c64(self, path):
        """Map *path* as a read-only array of complex64 samples."""
        return np.memmap(path, mode='r', dtype=np.complex64)

    def do_dump(self):
        """Dump every complex frame of the haystack to stdout."""
        for v in self.haystack['data']:
            # print() with a single argument behaves identically on
            # Python 2 and 3 (the original `print v` was Python-2 only).
            print(v)

    def do_findpattern(self):
        """Find a pattern within another file."""
        # TODO: not implemented yet.
        pass

    def do_findreftick(self):
        """Find occurences of reference timer ticks."""
        # TODO: not implemented yet.
        pass

    def dispatch(self):
        """Dispatcher for the command interface."""
        if self.arguments['dump']:
            self.haystack['data'] = self._open_c64(self.arguments['FILE'])
            self.do_dump()
        elif self.arguments['findreftick']:
            self.haystack['data'] = self._open_c64(self.arguments['FILE'])
            self.do_findreftick()
        elif self.arguments['findpattern']:
            self.needle['data'] = self._open_c64(self.arguments['PATTERN'])
            self.haystack['data'] = self._open_c64(self.arguments['FILE'])
            self.do_findpattern()

    def run(self):
        """Entry point of the application"""
        self.dispatch()
if __name__ == '__main__':
    # Parse the CLI per the module docstring and run the selected command.
    Piq(docopt(__doc__)).run()
| mit |
baylee-d/osf.io | osf/utils/notifications.py | 5 | 4713 | from django.utils import timezone
from website.mails import mails
from website.reviews import signals as reviews_signals
from website.settings import DOMAIN, OSF_SUPPORT_EMAIL, OSF_CONTACT_EMAIL
from osf.utils.workflows import RegistrationModerationTriggers
def get_email_template_context(resource):
    """Build the shared template context for reviews/moderation emails.

    Handles both preprints and registrations; registration resources
    additionally carry their draft registration and provider branding.
    """
    provider = resource.provider
    is_preprint = provider.type == 'osf.preprintprovider'
    if is_preprint:
        url_segment = 'preprints'
        document_type = provider.preprint_word
    else:
        url_segment = 'registries'
        document_type = 'registration'

    base_context = {
        'domain': DOMAIN,
        'reviewable': resource,
        'workflow': provider.reviews_workflow,
        'provider_url': provider.domain or f'{DOMAIN}{url_segment}/{provider._id}',
        'provider_contact_email': provider.email_contact or OSF_CONTACT_EMAIL,
        'provider_support_email': provider.email_support or OSF_SUPPORT_EMAIL,
        'document_type': document_type,
    }

    if document_type == 'registration':
        base_context['draft_registration'] = resource.draft_registration.get()
        brand = provider.brand
        if brand:
            base_context['logo_url'] = brand.hero_logo_image
            base_context['top_bar_color'] = brand.primary_color
            base_context['provider_name'] = provider.name

    return base_context
def notify_submit(resource, user, *args, **kwargs):
    """Email all contributors and notify moderators on first submission."""
    context = get_email_template_context(resource)
    context['referrer'] = user
    reviews_signals.reviews_email_submit.send(
        recipients=list(resource.contributors),
        context=context,
    )
    reviews_signals.reviews_email_submit_moderators_notifications.send(
        context=context,
        timestamp=timezone.now(),
    )
def notify_resubmit(resource, user, action, *args, **kwargs):
    """Send the resubmission-confirmation review email."""
    reviews_signals.reviews_email.send(
        creator=user,
        context=get_email_template_context(resource),
        template='reviews_resubmission_confirmation',
        action=action,
    )
def notify_accept_reject(resource, user, action, states, *args, **kwargs):
    """Send the submission-status email after a moderator accepts/rejects."""
    comments_are_public = not resource.provider.reviews_comments_private
    context = get_email_template_context(resource)
    context['notify_comment'] = comments_are_public and action.comment
    context['comment'] = action.comment
    context['requester'] = action.creator
    context['is_rejected'] = action.to_state == states.REJECTED.db_name
    context['was_pending'] = action.from_state == states.PENDING.db_name
    reviews_signals.reviews_email.send(
        creator=user,
        context=context,
        template='reviews_submission_status',
        action=action,
    )
def notify_edit_comment(resource, user, action, *args, **kwargs):
    """Email reviewers about an edited comment, unless comments are private."""
    # Guard clause: nothing to send when comments are private or empty.
    if resource.provider.reviews_comments_private or not action.comment:
        return
    context = get_email_template_context(resource)
    context['comment'] = action.comment
    reviews_signals.reviews_email.send(
        creator=user,
        context=context,
        template='reviews_update_comment',
        action=action,
    )
def notify_reject_withdraw_request(resource, action, *args, **kwargs):
    """Email every contributor that a withdrawal request was declined.

    Each contributor receives a personalized context flagging whether they
    are the contributor who originally requested the withdrawal.
    """
    context = get_email_template_context(resource)
    # 'requester' is loop-invariant; the original redundantly re-assigned it
    # on every iteration.
    context['requester'] = action.creator
    for contributor in resource.contributors.all():
        context['contributor'] = contributor
        context['is_requester'] = action.creator == contributor
        mails.send_mail(
            contributor.username,
            mails.WITHDRAWAL_REQUEST_DECLINED,
            **context
        )
def notify_moderator_registration_requests_withdrawal(resource, user, *args, **kwargs):
    """Notify provider moderators that a registration withdrawal was requested."""
    context = get_email_template_context(resource)
    context['referrer'] = user
    reviews_signals.reviews_withdraw_requests_notification_moderators.send(
        context=context,
        timestamp=timezone.now(),
    )
def notify_withdraw_registration(resource, action, *args, **kwargs):
    """Email every contributor that a registration was withdrawn."""
    requester = resource.retraction.initiated_by
    context = get_email_template_context(resource)
    context['force_withdrawal'] = (
        action.trigger == RegistrationModerationTriggers.FORCE_WITHDRAW.db_name)
    context['requester'] = requester
    context['comment'] = action.comment
    context['notify_comment'] = (
        not resource.provider.reviews_comments_private and action.comment)
    for contributor in resource.contributors.all():
        context['contributor'] = contributor
        context['is_requester'] = requester == contributor
        mails.send_mail(
            contributor.username,
            mails.WITHDRAWAL_REQUEST_GRANTED,
            **context
        )
| apache-2.0 |
openstack/tempest | tempest/scenario/test_minbw_allocation_placement.py | 1 | 21973 | # Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager
CONF = config.CONF
class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
credentials = ['primary', 'admin']
required_extensions = ['port-resource-request',
'qos',
'qos-bw-minimum-ingress']
# The feature QoS minimum bandwidth allocation in Placement API depends on
# Granular resource requests to GET /allocation_candidates and Support
# allocation candidates with nested resource providers features in
# Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
# stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
# means that the minimum placement microversion is 1.29
placement_min_microversion = '1.29'
placement_max_microversion = 'latest'
# Nova rejects to boot VM with port which has resource_request field, below
# microversion 2.72
compute_min_microversion = '2.72'
compute_max_microversion = 'latest'
INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
INGRESS_DIRECTION = 'ingress'
SMALLEST_POSSIBLE_BW = 1
# For any realistic inventory value (that is inventory != MAX_INT) an
# allocation candidate request of MAX_INT is expected to be rejected, see:
# https://github.com/openstack/placement/blob/master/placement/
# db/constants.py#L16
PLACEMENT_MAX_INT = 0x7FFFFFFF
BANDWIDTH_1 = 1000
BANDWIDTH_2 = 2000
@classmethod
def setup_clients(cls):
super(MinBwAllocationPlacementTest, cls).setup_clients()
cls.placement_client = cls.os_admin.placement_client
cls.networks_client = cls.os_admin.networks_client
cls.subnets_client = cls.os_admin.subnets_client
cls.ports_client = cls.os_primary.ports_client
cls.routers_client = cls.os_adm.routers_client
cls.qos_client = cls.os_admin.qos_client
cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
cls.flavors_client = cls.os_adm.flavors_client
cls.servers_client = cls.os_adm.servers_client
@classmethod
def skip_checks(cls):
super(MinBwAllocationPlacementTest, cls).skip_checks()
if not CONF.network_feature_enabled.qos_placement_physnet:
msg = "Skipped as no physnet is available in config for " \
"placement based QoS allocation."
raise cls.skipException(msg)
def setUp(self):
super(MinBwAllocationPlacementTest, self).setUp()
self._check_if_allocation_is_possible()
def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
policy = self.qos_client.create_qos_policy(
name=data_utils.rand_name(name_prefix),
shared=True)['policy']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.qos_client.delete_qos_policy, policy['id'])
rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
policy['id'],
**{
'min_kbps': min_kbps,
'direction': self.INGRESS_DIRECTION
})['minimum_bandwidth_rule']
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
rule['id'])
return policy
def _create_qos_basic_policies(self):
self.qos_policy_valid = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_valid',
min_kbps=self.SMALLEST_POSSIBLE_BW)
self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_not_valid',
min_kbps=self.PLACEMENT_MAX_INT)
def _create_qos_policies_from_life(self):
# For tempest-slow the max bandwidth configured is 1000000,
# https://opendev.org/openstack/tempest/src/branch/master/
# .zuul.yaml#L416-L420
self.qos_policy_1 = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_1',
min_kbps=self.BANDWIDTH_1
)
self.qos_policy_2 = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_2',
min_kbps=self.BANDWIDTH_2
)
def _create_network_and_qos_policies(self, policy_method):
physnet_name = CONF.network_feature_enabled.qos_placement_physnet
base_segm = \
CONF.network_feature_enabled.provider_net_base_segmentation_id
self.prov_network, _, _ = self.setup_network_subnet_with_router(
networks_client=self.networks_client,
routers_client=self.routers_client,
subnets_client=self.subnets_client,
**{
'shared': True,
'provider:network_type': 'vlan',
'provider:physical_network': physnet_name,
'provider:segmentation_id': base_segm
})
policy_method()
def _check_if_allocation_is_possible(self):
alloc_candidates = self.placement_client.list_allocation_candidates(
resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
self.SMALLEST_POSSIBLE_BW))
if len(alloc_candidates['provider_summaries']) == 0:
# Skip if the backend does not support QoS minimum bandwidth
# allocation in Placement API
raise self.skipException(
'No allocation candidates are available for %s:%s' %
(self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
# Just to be sure check with impossible high (placement max_int),
# allocation
alloc_candidates = self.placement_client.list_allocation_candidates(
resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
self.PLACEMENT_MAX_INT))
if len(alloc_candidates['provider_summaries']) != 0:
self.fail('For %s:%s there should be no available candidate!' %
(self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
wait_until = (None if status == 'ERROR' else status)
port = self.create_port(
self.prov_network['id'], qos_policy_id=qos_policy_id)
server = self.create_server(networks=[{'port': port['id']}],
wait_until=wait_until)
waiters.wait_for_server_status(
client=self.os_primary.servers_client, server_id=server['id'],
status=status, ready_wait=False, raise_on_error=False)
return server, port
def _assert_allocation_is_as_expected(self, consumer, port_ids,
min_kbps=SMALLEST_POSSIBLE_BW):
allocations = self.placement_client.list_allocations(
consumer)['allocations']
self.assertGreater(len(allocations), 0)
bw_resource_in_alloc = False
for rp, resources in allocations.items():
if self.INGRESS_RESOURCE_CLASS in resources['resources']:
self.assertEqual(
min_kbps,
resources['resources'][self.INGRESS_RESOURCE_CLASS])
bw_resource_in_alloc = True
allocation_rp = rp
if min_kbps:
self.assertTrue(bw_resource_in_alloc)
# Check binding_profile of the port is not empty and equals with
# the rp uuid
for port_id in port_ids:
port = self.os_admin.ports_client.show_port(port_id)
self.assertEqual(
allocation_rp,
port['port']['binding:profile']['allocation'])
@decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_basic(self):
""""Basic scenario with QoS min bw allocation in placement.
Steps:
* Create prerequisites:
** VLAN type provider network with subnet.
** valid QoS policy with minimum bandwidth rule with min_kbps=1
(This is a simplification to skip the checks in placement for
detecting the resource provider tree and inventories, as if
bandwidth resource is available 1 kbs will be available).
** invalid QoS policy with minimum bandwidth rule with
min_kbs=max integer from placement (this is a simplification again
to avoid detection of RP tress and inventories, as placement will
reject such big allocation).
* Create port with valid QoS policy, and boot VM with that, it should
pass.
* Create port with invalid QoS policy, and try to boot VM with that,
it should fail.
"""
self._create_network_and_qos_policies(self._create_qos_basic_policies)
server1, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server1['id'],
[valid_port['id']])
server2, not_valid_port = self._boot_vm_with_min_bw(
self.qos_policy_not_valid['id'], status='ERROR')
allocations = self.placement_client.list_allocations(server2['id'])
self.assertEqual(0, len(allocations['allocations']))
server2 = self.servers_client.show_server(server2['id'])
self.assertIn('fault', server2['server'])
self.assertIn('No valid host', server2['server']['fault']['message'])
# Check that binding_profile of the port is empty
port = self.os_admin.ports_client.show_port(not_valid_port['id'])
self.assertEqual(0, len(port['port']['binding:profile']))
@decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
@testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
'Less than 2 compute nodes, skipping multinode '
'tests.')
@utils.services('compute', 'network')
def test_migrate_with_qos_min_bw_allocation(self):
"""Scenario to migrate VM with QoS min bw allocation in placement
Boot a VM like in test_qos_min_bw_allocation_basic, do the same
checks, and
* migrate the server
* confirm the resize, if the VM state is VERIFY_RESIZE
* If the VM goes to ACTIVE state check that allocations are as
expected.
"""
self._create_network_and_qos_policies(self._create_qos_basic_policies)
server, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
self.servers_client.migrate_server(server_id=server['id'])
waiters.wait_for_server_status(
client=self.os_primary.servers_client, server_id=server['id'],
status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
# TODO(lajoskatona): Check that the allocations are ok for the
# migration?
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
self.servers_client.confirm_resize_server(server_id=server['id'])
waiters.wait_for_server_status(
client=self.os_primary.servers_client, server_id=server['id'],
status='ACTIVE', ready_wait=False, raise_on_error=True)
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
@decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@utils.services('compute', 'network')
def test_resize_with_qos_min_bw_allocation(self):
"""Scenario to resize VM with QoS min bw allocation in placement.
Boot a VM like in test_qos_min_bw_allocation_basic, do the same
checks, and
* resize the server with new flavor
* confirm the resize, if the VM state is VERIFY_RESIZE
* If the VM goes to ACTIVE state check that allocations are as
expected.
"""
self._create_network_and_qos_policies(self._create_qos_basic_policies)
server, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
old_flavor = self.flavors_client.show_flavor(
CONF.compute.flavor_ref)['flavor']
new_flavor = self.flavors_client.create_flavor(**{
'ram': old_flavor['ram'],
'vcpus': old_flavor['vcpus'],
'name': old_flavor['name'] + 'extra',
'disk': old_flavor['disk'] + 1
})['flavor']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.flavors_client.delete_flavor, new_flavor['id'])
self.servers_client.resize_server(
server_id=server['id'], flavor_ref=new_flavor['id'])
waiters.wait_for_server_status(
client=self.os_primary.servers_client, server_id=server['id'],
status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
# TODO(lajoskatona): Check that the allocations are ok for the
# migration?
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
self.servers_client.confirm_resize_server(server_id=server['id'])
waiters.wait_for_server_status(
client=self.os_primary.servers_client, server_id=server['id'],
status='ACTIVE', ready_wait=False, raise_on_error=True)
self._assert_allocation_is_as_expected(server['id'],
[valid_port['id']])
@decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_update_policy(self):
"""Test the update of QoS policy on bound port
Related RFE in neutron: #1882804
The scenario is the following:
* Have a port with QoS policy and minimum bandwidth rule.
* Boot a VM with the port.
* Update the port with a new policy with different minimum bandwidth
values.
* The allocation on placement side should be according to the new
rules.
"""
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
self._create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(
self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
server1 = self.create_server(
networks=[{'port': port['id']}])
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_1)
self.ports_client.update_port(
port['id'],
**{'qos_policy_id': self.qos_policy_2['id']})
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_2)
# I changed my mind
self.ports_client.update_port(
port['id'],
**{'qos_policy_id': self.qos_policy_1['id']})
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_1)
# bad request....
self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_not_valid',
min_kbps=self.PLACEMENT_MAX_INT)
port_orig = self.ports_client.show_port(port['id'])['port']
self.assertRaises(
lib_exc.Conflict,
self.ports_client.update_port,
port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_1)
port_upd = self.ports_client.show_port(port['id'])['port']
self.assertEqual(port_orig['qos_policy_id'],
port_upd['qos_policy_id'])
self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
@decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_update_policy_from_zero(self):
"""Test port without QoS policy to have QoS policy
This scenario checks if updating a port without QoS policy to
have QoS policy with minimum_bandwidth rule succeeds only on
controlplane, but placement allocation remains 0.
"""
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
self._create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(self.prov_network['id'])
server1 = self.create_server(
networks=[{'port': port['id']}])
self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
self.ports_client.update_port(
port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
@decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_update_policy_to_zero(self):
"""Test port with QoS policy to remove QoS policy
In this scenario port with QoS minimum_bandwidth rule update to
remove QoS policy results in 0 placement allocation.
"""
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
self._create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(
self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
server1 = self.create_server(
networks=[{'port': port['id']}])
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_1)
self.ports_client.update_port(
port['id'],
**{'qos_policy_id': None})
self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
@decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_update_with_multiple_ports(self):
    """Switch one of two allocated ports to the other port's policy.

    The server boots with two ports carrying different min-bandwidth
    policies; retargeting the first port to the second policy must
    change the total placement allocation accordingly.
    """
    if not utils.is_network_feature_enabled('update_port_qos'):
        raise self.skipException("update_port_qos feature is not enabled")
    self._create_network_and_qos_policies(
        self._create_qos_policies_from_life)
    port1 = self.create_port(
        self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
    port2 = self.create_port(
        self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
    server1 = self.create_server(
        networks=[{'port': port1['id']}, {'port': port2['id']}])
    # Initial total: one allocation per policy.
    self._assert_allocation_is_as_expected(
        server1['id'], [port1['id'], port2['id']],
        self.BANDWIDTH_1 + self.BANDWIDTH_2)
    # Move port1 onto policy 2: both ports now allocate BANDWIDTH_2.
    self.ports_client.update_port(
        port1['id'],
        **{'qos_policy_id': self.qos_policy_2['id']})
    self._assert_allocation_is_as_expected(
        server1['id'], [port1['id'], port2['id']],
        2 * self.BANDWIDTH_2)
@decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
@utils.services('compute', 'network')
def test_empty_update(self):
    """A port update that does not touch QoS keeps the allocation.

    Updating an unrelated attribute (the description) of a port with a
    min-bandwidth policy must leave the placement allocation unchanged.
    """
    if not utils.is_network_feature_enabled('update_port_qos'):
        raise self.skipException("update_port_qos feature is not enabled")
    self._create_network_and_qos_policies(
        self._create_qos_policies_from_life)
    port = self.create_port(
        self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
    server1 = self.create_server(
        networks=[{'port': port['id']}])
    self._assert_allocation_is_as_expected(server1['id'], [port['id']],
                                           self.BANDWIDTH_1)
    # QoS-unrelated update: allocation must remain BANDWIDTH_1.
    self.ports_client.update_port(
        port['id'],
        **{'description': 'foo'})
    self._assert_allocation_is_as_expected(server1['id'], [port['id']],
                                           self.BANDWIDTH_1)
| apache-2.0 |
Plain-Andy-legacy/android_external_chromium_org | chrome/common/extensions/docs/server2/samples_data_source.py | 25 | 2735 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from data_source import DataSource
from extensions_paths import EXAMPLES
from future import All, Future
from platform_util import GetPlatforms
def _GetSampleId(sample_name):
return sample_name.lower().replace(' ', '-')
def GetAcceptedLanguages(request):
  '''Return the language codes from the request's Accept-Language header.

  Codes keep their order of appearance; any ";q=" weight suffix is
  stripped. Returns an empty list when there is no request or no header.
  '''
  if request is None:
    return []
  header = request.headers.get('Accept-Language', None)
  if header is None:
    return []
  languages = []
  for part in header.split(','):
    languages.append(part.split(';')[0].strip())
  return languages
def CreateSamplesView(samples_list, request):
  '''Build the view model for a list of sample dicts.

  Samples whose name/description are __MSG_*__ placeholders get localized
  against the request's accepted languages; every returned sample carries
  an 'id' derived from its (possibly localized) name.
  '''
  results = []
  for sample in samples_list:
    name = sample['name']
    description = sample['description']
    if description is None:
      description = ''
    needs_localization = (name.startswith('__MSG_') or
                          description.startswith('__MSG_'))
    if not needs_localization:
      sample['id'] = _GetSampleId(name)
      results.append(sample)
      continue
    try:
      # Work on a copy so the dict held in the cache stays untouched.
      localized = sample.copy()
      name_key = name[len('__MSG_'):-len('__')]
      description_key = description[len('__MSG_'):-len('__')]
      locale = localized['default_locale']
      for lang in GetAcceptedLanguages(request):
        if lang in localized['locales']:
          locale = lang
          break
      messages = localized['locales'][locale]
      localized['name'] = messages[name_key]['message']
      localized['description'] = messages[description_key]['message']
      localized['id'] = _GetSampleId(localized['name'])
    except Exception:
      logging.error(traceback.format_exc())
      # Fall back to the untranslated cached dict.
      localized = sample
    results.append(localized)
  return results
class SamplesDataSource(DataSource):
  '''Constructs a list of samples and their respective files and api calls.
  '''

  def __init__(self, server_instance, request):
    # The platform bundle provides per-platform samples models; the
    # request is needed to localize sample names (Accept-Language).
    self._platform_bundle = server_instance.platform_bundle
    self._request = request

  def _GetImpl(self, platform):
    # Returns a Future resolving to the localized samples view.
    # NOTE(review): 'apps' samples appear to live at the model root,
    # extension samples under EXAMPLES — confirm against the bundle.
    cache = self._platform_bundle.GetSamplesModel(platform).GetCache()
    create_view = lambda samp_list: CreateSamplesView(samp_list, self._request)
    return cache.GetFromFileListing('' if platform == 'apps'
                                    else EXAMPLES).Then(create_view)

  def get(self, platform):
    # Synchronous accessor: block on the Future.
    return self._GetImpl(platform).Get()

  def GetRefreshPaths(self):
    # One refresh path per supported platform.
    return [platform for platform in GetPlatforms()]

  def Refresh(self, path):
    return self._GetImpl(path)
| bsd-3-clause |
win0x86/Lab | mitm/jinja2/lexer.py | 635 | 28393 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import next, iteritems, implements_iterator, text_type, \
intern
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
compile('föö', '<unknown>', 'eval')
except SyntaxError:
name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
from jinja2 import _stringdefs
name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
_stringdefs.xid_continue))
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Return a human readable description for a token type."""
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    descriptions = {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template',
    }
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    if token.type != 'name':
        return _describe_token_type(token.type)
    # name tokens are best described by their actual value
    return token.value
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' not in expr:
        return _describe_token_type(expr)
    type, value = expr.split(':', 1)
    if type == 'name':
        # 'name:foo' describes itself by the value part
        return value
    return _describe_token_type(type)
def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    return sum(1 for _match in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape
    # (length, name, escaped-pattern) triples; the length is only used
    # for ordering so longer delimiters win over shorter ones.
    rules = [
        (len(environment.comment_start_string), 'comment',
         escape(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         escape(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         escape(environment.variable_start_string)),
    ]

    line_statement = environment.line_statement_prefix
    if line_statement is not None:
        rules.append((len(line_statement), 'linestatement',
                      r'^[ \t\v]*' + escape(line_statement)))
    line_comment = environment.line_comment_prefix
    if line_comment is not None:
        rules.append((len(line_comment), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' + escape(line_comment)))

    rules.sort(reverse=True)
    return [(name, pattern) for _length, name, pattern in rules]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        # message: error text reported to the user
        # cls: exception class raised on invocation
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # Invoked by the tokenizer when a failure rule matches.
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Token class."""
    __slots__ = ()
    # expose the tuple slots as named read-only attributes
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # the type is interned so token types can later be compared with
        # `is` (see e.g. `token.type is TOKEN_EOF` in the stream classes)
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # operators print as the operator glyph, names as their value,
        # everything else as the bare type name
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
@implements_iterator
class TokenStreamIterator(object):
    """Iterator driving a token stream.

    Yields tokens from the wrapped stream until its eof token is
    reached, at which point the stream is closed and iteration stops.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        current = self.stream.current
        # identity check works because token types are interned strings
        if current.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return current
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # tokens pushed back via push() are served before the generator
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        # advance once so `current` holds the first real token
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        # truthy while anything is left to consume (pushed-back tokens
        # count; the eof token itself does not)
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # advance, remember the lookahead, then restore the previous
        # state by pushing the lookahead back and re-installing current
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        # replace current with an eof token so further advancing is a
        # no-op, and drop the generator reference
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        # return the matched token and advance past it
        try:
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer for *environment*, reusing a cached instance when
    all lexer-relevant settings match a previously built one.
    """
    key = tuple(getattr(environment, attr) for attr in (
        'block_start_string',
        'block_end_string',
        'variable_start_string',
        'variable_end_string',
        'comment_start_string',
        'comment_end_string',
        'line_statement_prefix',
        'line_comment_prefix',
        'trim_blocks',
        'lstrip_blocks',
        'newline_sequence',
        'keep_trailing_newline',
    ))
    lexer = _lexer_cache.get(key)
    if lexer is not None:
        return lexer
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # shortcuts: c compiles a multiline/dotall pattern, e escapes
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                    lstrip_re,
                    e(environment.block_start_string),
                    no_lstrip_re,
                    e(environment.block_start_string),
                    )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                    lstrip_re,
                    e(environment.comment_start_string),
                    no_variable_re,
                    e(environment.comment_start_string),
                    )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules: maps a lexer state to a list of
        # (compiled regex, token(s), new state) rules tried in order
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c('(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c('\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
                # if we can express it as bytestring (ascii only)
                # we do that for support of semi broken APIs
                # as datetime.datetime.strftime.  On python 3 this
                # call becomes a noop thanks to 2to3
                try:
                    value = str(value)
                except UnicodeError:
                    pass
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        # splitlines() drops a trailing newline; re-append an empty line
        # so joining below preserves it when requested
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']

        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'

        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
| gpl-3.0 |
storm-computers/odoo | addons/website_forum/models/res_users.py | 15 | 4764 | # -*- coding: utf-8 -*-
from datetime import datetime
import hashlib
from urllib import urlencode
from openerp import models, fields, api
class Users(models.Model):
    """Extend res.users with forum karma and gamification badge counters."""
    _inherit = 'res.users'

    def __init__(self, pool, cr):
        init_res = super(Users, self).__init__(pool, cr)
        # let users edit their own public-profile fields
        self.SELF_WRITEABLE_FIELDS = list(
            set(
                self.SELF_WRITEABLE_FIELDS +
                ['country_id', 'city', 'website', 'website_description', 'website_published']))
        return init_res

    create_date = fields.Datetime('Create Date', readonly=True, copy=False, index=True)
    karma = fields.Integer('Karma', default=0)
    badge_ids = fields.One2many('gamification.badge.user', 'user_id', string='Badges', copy=False)
    gold_badge = fields.Integer('Gold badges count', compute="_get_user_badge_level")
    silver_badge = fields.Integer('Silver badges count', compute="_get_user_badge_level")
    bronze_badge = fields.Integer('Bronze badges count', compute="_get_user_badge_level")

    @api.multi
    @api.depends('badge_ids')
    def _get_user_badge_level(self):
        """ Return total badge per level of users
        TDE CLEANME: shouldn't check type is forum ? """
        # reset first so users without badges end up at 0
        for user in self:
            user.gold_badge = 0
            user.silver_badge = 0
            user.bronze_badge = 0

        # one grouped query instead of iterating badge_ids per user
        self.env.cr.execute("""
            SELECT bu.user_id, b.level, count(1)
            FROM gamification_badge_user bu, gamification_badge b
            WHERE bu.user_id IN %s
              AND bu.badge_id = b.id
              AND b.level IS NOT NULL
            GROUP BY bu.user_id, b.level
            ORDER BY bu.user_id;
        """, [tuple(self.ids)])
        for (user_id, level, count) in self.env.cr.fetchall():
            # levels are gold, silver, bronze but fields have _badge postfix
            self.browse(user_id)['{}_badge'.format(level)] = count

    @api.model
    def _generate_forum_token(self, user_id, email):
        """Return a token for email validation. This token is valid for the day
        and is a hash based on a (secret) uuid generated by the forum module,
        the user_id, the email and currently the day (to be updated if necessary). """
        forum_uuid = self.env['ir.config_parameter'].sudo().get_param('website_forum.uuid')
        # day granularity: time components are zeroed before hashing
        return hashlib.sha256('%s-%s-%s-%s' % (
            datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
            forum_uuid,
            user_id,
            email)).hexdigest()

    @api.one
    def send_forum_validation_email(self, forum_id=None):
        # Send the email-validation mail; returns False when the user
        # has no email address, True otherwise.
        if not self.email:
            return False
        token = self._generate_forum_token(self.id, self.email)
        activation_template = self.env.ref('website_forum.validation_email')
        if activation_template:
            params = {
                'token': token,
                'id': self.id,
                'email': self.email}
            if forum_id:
                params['forum_id'] = forum_id
            base_url = self.env['ir.config_parameter'].get_param('web.base.url')
            token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
            activation_template.sudo().with_context(token_url=token_url).send_mail(self.id, force_send=True)
        return True

    @api.one
    def process_forum_validation_token(self, token, email, forum_id=None, context=None):
        # Validate the token and grant the initial karma; only applies to
        # users that still have zero karma.
        validation_token = self._generate_forum_token(self.id, email)
        if token == validation_token and self.karma == 0:
            karma = 3
            forum = None
            if forum_id:
                forum = self.env['forum.forum'].browse(forum_id)
            else:
                forum_ids = self.env['forum.forum'].search([], limit=1)
                if forum_ids:
                    forum = forum_ids[0]
            if forum:
                # karma gained: karma to ask a question and have 2 downvotes
                karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
            return self.write({'karma': karma})
        return False

    @api.multi
    def add_karma(self, karma):
        # karma may be negative to apply a penalty
        for user in self:
            user.karma += karma
        return True

    @api.model
    def get_serialised_gamification_summary(self, excluded_categories=None):
        # forum badges are shown on the forum itself, so always exclude
        # the 'forum' category from the generic gamification summary
        if isinstance(excluded_categories, list):
            if 'forum' not in excluded_categories:
                excluded_categories.append('forum')
        else:
            excluded_categories = ['forum']
        return super(Users, self).get_serialised_gamification_summary(excluded_categories=excluded_categories)

    # Wrapper for call_kw with inherits
    @api.multi
    def open_website_url(self):
        return self.mapped('partner_id').open_website_url()
| agpl-3.0 |
tkasp/osmose-backend | modules/OsmBin.py | 4 | 25328 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <chove@crans.org> 2010 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
###########################################################################
## OSM IMPORT ##
###########################################################################
# 1. mkdir /data/osmbin
# 2. ./OsmBin.py --init /data/osmbin
# 3. wget -O - -o /dev/null http://planet.openstreetmap.org/planet-latest.osm.bz2 \
# | bunzip2
# | ./OsmBin.py --import /data/osmbin -
###########################################################################
## OSC UPDATE ##
###########################################################################
# for i in /data/updates
# do
# bzcat /data/updates/$i | ./OsmBin.py --update /data/osmbin -
# done
###########################################################################
## PYTHON ##
###########################################################################
# import OsmBin
# bin = OsmBin("/data/osmbin", "r")
# print bin.NodeGet(12)
# print bin.WayGet(12)
# print bin.RelationGet(12)
# print bin.RelationFullRecur(12)
from modules.lockfile import lockfile
from . import OsmReader
import sys
import os
class MissingDataError(Exception):
    """Raised when a requested OSM element is missing from the data store."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "MissingDataError({0})".format(self.value)
class RelationLoopError(Exception):
    """Raised when recursing relations encounters a membership cycle."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "RelationLoopError({0})".format(self.value)
###########################################################################
## Common functions
_CstMax2 = 2**16-1
_CstMax4 = 2**32-1
def _Bytes5ToInt(txt):
if len(txt) != 5:
return None
# 0 to 1.099.511.627.776
txt = bytearray(txt)
i0 = txt[0]
i1 = txt[1]
i2 = txt[2]
i3 = txt[3]
i4 = txt[4]
return 4294967296*i0+16777216*i1+65536*i2+256*i3+i4
def _IntToBytes5(num):
i0 = num//4294967296
num -= 4294967296*i0
i1 = num//16777216
num -= 16777216*i1
i2 = num//65536
num -= 65536*i2
i3 = num//256
i4 = num - 256*i3
return bytearray([i0, i1, i2, i3, i4])
def _Bytes4ToInt(txt):
# 0 to 4.294.967.295
txt = bytearray(txt)
i0 = txt[0]
i1 = txt[1]
i2 = txt[2]
i3 = txt[3]
return 16777216*i0+65536*i1+256*i2+i3
def _IntToBytes4(num):
i0 = num//16777216
num -= 16777216*i0
i1 = num//65536
num -= 65536*i1
i2 = num//256
i3 = num - 256*i2
return bytearray([i0, i1, i2, i3])
def _Bytes2ToInt(txt):
# 0 to 65535
txt = bytearray(txt)
i0 = txt[0]
i1 = txt[1]
return 256*i0+i1
def _IntToBytes2(num):
i0 = num//256
i1 = num - 256*i0
return bytearray([i0, i1])
def _Bytes1ToInt(txt):
# 0 to 255
txt = bytearray(txt)
return txt[0]
def _IntToBytes1(i0):
return bytearray([i0])
def _Bytes4ToCoord(num):
    """Decode 4 bytes into a coordinate: stored as an unsigned fixed-point
    value offset by 180 degrees with 1e-7 degree resolution."""
    raw = _Bytes4ToInt(num)
    return (raw - 1800000000) / 10000000.0
def _CoordToBytes4(coord):
    """Encode a coordinate as 4 bytes (inverse of _Bytes4ToCoord)."""
    fixed = int(coord * 10000000 + 1800000000)
    return _IntToBytes4(fixed)
###########################################################################
## InitFolder
def InitFolder(folder):
    """Create (or reset) the on-disk layout of an OsmBin store in *folder*.

    Writes zero-filled node.crd and way.idx index files, a way.data file
    whose first two bytes are a "--" placeholder (offset 0 is reserved to
    mean "no way"), and an empty way.free free-list.

    Fix: file handles are now closed deterministically via ``with`` —
    the original left every handle to be closed by garbage collection.
    """
    # NOTE(review): 2**4 looks like a debug-sized capacity — with it the
    # chunked write loops below run zero times; confirm intended sizes.
    nb_node_max = 2**4
    nb_way_max = 2**4
    if not os.path.exists(folder):
        os.makedirs(folder)
    # create node.crd: fixed 8-byte (lat, lon) slots, written in chunks of
    # `groupe` slots to bound memory use
    print("Creating node.crd")
    groupe = 2**10
    k = _IntToBytes4(0) * 2 * groupe
    with open(os.path.join(folder, "node.crd"), "wb") as f:
        for i in range(nb_node_max//groupe):
            f.write(k)
    del k
    # create way.idx: fixed 5-byte offset slots, one per way id
    print("Creating way.idx")
    groupe = 1000
    k = _IntToBytes5(0) * groupe
    with open(os.path.join(folder, "way.idx"), "wb") as f:
        for i in range(nb_way_max//groupe):
            f.write(k)
    del k
    # reset way.data: "--" keeps offset 0 occupied, since offset 0 in
    # way.idx means "way does not exist"
    print("Creating way.data")
    with open(os.path.join(folder, "way.data"), "wb") as f:
        f.write(b"--")
    # reset way.free: truncate the free list to empty
    print("Creating way.free")
    with open(os.path.join(folder, "way.free"), "wb"):
        pass
###########################################################################
## OsmBinWriter
class OsmBin:
    """Random-access store of OSM data rooted at *folder*.

    On-disk layout (created by InitFolder):
      node.crd  - one fixed 8-byte record per node id (lat, lon as 4-byte
                  fixed-point ints); record for node N starts at byte 8*N
      way.idx   - one 5-byte offset per way id, pointing into way.data;
                  offset 0 means the way does not exist
      way.data  - per way: 2-byte node count, then 5-byte node ids
      way.free  - free list of reusable way.data slots ("offset;count" lines)
      relation/ - one repr()-serialised dict per relation, sharded over
                  directories by the zero-padded 9-digit relation id
    """
    def __init__(self, folder, mode = "r"):
        # mode "w" opens the files read/write and takes an exclusive lock
        # file; mode "r" opens everything read-only.
        self._mode = mode
        self._folder = folder
        self._reldir = os.path.join(folder, "relation")
        self._fNode_crd = open(os.path.join(folder, "node.crd"), {"w":"rb+", "r":"rb"}[mode])
        self._fWay_idx = open(os.path.join(folder, "way.idx"), {"w":"rb+", "r":"rb"}[mode])
        self._fWay_data = open(os.path.join(folder, "way.data"), {"w":"rb+", "r":"rb"}[mode])
        self._fWay_data_size = os.stat(os.path.join(folder, "way.data")).st_size
        if self._mode == "w":
            lock_file = os.path.join(folder, "lock")
            self._lock = lockfile(lock_file)
            self._ReadFree()
        # Node ids are serialised as 5-byte big-endian integers.
        self.node_id_size = 5
    def __del__(self):
        # Close the data files; in write mode also flush the free list and
        # release the folder lock.
        try:
            self._fNode_crd.close()
            self._fWay_idx.close()
            self._fWay_data.close()
        except AttributeError:
            pass
        if self._mode == "w":
            self._WriteFree()
            del self._lock
    def _ReadFree(self):
        # Load way.free into {node_count: [offsets...]} so that freed
        # way.data slots can be reused by ways with the same node count.
        # Slots exist for ways of up to 2000 nodes.
        self._free = {}
        for nbn in range(2001):
            self._free[nbn] = []
        f = open(os.path.join(self._folder, "way.free"))
        while True:
            line = f.readline()
            if not line:
                break
            line = line.strip().split(';')
            self._free[int(line[1])].append(int(line[0]))
    def _WriteFree(self):
        # Persist the in-memory free list back to way.free; harmless no-op
        # when _ReadFree was never called (read-only instances).
        try:
            self._free
        except AttributeError:
            return
        f = open(os.path.join(self._folder, "way.free"), 'w')
        for nbn in self._free:
            for ptr in self._free[nbn]:
                f.write("%d;%d\n" % (ptr, nbn))
        f.close()
    def begin(self):
        pass
    def end(self):
        pass
    #######################################################################
    ## node functions
    def NodeGet(self, NodeId):
        # Fixed-size records: seek straight to the node's slot.  A short
        # read means the id lies beyond the end of node.crd (unknown node);
        # deleted nodes read back as all-zero bytes instead.
        data = {}
        data["id"] = NodeId
        self._fNode_crd.seek(8*data[u"id"])
        read = self._fNode_crd.read(8)
        if len(read) != 8:
            return None
        data["lat"] = _Bytes4ToCoord(read[:4])
        data["lon"] = _Bytes4ToCoord(read[4:])
        data["tag"] = {}
        return data
    def NodeCreate(self, data):
        LatBytes4 = _CoordToBytes4(data[u"lat"])
        LonBytes4 = _CoordToBytes4(data[u"lon"])
        self._fNode_crd.seek(8*data[u"id"])
        self._fNode_crd.write(LatBytes4+LonBytes4)
    NodeUpdate = NodeCreate
    def NodeDelete(self, data):
        # Deletion simply zeroes the slot; the file never shrinks.
        LatBytes4 = _IntToBytes4(0)
        LonBytes4 = _IntToBytes4(0)
        self._fNode_crd.seek(8*data[u"id"])
        self._fNode_crd.write(LatBytes4+LonBytes4)
    #######################################################################
    ## way functions
    def WayGet(self, WayId, dump_sub_elements=False):
        # way.idx maps the way id to an offset in way.data; an offset of 0
        # (or a read past end of file, which decodes to None) means the way
        # does not exist.
        self._fWay_idx.seek(5*WayId)
        AdrWay = _Bytes5ToInt(self._fWay_idx.read(5))
        if not AdrWay:
            return None
        self._fWay_data.seek(AdrWay)
        nbn = _Bytes2ToInt(self._fWay_data.read(2))
        data = self._fWay_data.read(self.node_id_size*nbn)
        nds = []
        for i in range(nbn):
            nds.append(_Bytes5ToInt(data[self.node_id_size*i:self.node_id_size*(i+1)]))
        return {"id": WayId, "nd": nds, "tag":{}}
    def WayCreate(self, data):
        self.WayDelete(data)
        # Search space big enough to store node list: reuse a freed slot
        # that held a way with the same node count, else append at the end.
        nbn = len(data["nd"])
        if self._free[nbn]:
            AdrWay = self._free[nbn].pop()
        else:
            AdrWay = self._fWay_data_size
            self._fWay_data_size += 2 + self.node_id_size*nbn
        # File way.idx
        self._fWay_idx.seek(5*data[u"id"])
        self._fWay_idx.write(_IntToBytes5(AdrWay))
        # File way.dat
        self._fWay_data.seek(AdrWay)
        c = _IntToBytes2(len(data[u"nd"]))
        for NodeId in data[u"nd"]:
            c += _IntToBytes5(NodeId)
        self._fWay_data.write(c)
    WayUpdate = WayCreate
    def WayDelete(self, data):
        # Seek to position in file containing address to node list
        self._fWay_idx.seek(5*data[u"id"])
        AdrWay = _Bytes5ToInt(self._fWay_idx.read(5))
        if not AdrWay:
            return
        # Free space
        self._fWay_data.seek(AdrWay)
        nbn = _Bytes2ToInt(self._fWay_data.read(2))
        try:
            self._free[nbn].append(AdrWay)
        except KeyError:
            # Ways longer than the free-list capacity (2000 nodes, see
            # _ReadFree) cannot be recycled; surface the inconsistency.
            print("Cannot access free[%d] for way id=%d, idx=%d" % (nbn, data[u"id"], AdrWay))
            raise
        # Save deletion
        self._fWay_idx.seek(5*data[u"id"])
        self._fWay_idx.write(_IntToBytes5(0))
    #######################################################################
    ## relation functions
    def RelationGet(self, RelationId, dump_sub_elements=False):
        # Relations live at relation/XXX/YYY/ZZZ where XXXYYYZZZ is the
        # zero-padded 9-digit relation id.
        RelationId = "%09d" % RelationId
        RelFolder = self._reldir + "/" + RelationId[0:3] + "/" + RelationId[3:6] + "/"
        RelFile = RelationId[6:9]
        if os.path.exists(RelFolder + RelFile):
            # NOTE(review): eval() of repr()-serialised data — only safe
            # while the store is written exclusively by RelationCreate.
            return eval(open(RelFolder + RelFile).read())
        else:
            return None
    def RelationCreate(self, data):
        RelationId = "%09d" % data["id"]
        RelFolder = self._reldir + "/" + RelationId[0:3] + "/" + RelationId[3:6] + "/"
        RelFile = RelationId[6:9]
        if not os.path.exists(RelFolder):
            os.makedirs(RelFolder)
        open(RelFolder + RelFile, "w").write(repr(data))
    RelationUpdate = RelationCreate
    def RelationDelete(self, data):
        RelationId = "%09d" % data["id"]
        RelFolder = self._reldir + "/" + RelationId[0:3] + "/" + RelationId[3:6] + "/"
        RelFile = RelationId[6:9]
        try:
            os.remove(RelFolder + RelFile)
        except:
            # A missing file is treated as already deleted.
            pass
    def RelationFullRecur(self, RelationId, WayNodes = True, RaiseOnLoop = True, RemoveSubarea = False, RecurControl = []):
        # Expand a relation into a flat list of all its members, recursing
        # into sub-relations.  RecurControl carries the chain of ancestor
        # relation ids used for loop detection (the mutable default is
        # harmless here: the list is only read and copied, never mutated).
        rel = self.RelationGet(RelationId)
        dta = [{"type": "relation", "data": rel}]
        for m in rel["member"]:
            if m["type"] == "node":
                dta.append({"type": "node", "data": self.NodeGet(m["ref"])})
            elif m["type"] == "way":
                way = self.WayGet(m["ref"])
                if not way:
                    raise MissingDataError("missing way %d" % m["ref"])
                dta.append({"type": "way", "data": way})
                if WayNodes:
                    for n in way["nd"]:
                        dta.append({"type": "node", "data": self.NodeGet(n)})
            elif m["type"] == "relation":
                if m["ref"] == RelationId:
                    if not RaiseOnLoop:
                        continue
                    raise RelationLoopError('self member '+str(RelationId))
                if m["ref"] in RecurControl:
                    if not RaiseOnLoop:
                        continue
                    raise RelationLoopError('member loop '+str(RecurControl+[RelationId, m["ref"]]))
                if RemoveSubarea and m["role"] in [u"subarea", u"region"]:
                    continue
                dta += self.RelationFullRecur(m["ref"], WayNodes = WayNodes, RaiseOnLoop = RaiseOnLoop, RecurControl = RecurControl+[RelationId])
        return dta
    #######################################################################
    ## user functions
    def UserGet(self, UserId):
        # User data is not stored in this backend.
        return None
    #######################################################################
    def CopyWayTo(self, output):
        # seek(0, 2) moves to end of file, so tell() yields way.idx's size;
        # every 5-byte slot is a candidate way id.
        self._fWay_idx.seek(0,2)
        way_idx_size = self._fWay_idx.tell()
        for i in range(way_idx_size // 5):
            way = self.WayGet(i)
            if way:
                output.WayCreate(way)
    def CopyRelationTo(self, output):
        # Walk the three-level relation shard tree and replay every file.
        for i in os.listdir(self._reldir):
            for j in os.listdir(self._reldir+"/"+i):
                for k in os.listdir(self._reldir+"/"+i+"/"+j):
                    output.RelationCreate(eval(open(self._reldir+"/"+i+"/"+j+"/"+k).read()))
    def Import(self, f):
        # Bulk-load a full OSM file via the generic reader.
        i = OsmReader.open(f)
        i.CopyTo(self)
    def Update(self, f):
        # Apply an .osc change file; "-" reads the diff from stdin.
        from . import OsmSax
        if f == "-":
            i = OsmSax.OscSaxReader(sys.stdin)
        else:
            i = OsmSax.OscSaxReader(f)
        i.CopyTo(self)
###########################################################################
if __name__ == "__main__":
    # Command-line interface:
    #   --init FOLDER            create an empty store
    #   --import FOLDER FILE     bulk-load an OSM file
    #   --update FOLDER FILE     apply an .osc diff ("-" = stdin)
    #   --read FOLDER KIND ID    print one element (node/way/relation/...)
    #   --pyro                   expose the store as a Pyro remote object
    if sys.argv[1] == "--init":
        InitFolder(sys.argv[2])
    if sys.argv[1] == "--import":
        o = OsmBin(sys.argv[2], "w")
        o.Import(sys.argv[3])
    if sys.argv[1] == "--update":
        o = OsmBin(sys.argv[2], "w")
        o.Update(sys.argv[3])
    if sys.argv[1] == "--read":
        i = OsmBin(sys.argv[2])
        if sys.argv[3] == "node":
            print(i.NodeGet(int(sys.argv[4])))
        if sys.argv[3] == "way":
            print(i.WayGet(int(sys.argv[4])))
        if sys.argv[3] == "relation":
            print(i.RelationGet(int(sys.argv[4])))
        if sys.argv[3] == "relation_full":
            import pprint
            pprint.pprint(i.RelationFullRecur(int(sys.argv[4])))
    if sys.argv[1] == "--pyro":
        import Pyro.core
        import Pyro.naming
        # Remote-callable wrapper: Pyro object base plus the store itself.
        class OsmBin2(Pyro.core.ObjBase, OsmBin):
            def __init__(self, folder):
                Pyro.core.ObjBase.__init__(self)
                OsmBin.__init__(self, folder)
        daemon = Pyro.core.Daemon()
        #ns = Pyro.naming.NameServerLocator().getNS()
        #daemon.useNameServer(ns)
        uri = daemon.connect(OsmBin2("/data/work/osmbin/data/"), "OsmBin")
        daemon.requestLoop()
###########################################################################
import unittest
class MockCountObjects:
    """Copy target that only tallies how many elements of each kind it
    receives (used to verify CopyWayTo/CopyRelationTo)."""

    def __init__(self):
        self.num_nodes = self.num_ways = self.num_rels = 0

    def NodeCreate(self, data):
        self.num_nodes = self.num_nodes + 1

    def WayCreate(self, data):
        self.num_ways = self.num_ways + 1

    def RelationCreate(self, data):
        self.num_rels = self.num_rels + 1
class Test(unittest.TestCase):
    """Integration tests for OsmBin against the bundled Saint-Barthélemy
    sample extract; each test starts from a freshly imported store."""
    def setUp(self):
        import shutil
        from modules import config
        self.test_dir = config.dir_tmp + "/tests/osmbin/"
        shutil.rmtree(self.test_dir, True)
        InitFolder(self.test_dir)
        self.a = OsmBin(self.test_dir, "w")
        self.a.Import("tests/saint_barthelemy.osm.bz2")
    def tearDown(self):
        import shutil
        # Deleting the handle first triggers __del__ (flushes the free
        # list and releases the lock) before the directory is removed.
        del self.a
        shutil.rmtree(self.test_dir)
    def check_node(self, func, id, exists=True, expected=None):
        # Assert presence/absence of a node; deleted nodes read back as
        # the all-zero slot value.  (`id` shadows the builtin — kept as-is.)
        res = func(id)
        if exists:
            assert res
            assert res["lat"]
            assert res["lon"]
            self.assertEqual(res["id"], id)
            if expected:
                self.assertEqual(res["lat"], expected["lat"])
                self.assertEqual(res["lon"], expected["lon"])
        else:
            if res:
                self.assertEqual(res["lat"], _Bytes4ToCoord(_IntToBytes4(0)))
                self.assertEqual(res["lon"], _Bytes4ToCoord(_IntToBytes4(0)))
    def check_way(self, func, id, exists=True, expected=None):
        # Assert presence/absence of a way and optionally its node list.
        res = func(id)
        if exists:
            assert res
            assert res["nd"]
            self.assertEqual(res["tag"], {})
            self.assertEqual(res["id"], id)
            if expected:
                self.assertEqual(res["nd"], expected["nd"])
        else:
            assert not res
    def check_relation(self, func, id, exists=True, expected=None):
        # Assert presence/absence of a relation and optionally its members
        # and tags.
        res = func(id)
        if exists:
            assert res
            assert res["member"]
            assert isinstance(res["tag"], dict)
            self.assertEqual(res["id"], id)
            if expected:
                self.assertEqual(res["member"], expected["member"])
                self.assertEqual(res["tag"], expected["tag"])
        else:
            assert not res
    def test_copy_relation(self):
        o1 = MockCountObjects()
        self.a.CopyRelationTo(o1)
        self.assertEqual(o1.num_nodes, 0)
        self.assertEqual(o1.num_ways, 0)
        self.assertEqual(o1.num_rels, 16)
    def test_node(self):
        # Reopen read-only to exercise the "r" code path.
        del self.a
        self.a = OsmBin(self.test_dir, "r")
        self.check_node(self.a.NodeGet, 266053077, expected={"lat": 17.9031745, "lon": -62.8363074})
        self.check_node(self.a.NodeGet, 2619283351)
        self.check_node(self.a.NodeGet, 2619283352, expected={"lat": 17.9005419, "lon": -62.8327042})
        self.check_node(self.a.NodeGet, 1, False)
        self.check_node(self.a.NodeGet, 266053076, False)
        self.check_node(self.a.NodeGet, 2619283353, False)
    def test_way(self):
        self.check_way(self.a.WayGet, 24473155)
        self.check_way(self.a.WayGet, 255316725, expected={"nd": [2610107905,2610107903,2610107901,2610107902,2610107904,2610107905]})
        self.check_way(self.a.WayGet, 1, False)
        self.check_way(self.a.WayGet, 24473154, False)
        self.check_way(self.a.WayGet, 255316726, False)
    def test_relation(self):
        del self.a
        self.a = OsmBin(self.test_dir, "r")
        self.check_relation(self.a.RelationGet, 47796)
        self.check_relation(self.a.RelationGet, 529891,
                            expected={"member": [{'type': 'node', 'ref': 670634766, 'role': ''},
                                                 {'type': 'node', 'ref': 670634768, 'role': ''}],
                                      "tag": {"name": u"Saint-Barthélemy III",
                                              "note": u"la Barriere des Quatre Vents",
                                              "ref": u"9712303",
                                              "site": u"geodesic",
                                              "source": u"©IGN 2010 dans le cadre de la cartographie réglementaire",
                                              "type": u"site",
                                              "url": u"http://ancien-geodesie.ign.fr/fiche_geodesie_OM.asp?num_site=9712303&X=519509&Y=1980304"}
                                      })
        self.check_relation(self.a.RelationGet, 2324452,
                            expected={"member": [{'type': 'node', 'ref': 279149652, 'role': 'admin_centre'},
                                                 {'type': 'way', 'ref': 174027472, 'role': 'outer'},
                                                 {'type': 'way', 'ref': 53561037, 'role': 'outer'},
                                                 {'type': 'way', 'ref': 53561045, 'role': 'outer'},
                                                 {'type': 'way', 'ref': 53656098, 'role': 'outer'},
                                                 {'type': 'way', 'ref': 174027473, 'role': 'outer'},
                                                 {'type': 'way', 'ref': 174023902, 'role': 'outer'}],
                                      "tag": {"admin_level": u"8",
                                              "boundary": u"administrative",
                                              "local_name": u"Statia",
                                              "name": u"Sint Eustatius",
                                              "name:el": u"Άγιος Ευστάθιος",
                                              "name:fr": u"Saint-Eustache",
                                              "name:nl": u"Sint Eustatius",
                                              "type": u"boundary"}
                                      })
        self.check_relation(self.a.RelationGet, 2707693)
        self.check_relation(self.a.RelationGet, 1, False)
        self.check_relation(self.a.RelationGet, 47795, False)
        self.check_relation(self.a.RelationGet, 2707694, False)
    def test_relation_full(self):
        # Recursive expansion: relation first, then members in order; way
        # members are followed by their nodes.
        res = self.a.RelationFullRecur(529891)
        assert res
        self.assertEqual(res[0]["type"], "relation")
        self.assertEqual(res[0]["data"]["id"], 529891)
        self.assertEqual(res[1]["type"], "node")
        self.assertEqual(res[1]["data"]["id"], 670634766)
        self.assertEqual(res[2]["type"], "node")
        self.assertEqual(res[2]["data"]["id"], 670634768)
        self.a.Update("tests/saint_barthelemy.osc.gz")
        res = self.a.RelationFullRecur(7800)
        assert res
        self.assertEqual(res[0]["type"], "relation")
        self.assertEqual(res[0]["data"]["id"], 7800)
        self.assertEqual(res[1]["type"], "node")
        self.assertEqual(res[1]["data"]["id"], 78)
        self.assertEqual(res[2]["type"], "node")
        self.assertEqual(res[2]["data"]["id"], 79)
        self.assertEqual(res[3]["type"], "way")
        self.assertEqual(res[3]["data"]["id"], 780)
        self.assertEqual(res[4]["type"], "node")
        self.assertEqual(res[4]["data"]["id"], 78)
        self.assertEqual(res[5]["type"], "node")
        self.assertEqual(res[5]["data"]["id"], 79)
    def test_relation_full_missing(self):
        with self.assertRaises(MissingDataError) as cm:
            self.a.RelationFullRecur(47796)
        self.assertEqual(str(cm.exception), "MissingDataError(missing way 82217912)")
    def test_relation_full_loop(self):
        self.a.Update("tests/saint_barthelemy.osc.gz")
        with self.assertRaises(RelationLoopError) as cm:
            self.a.RelationFullRecur(7801)
        self.assertEqual(str(cm.exception), "RelationLoopError(member loop [7801, 7802, 7801])")
    def test_update(self):
        # Elements present before the diff, absent or changed after it.
        self.check_node(self.a.NodeGet, 2619283352, expected={"lat": 17.9005419, "lon": -62.8327042})
        self.check_node(self.a.NodeGet, 1759873129)
        self.check_node(self.a.NodeGet, 1759883953)
        self.check_node(self.a.NodeGet, 1973325505)
        self.check_way(self.a.WayGet, 24552609)
        self.check_way(self.a.WayGet, 24552626)
        self.check_way(self.a.WayGet, 24552826)
        self.check_relation(self.a.RelationGet, 529891)
        self.check_relation(self.a.RelationGet, 1106302)
        self.check_node(self.a.NodeGet, 78, False)
        self.check_node(self.a.NodeGet, 79, False)
        self.check_way(self.a.WayGet, 780, False)
        self.check_relation(self.a.RelationGet, 7800, False)
        self.check_relation(self.a.RelationGet, 7801, False)
        self.a.Update("tests/saint_barthelemy.osc.gz")
        self.check_node(self.a.NodeGet, 2619283352, expected={"lat": 17.9005419, "lon": -62.8327042})
        self.check_node(self.a.NodeGet, 1759873129, False)
        self.check_node(self.a.NodeGet, 1759883953, False)
        self.check_node(self.a.NodeGet, 1973325505, False)
        self.check_way(self.a.WayGet, 24552609, False)
        self.check_way(self.a.WayGet, 24552626, False)
        self.check_way(self.a.WayGet, 24552826, False)
        self.check_relation(self.a.RelationGet, 529891, False)
        self.check_relation(self.a.RelationGet, 1106302, False)
        self.check_node(self.a.NodeGet, 78, expected={"lat": 18.1, "lon": -63.1})
        self.check_node(self.a.NodeGet, 79, expected={"lat": 18.2, "lon": -63.2})
        self.check_way(self.a.WayGet, 780, expected={"nd": [78,79]})
        self.check_relation(self.a.RelationGet, 7800,
                            expected={"member": [{'type': 'node', 'ref': 78, 'role': ''},
                                                 {'type': 'node', 'ref': 79, 'role': ''},
                                                 {'type': 'way', 'ref': 780, 'role': 'outer'}],
                                      "tag": {"name": u"Saint-Barthélemy III"},
                                      })
        self.check_relation(self.a.RelationGet, 7801,
                            expected={"member": [{'type': 'relation', 'ref': 7802, 'role': ''}],
                                      "tag": {},
                                      })
| gpl-3.0 |
ldjebran/robottelo | tests/foreman/api/test_audit.py | 2 | 7868 | """Unit tests for the ``audit`` paths.
:Requirement: Audit
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from nailgun import entities
from robottelo.datafactory import gen_string
from robottelo.decorators import tier1
from robottelo.test import APITestCase
class AuditTestCase(APITestCase):
    """Tests for audit functionality
    :CaseImportance: High
    """
    @tier1
    def test_positive_create_by_type(self):
        """Create entities of different types and check audit logs for these
        events using entity type as search criteria
        :id: 6c7ea7fc-6728-447f-9655-26fe0a2881bc
        :customerscenario: true
        :expectedresults: Audit logs contain corresponding entries per each
            create event
        :BZ: 1426742, 1492668, 1492696
        :CaseImportance: Medium
        :CaseComponent: AuditLog
        """
        # Each item carries the entity to create, plus optional overrides:
        #   'entity_type'    - audit "type" search string when it differs
        #                      from the lower-cased class name
        #   'value_template' - format template deriving the expected
        #                      auditable_name from the created entity
        #                      (default: its .name attribute)
        for entity_item in [
            {'entity': entities.Architecture()},
            {
                'entity': entities.AuthSourceLDAP(),
                'entity_type': 'auth_source',
                'value_template': 'LDAP-{entity.name}'
            },
            {
                'entity': entities.ComputeProfile(),
                'entity_type': 'compute_profile'
            },
            {
                'entity': entities.LibvirtComputeResource(),
                'entity_type': 'compute_resource',
                'value_template': '{entity.name} (Libvirt)'
            },
            {'entity': entities.ConfigGroup(), 'entity_type': 'config_group'},
            {'entity': entities.Domain()},
            {'entity': entities.Host()},
            {'entity': entities.HostGroup()},
            {'entity': entities.Image(
                compute_resource=entities.LibvirtComputeResource().create())},
            {'entity': entities.Location()},
            {'entity': entities.Media(), 'entity_type': 'medium'},
            {'entity': entities.Organization()},
            {
                'entity': entities.OperatingSystem(),
                'entity_type': 'os',
                'value_template': '{entity.name} {entity.major}'
            },
            {
                'entity': entities.PartitionTable(),
                'entity_type': 'ptable',
            },
            {'entity': entities.PuppetClass()},
            {'entity': entities.Role()},
            {
                'entity': entities.Subnet(),
                'value_template': '{entity.name} '
                                  '({entity.network}/{entity.cidr})'
            },
            {
                'entity': entities.ProvisioningTemplate(),
                'entity_type': 'provisioning_template',
            },
            {'entity': entities.User(), 'value_template': '{entity.login}'},
            {'entity': entities.UserGroup()},
            {'entity': entities.ContentView(), 'entity_type': 'katello/content_view'},
            {'entity': entities.LifecycleEnvironment(), 'entity_type': 'katello/kt_environment'},
            {'entity': entities.ActivationKey(), 'entity_type': 'katello/activation_key'},
            {'entity': entities.HostCollection(), 'entity_type': 'katello/host_collection'},
            {'entity': entities.Product(), 'entity_type': 'katello/product'},
            {
                'entity': entities.GPGKey(),
                'entity_type': 'katello/gpg_key',
                'value_template': 'content credential (gpg_key - {entity.name})'
            },
            {'entity': entities.SyncPlan(
                organization=entities.Organization(id=1)
            ), 'entity_type': 'katello/sync_plan'},
        ]:
            created_entity = entity_item['entity'].create()
            entity_type = entity_item.get(
                'entity_type', created_entity.__class__.__name__.lower())
            value_template = entity_item.get('value_template', '{entity.name}')
            entity_value = value_template.format(entity=created_entity)
            audits = entities.Audit().search(
                query={'search': 'type={0}'.format(entity_type)})
            entity_audits = [entry for entry in audits
                             if entry.auditable_name == entity_value]
            if not entity_audits:
                self.fail('audit not found by name "{0}" for entity: {1}'.format(
                    entity_value,
                    created_entity.__class__.__name__.lower()
                )
                )
            audit = entity_audits[0]
            self.assertEqual(audit.auditable_id, created_entity.id)
            self.assertEqual(audit.action, 'create')
            # version 1 == the create event itself
            self.assertEqual(audit.version, 1)
    @tier1
    def test_positive_update_by_type(self):
        """Update some entities of different types and check audit logs for
        these events using entity type as search criteria
        :id: 43e73a11-b241-4b91-bdf6-e966366014e8
        :expectedresults: Audit logs contain corresponding entries per each
            update event
        :CaseImportance: Medium
        :CaseComponent: AuditLog
        """
        for entity in [
            entities.Architecture(),
            entities.Domain(),
            entities.HostGroup(),
            entities.Location(),
            entities.Organization(),
            entities.Role(),
            entities.UserGroup(),
        ]:
            created_entity = entity.create()
            name = created_entity.name
            new_name = gen_string('alpha')
            created_entity.name = new_name
            created_entity = created_entity.update(['name'])
            # The audit entry still carries the pre-update name.
            audits = entities.Audit().search(
                query={'search': 'type={0}'.format(
                    created_entity.__class__.__name__.lower())
                }
            )
            entity_audits = [entry for entry in audits
                             if entry.auditable_name == name]
            if not entity_audits:
                self.fail('audit not found by name "{}"'.format(name))
            audit = entity_audits[0]
            self.assertEqual(audit.auditable_id, created_entity.id)
            self.assertEqual(
                audit.audited_changes['name'], [name, new_name])
            self.assertEqual(audit.action, 'update')
            # version 2 == create + one update
            self.assertEqual(audit.version, 2)
    @tier1
    def test_positive_delete_by_type(self):
        """Delete some entities of different types and check audit logs for
        these events using entity type as search criteria
        :id: de9b056f-10da-485a-87ce-b02a9efff15c
        :expectedresults: Audit logs contain corresponding entries per each
            delete event
        :CaseImportance: Medium
        :CaseComponent: AuditLog
        """
        for entity in [
            entities.Architecture(),
            entities.Domain(),
            entities.Host(),
            entities.HostGroup(),
            entities.Location(),
            entities.Organization(),
            entities.Role(),
            entities.UserGroup(),
        ]:
            created_entity = entity.create()
            created_entity.delete()
            audits = entities.Audit().search(
                query={'search': 'type={0}'.format(
                    created_entity.__class__.__name__.lower())
                }
            )
            entity_audits = [entry for entry in audits
                             if entry.auditable_name == created_entity.name]
            if not entity_audits:
                self.fail('audit not found by name "{}"'.format(
                    created_entity.name))
            audit = entity_audits[0]
            self.assertEqual(audit.auditable_id, created_entity.id)
            self.assertEqual(audit.action, 'destroy')
            # version 2 == create + the destroy event
            self.assertEqual(audit.version, 2)
| gpl-3.0 |
vikatory/kbengine | kbe/src/lib/python/Lib/__future__.py | 134 | 4584 | """Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
# Every feature name ever recognised by the __future__ mechanism, in
# roughly chronological order; per the module contract above, no name is
# ever removed from this list.
all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
    "barry_as_FLUFL",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000 # barry_as_FLUFL
class _Feature:
    """One __future__ feature: the release where it first became
    importable, the release where it becomes (or became) mandatory, and
    the compile() flag that enables it in dynamically compiled code."""

    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease
        self.mandatory = mandatoryRelease
        self.compiler_flag = compiler_flag

    def getOptionalRelease(self):
        """First release (a sys.version_info-style 5-tuple) in which this
        feature was recognized."""
        return self.optional

    def getMandatoryRelease(self):
        """Release in which this feature will become (or became)
        mandatory, as a 5-tuple, or None if the feature was dropped."""
        return self.mandatory

    def __repr__(self):
        return "_Feature{0!r}".format(
            (self.optional, self.mandatory, self.compiler_flag))
# One _Feature record per feature name from all_feature_names:
# (release it became importable, release it becomes/became mandatory,
#  compile() flag).
nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (3, 0, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
                          (3, 9, 0, "alpha", 0),
                          CO_FUTURE_BARRY_AS_BDFL)
| lgpl-3.0 |
qvicksilver/ansible | lib/ansible/runner/action_plugins/async.py | 141 | 1915 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """Action plugin for asynchronous task execution: copies the target
    module to the remote host and hands it to async_wrapper together with
    a generated job id and the configured time limit."""
    def __init__(self, runner):
        self.runner = runner
    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' transfer the given module name, plus the async module, then run it '''
        # Async execution cannot be simulated, so check mode is skipped.
        if self.runner.noop_on_check(inject):
            return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
        # shell and command module are the same
        if module_name == 'shell':
            module_name = 'command'
            module_args += " #USE_SHELL"
        # NOTE(review): substring test — presumably detects a placeholder
        # value rather than a real temp path; confirm runner conventions.
        if "tmp" not in tmp:
            tmp = self.runner._make_tmp_path(conn)
        (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
        # The copied module must be executable for async_wrapper to run it.
        self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)
        return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
                                           async_module=module_path,
                                           async_jid=self.runner.generated_jid,
                                           async_limit=self.runner.background,
                                           inject=inject
                                           )
| gpl-3.0 |
saisai/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py | 124 | 2302 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.applywatchlist import ApplyWatchList
class ApplyWatchListTest(unittest.TestCase):
    """Verifies that the ApplyWatchList step evaluates the (mock) watchlist
    for a diff and posts the resulting CC list and message to the bug."""
    def test_apply_watch_list_local(self):
        capture = OutputCapture()
        step = ApplyWatchList(MockTool(log_executive=True), MockOptions())
        state = {
            'bug_id': '50001',
            'diff': 'The diff',
        }
        # Expected mock-side effects: one watchlist evaluation plus one bug
        # comment carrying the derived CC set and message.
        expected_logs = """MockWatchList: determine_cc_and_messages
MOCK bug comment: bug_id=50001, cc=set(['levin@chromium.org'])
--- Begin comment ---
Message2.
--- End comment ---
"""
        capture.assert_outputs(self, step.run, [state], expected_logs=expected_logs)
| bsd-3-clause |
adamcandy/qgis-plugins-meshing-initial | dev/plugins/rastercalc/rastercalcengine.py | 3 | 11709 | # -*- coding: utf-8 -*-
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
#******************************************************************************
#
# RasterCalc
# ---------------------------------------------------------
# Raster manipulation plugin.
#
# Based on rewritten rasterlang plugin (C) 2008 by Barry Rowlingson
#
# Copyright (C) 2009 GIS-Lab (http://gis-lab.info) and
# Alexander Bruy (alexander.bruy@gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
#******************************************************************************
from __future__ import division
import re
import numpy
import MathOperations
from PyQt4.QtGui import *
from pyparsing import Word, alphas, ParseException, Literal, CaselessKeyword, \
Combine, Optional, nums, Or, Forward, ZeroOrMore, StringEnd, alphanums, \
Regex
import rastercalcutils as rasterUtils
# Module-level parser state: exprStack receives tokens (pushed by pushFirst as
# parse actions fire) for later evaluation; rasterNames collects every raster
# layer referenced by the parsed expression (filled by assignVar).
exprStack = []
rasterNames = set()
def rasterName():
    """Build the pyparsing element matching a bracketed raster name, e.g. ``[dem.tif]``."""
    inner = Word( alphas + nums, alphanums + "._-" )
    return Combine( "[" + inner + "]" )
def pushFirst( str, loc, toks ):
    """Parse action: push the first matched token onto the module-level
    expression stack.  (No ``global`` needed: append mutates, never rebinds.)"""
    exprStack.append( toks[0] )
def getBand( data, n ):
    """Return band *n* (1-based) of the numpy array *data*.

    3-D arrays are indexed as (band, row, col); 2-D arrays are treated as a
    single band.  Raises ValueError for an out-of-range band of a single-band
    raster or for an array that is neither 2- nor 3-dimensional.
    """
    n = n - 1
    if len( data.shape ) == 3:
        return data[ int( n ) ]
    if len( data.shape ) == 2 and n == 1:
        # NOTE(review): this branch also accepts band 2 for a single-band
        # raster -- looks like deliberate leniency; confirm before tightening.
        return data
    if len( data.shape ) == 2:
        if n == 0:
            return data
        else:
            # bugfix: 'raise ValueError, msg' is Python 2-only syntax;
            # the call form below is valid in both Python 2 and 3.
            raise ValueError( "can't get band " + str( n ) + " from single-band raster" )
    raise ValueError( "array must be with 2 or 3 dimensions" )
def assignVar( str, loc, toks ):
    """Parse action: remember the matched raster name and pass the token
    through unchanged.  (``set.add`` mutates, so no ``global`` is needed.)"""
    rasterNames.add( toks[ 0 ] )
    return toks[ 0 ]
def returnRaster( layerName ):
    # Thin wrapper: delegate whole-raster loading to the rastercalc utilities.
    return rasterUtils.getRaster( layerName )
def returnBand( layerName, bandNum, row, size, count ):
    # Thin wrapper: fetch one band of a layer; row/size/count presumably select
    # the chunk of rows being processed -- see rastercalcutils for the contract.
    return rasterUtils.getRasterBand( layerName, bandNum, row, size, count )
# conditional operators
def equal( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* equal to *compare* with
    *replace*; return *raster*."""
    numpy.putmask( raster, numpy.equal( raster, compare ), replace )
    return raster
def greater( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* greater than *compare* with
    *replace*; return *raster*."""
    numpy.putmask( raster, numpy.greater( raster, compare ), replace )
    return raster
def less( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* less than *compare* with
    *replace*; return *raster*."""
    numpy.putmask( raster, numpy.less( raster, compare ), replace )
    return raster
def not_equal( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* different from *compare*
    with *replace*; return *raster*."""
    numpy.putmask( raster, numpy.not_equal( raster, compare ), replace )
    return raster
def greater_equal( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* greater than or equal to
    *compare* with *replace*; return *raster*."""
    numpy.putmask( raster, numpy.greater_equal( raster, compare ), replace )
    return raster
def less_equal( raster, compare, replace ):
    """Overwrite, in place, every cell of *raster* less than or equal to
    *compare* with *replace*; return *raster*."""
    numpy.putmask( raster, numpy.less_equal( raster, compare ), replace )
    return raster
# define grammar
# Tokens for the raster-calculator expression language (pyparsing).
point = Literal( '.' )
# NOTE(review): misnamed -- this matches a comma (the function-argument
# separator), not a colon.
colon = Literal( ',' )
e = CaselessKeyword( 'E' )
plusorminus = Literal( '+' ) | Literal( '-' )
number = Word( nums )
integer = Combine( Optional( plusorminus ) + number )
floatnumber = Combine( integer +
                       Optional( point + Optional( number ) ) +
                       Optional( e + integer )
                     )
# Bracketed raster layer name, e.g. [dem.tif] (same shape as rasterName()).
ident = Combine( "[" + Word( alphas + nums, alphanums + "._-" ) + "]" )
# Every recognised function name; longer names must appear before their
# prefixes (e.g. "atan2" before "atan") for Literal alternation to match.
fn = Literal("exp") | Literal("log") | Literal("intS") | Literal("intF") | Literal("inty") | Literal("intx") | Literal("ddy") | Literal("ddx") | Literal("sin") | Literal("asin") | Literal("cos") | Literal("acos") | Literal("tan") | Literal("atan") | Literal("ddF") | Literal("dvg") | Literal("Mmin") | Literal("Mmax") | Literal("sqrt") | Literal("atan2") | Literal("log10") | Literal("eq") | Literal("ne") | Literal("lt") | Literal("gt") | Literal("le") | Literal("ge")| Literal("min")| Literal("max")| Literal("pow")| Literal("ln") | Literal("abs")
plus = Literal( "+" )
minus = Literal( "-" )
mult = Literal( "*" )
div = Literal( "/" )
lpar = Literal( "(" )
rpar = Literal( ")" )
equal_op = Literal( "=" )
not_equal_op = Literal( "!=" )
# '>' / '<' only when NOT followed by '=' so ">=" / "<=" match their own tokens.
greater_op = Combine( Literal( ">" ) + ~Literal( "=" ) )
greater_equal_op = Combine( Literal( ">" ) + Literal( "=" ) )
less_op = Combine( Literal( "<" ) + ~Literal( "=" ) )
less_equal_op = Combine( Literal( "<" ) + Literal( "=" ) )
addop = plus | minus
multop = mult | div
compop = less_op | greater_op | less_equal_op | greater_equal_op | not_equal_op | equal_op
expop = Literal( "^" )
band = Literal( "@" )
# NOTE(review): 'args' appears unused in this module -- confirm before removing.
args = 1
expr = Forward()
# NOTE(review): the juxtaposition below -- ').setParseAction(pushFirst)'
# immediately followed by '( lpar + expr + rpar )' -- *calls* the parser
# element rather than alternating with it; this looks like a lost '|'
# between the two groups. Confirm against the upstream rasterlang grammar.
atom = ( ( e
           | floatnumber
           | integer
           | (ident).setParseAction( assignVar ) + band + integer
           | fn + lpar + expr + ZeroOrMore(colon + expr) + rpar
         ).setParseAction(pushFirst)
         ( lpar + expr + rpar )
       )
factor = Forward()
factor << (atom | expr)
term = (factor | expr) + ZeroOrMore( multop + expr )
addterm = (term | expr) + ZeroOrMore( addop + expr )
expr << ((lpar + expr + rpar) | addterm)
bnf = expr
# Full pattern: a complete expression consuming the whole input string.
pattern = bnf + StringEnd()
# map operator symbols to corresponding arithmetic operations
# All operations are numpy element-wise ufuncs except '/', which goes through
# MathOperations.divisionLim (presumably a division-by-zero-safe variant --
# see MathOperations).
opn = { "+" : ( lambda a,b: numpy.add( a, b ) ),
        "-" : ( lambda a,b: numpy.subtract( a, b ) ),
        "*" : ( lambda a,b: numpy.multiply( a, b ) ),
        "/" : ( lambda a,b: MathOperations.divisionLim( a, b ) ),
        "^" : ( lambda a,b: numpy.power( a, b) ),
        "<" : ( lambda a,b: numpy.less( a, b) ),
        ">" : ( lambda a,b: numpy.greater( a, b) ),
        "=" : ( lambda a,b: numpy.equal( a, b) ),
        "!=" : ( lambda a,b: numpy.not_equal( a, b) ),
        "<=" : ( lambda a,b: numpy.less_equal( a, b) ),
        ">=" : ( lambda a,b: numpy.greater_equal( a, b) )
      }
# Dispatch table mapping function names from the grammar to implementations:
# plain numpy ufuncs, limit-safe wrappers from MathOperations, the in-place
# conditional helpers defined above (eq/ne/lt/gt/le/ge), and the field
# calculus / multi-argument operations from MathOperations.
func = { "sin": numpy.sin,
         "asin": numpy.arcsin,
         "cos": numpy.cos,
         "abs": numpy.abs,
         "acos": numpy.arccos,
         "tan": numpy.tan,
         "atan": numpy.arctan,
         "atan2": numpy.arctan2,
         "exp": numpy.exp,
         "ln": MathOperations.lnLim,
         "log": MathOperations.logLim,
         "sqrt": numpy.sqrt,
         "eq": equal,
         "ne": not_equal,
         "lt": less,
         "pow": numpy.power,
         "gt": greater,
         "le": less_equal,
         "ge": greater_equal,
         "ddx": MathOperations.diferentiateLon,
         "ddy": MathOperations.diferentiateLat,
         "intx": MathOperations.integralLon,
         "inty": MathOperations.integralLat,
         "dvg": MathOperations.divergence,
         "intS": MathOperations.surfaceIntegral,
         "intF": MathOperations.integrateFields,
         "ddF": MathOperations.diferentiateFields,
         "min": MathOperations.multimin,
         "max": MathOperations.multimax
       }
# Evaluator state shared by evaluate()/term()/factor(): ``p`` holds the token
# currently under examination; ``yes`` marks the very first evaluate() call,
# which must pop the initial token off the stack itself.
p = ""
yes = True
# Expression evaluation using indirect recursion - works with brackets, priority and all
def evaluate(s, row, size, count):
    """Evaluate an expression from the token stack *s* (add/subtract level).

    Mutually recursive with term() and factor(); communicates the current
    token through the module global ``p``.  row/size/count select the raster
    chunk being processed and are passed straight through to factor().
    """
    print s
    global yes
    global p
    # Only the outermost call pops the initial token; nested calls inherit
    # whatever factor()/term() left in ``p``.
    if yes:
        p = s.pop()
        yes = False
    r = term(s, row, size, count)
    if len(s)<=0:
        return r
    while len(s)>0 and (p == '+' or p == '-'):
        if p == '+':
            print "Doing Addition"
            p = s.pop()
            # NOTE(review): '+=' on top of opn['+'](r, ...) adds r twice
            # (result is 2*r + term); the '-' branch below uses plain
            # assignment. Looks like a bug -- confirm against test rasters.
            r += opn['+'](r, term(s, row, size, count))
            print "Finished Addition"
        elif p == '-':
            p = s.pop()
            print "Entering Subtraction"
            r = opn['-'](r, term(s, row, size, count))
            print "Finished Subtraction"
    return r
def term(s, row, size, count):
    """Evaluate a term (multiply/divide level) from the token stack *s*.

    Uses the module global ``p`` for the current token; mutually recursive
    with factor() and, through it, evaluate().
    """
    global p
    print "Entering term", p
    r = factor(s, row, size, count)
    if len(s)<=0:
        return r
    while True:
        p = s.pop() # get the sign
        if p == '*':
            print "Doing Multiplication"
            p = s.pop() # get the factor
            r = opn["*"](r, factor(s, row, size, count))
            print "Finished Multiplication"
        elif p == '/':
            print "Doing Division"
            p = s.pop() # get the factor
            r = opn["/"](r, factor(s, row, size, count))
            print "Finished Division"
        # Stop as soon as the popped token is no longer a mult/div operator;
        # the token stays in ``p`` for the caller to examine.
        if not (len(p)>0 and (p == '*' or p == '/')):
            break
    print "Exiting term", p
    return r
def factor(s, row, size, count):
global p
print "Entering factor", p
if p == '(':
print "Open Brackets"
p = s.pop() #'('
r = evaluate(s, row, size, count)
print "Close Brackets"
return r
elif p[0]>='0' and p[0]<='9':
r = float(p)
return r
elif p[0]=='[':
lay = p
s.pop() # @
p = s.pop()
num = int(p)
return returnBand( lay, num, row, size, count )
elif p == "PI":
return math.pi
elif p == "E":
return math.e
elif p in func:
if p in [ "eq", "ne", "gt", "lt", "ge", "le" ]:
op = p
s.pop() #'('
p = s.pop() #p is the first argument
op1 = evaluate(s, row, size, count)
p = s.pop() #p is the second argument
op2 = evaluate(s, row, size, count)
p = s.pop() #p is the third argument
op3 = evaluate(s, row, size, count)
r = func[op](op1, op2, op3)
return r
if p in ["intF", "ddF", "pow" ]:
op = p # Retain the function in a variable
s.pop() #Go over '('
p = s.pop() # take the first value
op1 = evaluate(s, row, size, count) # evaluate it
p = s.pop()
op2 = evaluate(s, row, size, count)
r = func[op](op1, op2)
return r
if p in ["min", "max", "log"]:
op = p
op1 = []
rasterListTemp = []
floatListTemp = []
floatListIndx = []
done = False
s.pop() #Go over '('
while True:
p = s.pop() # take the parameter
x = evaluate(s, row, size, count)
if not(isinstance(x,float) or isinstance(x,int)):
op1.append(x)
rasterListTemp.append(x)
if not done:
for i in range(len(floatListTemp)):
x = x*numpy.ones_like(rasterListTemp[0])
op1.insert(floatListIndx[i],floatListTemp)
done = True
else:
if not done:
floatListIndx.append(len(op1)+len(floatListTemp))
floatListTemp.append(x)
else:
x = x*numpy.ones_like(rasterListTemp[0])
op1.append(x)
if p == ')':
break
op1 = numpy.array(op1)
return func[op](op1)
#Otherwise, it has to be a one argument function:
op = p # Take the function
s.pop() #Go over '('
p = s.pop()
x = evaluate(s, row, size, count) #evaluate the parameter
print x
return func[op](x)
print "Exiting factor", p
| lgpl-2.1 |
miguelinux/vbox | src/VBox/ValidationKit/common/webutils.py | 1 | 6470 | # -*- coding: utf-8 -*-
# $Id: webutils.py $
"""
Common Web Utility Functions.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 108487 $"
# Standard Python imports.
import os;
import sys;
import unittest;
# Python 3 hacks:
if sys.version_info[0] < 3:
from urllib2 import quote as urllib_quote;
from urllib import urlencode as urllib_urlencode;
from urllib import urlopen as urllib_urlopen;
else:
from urllib.parse import quote as urllib_quote; # pylint: disable=F0401,E0611
from urllib.parse import urlencode as urllib_urlencode; # pylint: disable=F0401,E0611
from urllib.request import urlopen as urllib_urlopen; # pylint: disable=F0401,E0611
# Validation Kit imports.
from common import utils;
def escapeElem(sText):
    """
    Escapes special character to HTML-safe sequences.
    """
    # '&' must be translated first so it does not re-escape the entities.
    for sFrom, sTo in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        sText = sText.replace(sFrom, sTo);
    return sText;
def escapeAttr(sText):
    """
    Escapes special character to HTML-safe sequences.
    """
    # Same as escapeElem, plus double quotes for use inside attribute values;
    # '&' must be translated first.
    for sFrom, sTo in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;')):
        sText = sText.replace(sFrom, sTo);
    return sText;
def escapeElemToStr(oObject):
    """
    Stringifies the object and hands it to escapeElem.
    """
    sText = oObject if utils.isString(oObject) else str(oObject);
    return escapeElem(sText);
def escapeAttrToStr(oObject):
    """
    Stringifies the object and hands it to escapeAttr. May return unicode string.
    """
    sText = oObject if utils.isString(oObject) else str(oObject);
    return escapeAttr(sText);
def escapeAttrJavaScriptStringDQ(sText):
    """ Escapes a javascript string that is to be emitted between double quotes. """
    if '"' not in sText:
        # Fast path: nothing to escape unless a control character is present.
        # bugfix: guard the empty string -- min('') raises ValueError.
        if not sText or ord(min(sText)) >= 0x20:
            return sText;

    sRet = '';
    for ch in sText:
        if ch == '"':
            sRet += '\\"';
        elif ord(ch) >= 0x20:
            sRet += ch;
        elif ch == '\n':
            sRet += '\\n';
        elif ch == '\r':
            sRet += '\\r';
        elif ch == '\t':
            sRet += '\\t';
        else:
            # bugfix: %x requires an integer; formatting the character itself
            # raised TypeError for every control char other than \n, \r, \t.
            sRet += '\\x%02x' % (ord(ch),);
    return sRet;
def quoteUrl(sText):
    """
    See urllib.quote().

    Percent-encodes characters that are not safe in a URL path component,
    using whichever quote function the Python 2/3 import block selected.
    """
    return urllib_quote(sText);
def encodeUrlParams(dParams):
    """
    See urllib.urlencode().

    Encodes a dict of query parameters as 'key=value&...'; doseq=True expands
    sequence values into repeated keys.
    """
    return urllib_urlencode(dParams, doseq=True)
def hasSchema(sUrl):
    """
    Checks if the URL has a schema (e.g. http://) or is file/server relative.
    Returns True if schema is present, False if not.
    """
    iColon = sUrl.find(':');
    if iColon <= 0:
        return False;
    sSchema = sUrl[0:iColon];
    # A schema is 2..15 lowercase ASCII letters (rules out drive letters).
    return 2 <= len(sSchema) < 16 and sSchema.islower() and sSchema.isalpha();
def getFilename(sUrl):
    """
    Extracts the filename from the URL.
    """
    ## @TODO This isn't entirely correct. Use the urlparser instead!
    sNativePath = sUrl.replace('/', os.path.sep);
    return os.path.basename(sNativePath);
def downloadFile(sUrlFile, sDstFile, sLocalPrefix, fnLog, fnError = None, fNoProxies=True):
    """
    Downloads the given file if an URL is given, otherwise assume it's
    something on the build share and copy it from there.

    Raises no exceptions, returns log + success indicator instead (True on
    success, False after reporting the failure through fnError).

    Note! This method may use proxies configured on the system and the
          http_proxy, ftp_proxy, no_proxy environment variables.

    @todo Fixed build burn here. Please set default value for fNoProxies
          to appropriate one.
    """
    if fnError is None:
        fnError = fnLog;

    if sUrlFile.startswith(('http://', 'https://', 'ftp://')):
        # Download the file.
        fnLog('Downloading "%s" to "%s"...' % (sUrlFile, sDstFile));
        try:
            ## @todo We get 404.html content instead of exceptions here, which is confusing and should be addressed.
            if fNoProxies:
                oSrc = urllib_urlopen(sUrlFile);
            else:
                # NOTE(review): the 'proxies' keyword exists only on the
                # Python 2 urllib.urlopen; the urllib.request.urlopen imported
                # for Python 3 at the top of this file does not accept it.
                oSrc = urllib_urlopen(sUrlFile, proxies = dict());
            oDst = utils.openNoInherit(sDstFile, 'wb');
            oDst.write(oSrc.read());
            oDst.close();
            oSrc.close();
        except Exception as oXcpt: # bugfix: 'except X, e' is Python 2-only syntax.
            fnError('Error downloading "%s" to "%s": %s' % (sUrlFile, sDstFile, oXcpt));
            return False;
    else:
        # Assumes file from the build share.
        sSrcPath = os.path.join(sLocalPrefix, sUrlFile);
        fnLog('Copying "%s" to "%s"...' % (sSrcPath, sDstFile));
        try:
            utils.copyFileSimple(sSrcPath, sDstFile);
        except Exception as oXcpt:
            fnError('Error copying "%s" to "%s": %s' % (sSrcPath, sDstFile, oXcpt));
            return False;
    return True;
#
# Unit testing.
#
# pylint: disable=C0111
class CommonUtilsTestCase(unittest.TestCase):
    """ Unit tests for the URL helpers above. """

    def testHasSchema(self):
        # Positive: lowercase alphabetic schema of sane length.
        self.assertTrue(hasSchema('http://www.oracle.com/'));
        self.assertTrue(hasSchema('https://virtualbox.com/'));
        # Negative: empty schema, plain paths, bare names, drive letters.
        self.assertFalse(hasSchema('://virtualbox.com/'));
        self.assertFalse(hasSchema('/usr/bin'));
        self.assertFalse(hasSchema('usr/bin'));
        self.assertFalse(hasSchema('bin'));
        self.assertFalse(hasSchema('C:\\WINNT'));
# Run the self-tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main();
    # not reached.
| gpl-2.0 |
lekum/ansible | v1/ansible/runner/action_plugins/async.py | 141 | 1915 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """Action plugin that transfers a module to the remote host and runs it
    asynchronously via the async_wrapper module."""

    def __init__(self, runner):
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' transfer the given module name, plus the async module, then run it '''
        # Async execution is not supported in check mode; report as skipped.
        if self.runner.noop_on_check(inject):
            return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))

        # shell and command module are the same
        if module_name == 'shell':
            module_name = 'command'
            module_args += " #USE_SHELL"

        # NOTE(review): this is a *substring* test on the remote temp path,
        # not membership in a collection -- presumably it detects that no real
        # temp dir was allocated yet; confirm against _make_tmp_path's naming.
        if "tmp" not in tmp:
            tmp = self.runner._make_tmp_path(conn)

        # Copy the target module to the remote temp dir and make it executable.
        (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
        self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)

        # Hand off to async_wrapper, pointing it at the copied module; the job
        # id and time limit come from the runner.
        return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
           async_module=module_path,
           async_jid=self.runner.generated_jid,
           async_limit=self.runner.background,
           inject=inject
        )
| gpl-3.0 |
vlegoff/tsunami | src/primaires/scripting/editeurs/edt_evenement.py | 1 | 9075 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient l'éditeur EdtEvenement, détaillé plus bas."""
from textwrap import wrap
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from primaires.format.fonctions import *
from .edt_instructions import EdtInstructions
class EdtEvenement(Editeur):

    """Context editor for a script event.

    The edited object (self.objet) is the event itself.  User-facing
    messages stay in French; the editor options are: /d delete a test line,
    /r replace one, /h move one up, /b move one down.
    """

    def __init__(self, pere, objet=None, attribut=None):
        """Editor constructor."""
        Editeur.__init__(self, pere, objet, attribut)
        self.ajouter_option("d", self.opt_supprimer_test)
        self.ajouter_option("r", self.opt_modifier_test)
        self.ajouter_option("h", self.opt_remonter_test)
        self.ajouter_option("b", self.opt_descendre_test)

    def opt_supprimer_test(self, arguments):
        """Delete a test line.

        Syntax:
            /d no
        """
        evenement = self.objet
        try:
            # User input is 1-based; internal indices are 0-based.
            no = int(arguments) - 1
            assert no >= 0
            assert no < len(evenement.tests)
        except (ValueError, AssertionError):
            self.pere << "|err|Numéro invalide ({}).|ff|".format(arguments)
        else:
            evenement.supprimer_test(no)
            self.actualiser()

    def opt_modifier_test(self, arguments):
        """Replace a test line with a new predicate sequence.

        Syntax:
            /r <id> <ligne>
        """
        evenement = self.objet
        msgs = arguments.split(" ")
        if len(msgs) < 2:
            self.pere << "|err|Précisez un numéro de test suivi " \
                    "d'une nouvelle suite de tests.|ff|"
            return
        try:
            i = int(msgs[0])
            assert i > 0
        except (ValueError, AssertionError):
            self.pere << "|err|Nombre invalide.|ff|"
            return
        msg = " ".join(msgs[1:])
        try:
            test = evenement.tests[i - 1]
        except IndexError:
            self.pere << "|err|Test introuvable.|ff|"
            return
        try:
            # Re-parse the test line in place.
            test.construire(msg)
        except ValueError as err:
            self.pere << "|err|Erreur lors du parsage du test.|ff|"
        else:
            self.actualiser()

    def opt_remonter_test(self, arguments):
        """Move a test line one position up.

        Syntax:
            /h no
        """
        evenement = self.objet
        try:
            # no > 0 here: the first line cannot be moved further up.
            no = int(arguments) - 1
            assert no > 0
            assert no < len(evenement.tests)
        except (ValueError, AssertionError):
            self.pere << "|err|Numéro invalide ({}).|ff|".format(arguments)
        else:
            evenement.remonter_test(no)
            self.actualiser()

    def opt_descendre_test(self, arguments):
        """Move a test line one position down.

        Syntax:
            /b no
        """
        evenement = self.objet
        try:
            # The last line cannot be moved further down.
            no = int(arguments) - 1
            assert no >= 0
            assert no < len(evenement.tests) - 1
        except (ValueError, AssertionError):
            self.pere << "|err|Numéro invalide ({}).|ff|".format(arguments)
        else:
            evenement.descendre_test(no)
            self.actualiser()

    def accueil(self):
        """Build the welcome screen: help text, variables, sub-events or
        options, and the numbered list of test conditions."""
        evenement = self.objet
        msg = "| |tit|"
        msg += "Édition de l'évènement {} de {}".format(evenement.nom_complet,
                evenement.script.parent).ljust(76)
        msg += "|ff||\n" + self.opts.separateur + "\n"
        # Re-wrap the long help text paragraph by paragraph.
        paragraphes = ["\n".join(wrap(p)) for p in evenement.aide_longue.split(
                "\n")]
        aide_longue = "\n".join(paragraphes)
        msg += aide_longue + "\n\n"
        variables = evenement.variables.values()
        if variables:
            msg += "Variables définies dans ce script :\n"
            # Align variable names on the longest one.
            t_max = 0
            for v in variables:
                if len(v.nom) > t_max:
                    t_max = len(v.nom)
            lignes = ["|grf|" + var.nom.ljust(t_max) + "|ff| : " + var.aide \
                    for var in variables]
            msg += "\n".join(lignes)
            msg += "\n\n"
        evenements = sorted(evenement.evenements.values(),
                key=lambda evt: evt.nom)
        if evenements:
            # Events with sub-events only list them; leaf events show the
            # editing options and the test lines instead.
            msg += "|cy|Sous-évènements disponibles :|ff|\n\n"
            t_max = 0
            for evt in evenements:
                if len(evt.nom) > t_max:
                    t_max = len(evt.nom)
            lignes = [" " + evt.nom.ljust(t_max) + " : " + evt.aide_courte \
                    for evt in evenements]
            msg += "\n".join(lignes)
        else:
            msg += "|cy|Options :\n\n"
            msg += " Entrez |ent|une suite de prédicats|ff| pour "
            msg += "ajouter un test\n"
            msg += " Ou |ent|un numéro de ligne|ff| pour l'éditer\n"
            msg += " Ou |cmd|*|ff| pour éditer le test sinon\n"
            msg += " |cmd|/d <numéro de ligne>|ff| pour supprimer un test\n"
            msg += " |cmd|/r <numéro de ligne> <prédicats>|ff| pour "
            msg += "modifier un test\n"
            msg += " |cmd|/h <numéro de ligne>|ff| pour remonter un test\n"
            msg += " |cmd|/b <numéro de ligne>|ff| pour descendre un test\n\n"
            msg += "|cy|Conditions :|ff|\n"
            tests = evenement.tests
            # Width of the line-number column (two digits from 10 lines on).
            longueur = 1
            if tests:
                if len(tests) >= 10:
                    longueur = 2
                for i, test in enumerate(tests):
                    si = "|mr|si|ff| " if i == 0 else "|mr|sinon si|ff| "
                    msg += "\n |cmd|" + str(i + 1).rjust(longueur) + "|ff| "
                    msg += si + str(test)
            msg += "\n " + " " * longueur + "|cmd|*|ff| |mr|sinon|ff|"
        return msg

    def interpreter(self, msg):
        """Interpret user input: descend into a sub-event, open the 'sinon'
        branch (*), open a test line by number, or add a new test."""
        evenement = self.objet
        if evenement.evenements:
            # Events with sub-events only accept a sub-event name.
            nom_evt = supprimer_accents(msg).lower()
            if nom_evt in evenement.evenements:
                evenement = evenement.evenements[nom_evt]
                enveloppe = EnveloppeObjet(EdtEvenement, evenement)
                enveloppe.parent = self
                contexte = enveloppe.construire(self.pere)
                self.migrer_contexte(contexte)
            else:
                self.pere << "|err|Cet évènement n'existe pas.|ff|"
            return
        if msg == "*":
            # '*' edits the fallback ('sinon') instruction block, creating
            # it on first access.
            if evenement.sinon is None:
                evenement.creer_sinon()
            enveloppe = EnveloppeObjet(EdtInstructions, evenement.sinon)
            enveloppe.parent = self
            contexte = enveloppe.construire(self.pere)
            self.migrer_contexte(contexte)
        elif msg.isdigit():
            no_tests = int(msg) - 1
            try:
                tests = evenement.tests[no_tests]
            except IndexError:
                self.pere << "|err|Ce test n'existe pas.|ff|"
            else:
                enveloppe = EnveloppeObjet(EdtInstructions, tests)
                enveloppe.parent = self
                contexte = enveloppe.construire(self.pere)
                self.migrer_contexte(contexte)
        elif not msg:
            self.pere << "|err|Précisez un test.|ff|"
        else:
            # Anything else is parsed as a new test (predicate sequence).
            try:
                evenement.ajouter_test(msg)
            except ValueError as err:
                self.pere << "|err|Erreur lors du parsage du test.|ff|"
            else:
                self.actualiser()
| bsd-3-clause |
davidharrigan/django | django/db/migrations/graph.py | 351 | 10956 | from __future__ import unicode_literals
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import CircularDependencyError, NodeNotFoundError
# Warning emitted when the recursive ancestors()/descendants() walk overflows
# the interpreter stack and the graph falls back to the iterative DFS.
RECURSION_DEPTH_WARNING = (
    "Maximum recursion depth exceeded while generating migration graph, "
    "falling back to iterative approach. If you're experiencing performance issues, "
    "consider squashing migrations as described at "
    "https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@python_2_unicode_compatible
@total_ordering
class Node(object):
    """
    A single node in the migration graph. Contains direct links to adjacent
    nodes in either direction.
    """
    def __init__(self, key):
        # key is the (app_label, migration_name) tuple identifying the node.
        self.key = key
        self.children = set()
        self.parents = set()

    def __eq__(self, other):
        # Delegates to the key, so a Node compares equal both to another Node
        # and to a raw key tuple (lets nodes be looked up by key).
        return self.key == other

    def __lt__(self, other):
        # total_ordering fills in the remaining comparisons from this + __eq__.
        return self.key < other

    def __hash__(self):
        # Must hash like the key so Node and raw tuple are interchangeable
        # in sets/dicts (defining __eq__ suppresses the default hash).
        return hash(self.key)

    def __getitem__(self, item):
        return self.key[item]

    def __str__(self):
        return str(self.key)

    def __repr__(self):
        return '<Node: (%r, %r)>' % self.key

    def add_child(self, child):
        self.children.add(child)

    def add_parent(self, parent):
        self.parents.add(parent)

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def ancestors(self):
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_ancestors' not in self.__dict__:
            ancestors = deque([self.key])
            for parent in sorted(self.parents):
                ancestors.extendleft(reversed(parent.ancestors()))
            self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
        return self.__dict__['_ancestors']

    # Use manual caching, @cached_property effectively doubles the
    # recursion depth for each recursion.
    def descendants(self):
        # Use self.key instead of self to speed up the frequent hashing
        # when constructing an OrderedSet.
        if '_descendants' not in self.__dict__:
            descendants = deque([self.key])
            for child in sorted(self.children):
                descendants.extendleft(reversed(child.descendants()))
            self.__dict__['_descendants'] = list(OrderedSet(descendants))
        return self.__dict__['_descendants']
@python_2_unicode_compatible
class MigrationGraph(object):
    """
    Represents the digraph of all migrations in a project.

    Each migration is a node, and each dependency is an edge. There are
    no implicit dependencies between numbered migrations - the numbering is
    merely a convention to aid file listing. Every new numbered migration
    has a declared dependency to the previous number, meaning that VCS
    branch merges can be detected and resolved.

    Migrations files can be marked as replacing another set of migrations -
    this is to support the "squash" feature. The graph handler isn't responsible
    for these; instead, the code to load them in here should examine the
    migration files and if the replaced migrations are all either unapplied
    or not present, it should ignore the replaced ones, load in just the
    replacing migration, and repoint any dependencies that pointed to the
    replaced migrations to point to the replacing one.

    A node should be a tuple: (app_path, migration_name). The tree special-cases
    things within an app - namely, root nodes and leaf nodes ignore dependencies
    to other apps.
    """

    def __init__(self):
        # node_map: key -> Node (graph structure); nodes: key -> Migration
        # instance (the implementation); cached: whether any Node caches
        # ancestors/descendants that may need invalidation.
        self.node_map = {}
        self.nodes = {}
        self.cached = False

    def add_node(self, key, implementation):
        """Add a migration node; *implementation* is the Migration object."""
        node = Node(key)
        self.node_map[key] = node
        self.nodes[key] = implementation
        self.clear_cache()

    def add_dependency(self, migration, child, parent):
        """Record that *child* depends on *parent* (both node keys)."""
        if child not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent child node %r" % (migration, child),
                child
            )
        if parent not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent parent node %r" % (migration, parent),
                parent
            )
        self.node_map[child].add_parent(self.node_map[parent])
        self.node_map[parent].add_child(self.node_map[child])
        self.clear_cache()

    def clear_cache(self):
        """Drop every node's cached ancestor/descendant list (graph changed)."""
        if self.cached:
            for node in self.nodes:
                self.node_map[node].__dict__.pop('_ancestors', None)
                self.node_map[node].__dict__.pop('_descendants', None)
            self.cached = False

    def forwards_plan(self, target):
        """
        Given a node, returns a list of which previous nodes (dependencies)
        must be applied, ending with the node itself.
        This is the list you would follow if applying the migrations to
        a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.ancestors()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node)

    def backwards_plan(self, target):
        """
        Given a node, returns a list of which dependent nodes (dependencies)
        must be unapplied, ending with the node itself.
        This is the list you would follow if removing the migrations from
        a database.
        """
        if target not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
        # Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
        self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
        self.cached = True
        node = self.node_map[target]
        try:
            return node.descendants()
        except RuntimeError:
            # fallback to iterative dfs
            warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
            return self.iterative_dfs(node, forwards=False)

    def iterative_dfs(self, start, forwards=True):
        """
        Iterative depth first search, for finding dependencies.
        """
        visited = deque()
        visited.append(start)
        if forwards:
            stack = deque(sorted(start.parents))
        else:
            stack = deque(sorted(start.children))
        while stack:
            node = stack.popleft()
            visited.appendleft(node)
            if forwards:
                children = sorted(node.parents, reverse=True)
            else:
                children = sorted(node.children, reverse=True)
            # reverse sorting is needed because prepending using deque.extendleft
            # also effectively reverses values
            stack.extendleft(children)
        # OrderedSet keeps only the first occurrence of each node, preserving
        # the dependency-first ordering built above.
        return list(OrderedSet(visited))

    def root_nodes(self, app=None):
        """
        Returns all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            # Only same-app parents count; cross-app dependencies are ignored.
            if (not any(key[0] == node[0] for key in self.node_map[node].parents)
                    and (not app or app == node[0])):
                roots.add(node)
        return sorted(roots)

    def leaf_nodes(self, app=None):
        """
        Returns all leaf nodes - that is, nodes with no dependents in their app.
        These are the "most current" version of an app's schema.
        Having more than one per app is technically an error, but one that
        gets handled further up, in the interactive command - it's usually the
        result of a VCS merge and needs some user input.
        """
        leaves = set()
        for node in self.nodes:
            # Only same-app children count; cross-app dependents are ignored.
            if (not any(key[0] == node[0] for key in self.node_map[node].children)
                    and (not app or app == node[0])):
                leaves.add(node)
        return sorted(leaves)

    def ensure_not_cyclic(self, start, get_children):
        """Raise CircularDependencyError if any cycle is reachable in the graph."""
        # Algo from GvR:
        # http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
        todo = set(self.nodes)
        while todo:
            node = todo.pop()
            stack = [node]
            while stack:
                top = stack[-1]
                for node in get_children(top):
                    if node in stack:
                        # The slice from the repeated node to the top of the
                        # stack is exactly the cycle.
                        cycle = stack[stack.index(node):]
                        raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
                    if node in todo:
                        stack.append(node)
                        todo.remove(node)
                        break
                else:
                    node = stack.pop()

    def __str__(self):
        return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()

    def __repr__(self):
        nodes, edges = self._nodes_and_edges()
        return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)

    def _nodes_and_edges(self):
        # Edge count = total number of parent links across all nodes.
        return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())

    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, returns a complete ProjectState for it.
        If at_end is False, returns the state before the migration has run.
        If nodes is not provided, returns the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if len(nodes) == 0:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            nodes = [nodes]
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan:
                    if not at_end and migration in nodes:
                        continue
                    plan.append(migration)
        # Replay the plan, mutating the state with each migration in order.
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            project_state = self.nodes[node].mutate_state(project_state, preserve=False)
        return project_state

    def __contains__(self, node):
        return node in self.nodes
| bsd-3-clause |
GdZ/scriptfile | software/googleAppEngine/lib/grizzled/grizzled/test/io/TestPushback.py | 19 | 1420 | #!/usr/bin/python2.4
# $Id: 58917b33b42080b79747e553e2685ff5e3e2f84b $
#
# Nose program for testing grizzled.io classes/functions
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import google3
from grizzled.io import *
from cStringIO import StringIO
import os
import tempfile
import atexit
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class TestPushback(object):
    """Tests for the PushbackFile wrapper (Python 2 style: print statements)."""

    def testPushback(self):
        # Fixture: the readline/read assertions below depend on these
        # exact three lines (note the string content is column-0 literal).
        inputString = """abc
def
ghi
"""
        f = StringIO(inputString)
        pb = PushbackFile(f)
        s = pb.readline()
        print s
        assert s == 'abc\n'
        # Push the whole line back; the next readline must return it again.
        pb.pushback(s)
        s = pb.readline()
        print s
        assert s == 'abc\n'
        s = pb.read(1)
        print s
        assert s == 'd'
        s = pb.readline()
        print s
        assert s == 'ef\n'
        # read(-1) drains the rest of the stream.
        s = pb.read(-1)
        print s
        assert s == 'ghi\n'
        # At EOF, readline returns the empty string.
        s = pb.readline()
        assert s == ''
        # Pushing back after EOF makes the pushed data readable again.
        pb.pushback('foobar')
        s = pb.readline()
        print s
        assert s == 'foobar'
| mit |
abhisg/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it's wise to set the n_samples parameter to a small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.

    quantile : float, default 0.3
        should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.

    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Estimate from a random subset to bound the quadratic cost.
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    # A small quantile (or few samples) can round down to zero neighbors,
    # which NearestNeighbors rejects; clamp to at least one neighbor.
    n_neighbors = int(X.shape[0] * quantile)
    if n_neighbors < 1:
        n_neighbors = 1
    nbrs = NearestNeighbors(n_neighbors=n_neighbors)
    nbrs.fit(X)

    # Bandwidth = mean, over all samples, of the distance to the farthest
    # of the n_neighbors nearest neighbors; computed in batches of 500
    # rows to bound memory use.
    bandwidth = 0.
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()
    return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    """Climb the density gradient from one seed.

    Returns ``(final_mean_as_tuple, n_points_in_kernel)``, or implicitly
    ``None`` when the seed's neighborhood becomes empty (the ``break``
    below falls off the end of the function).
    """
    # For each seed, climb gradient until convergence or max_iter
    bandwidth = nbrs.get_params()['radius']
    stop_thresh = 1e-3 * bandwidth  # when mean has converged
    completed_iterations = 0
    while True:
        # Find mean of points within bandwidth
        i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
                                       return_distance=False)[0]
        points_within = X[i_nbrs]
        if len(points_within) == 0:
            break  # Depending on seeding strategy this condition may occur
        my_old_mean = my_mean  # save the old mean
        my_mean = np.mean(points_within, axis=0)
        # If converged or at max_iter, adds the cluster
        if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
                completed_iterations == max_iter):
            return tuple(my_mean), len(points_within)
        completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               max_iterations=None, n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.

    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.

    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.

    labels : array, shape=[n_samples]
        Cluster labels for each point.

    Notes
    -----
    See examples/cluster/plot_meanshift.py for an example.
    """
    # FIXME To be removed in 0.18
    if max_iterations is not None:
        warnings.warn("The `max_iterations` parameter has been renamed to "
                      "`max_iter` from version 0.16. The `max_iterations` "
                      "parameter will be removed in 0.18", DeprecationWarning)
        max_iter = max_iterations

    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth).fit(X)

    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary (a None entry means the seed's
    # neighborhood was empty and it produced no cluster candidate)
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]

    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
                         or increase the bandwidth."
                         % bandwidth)

    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # Use the builtin ``bool``: ``np.bool`` was merely a deprecated alias
    # for it and has been removed in NumPy 1.24.
    unique = np.ones(len(sorted_centers), dtype=bool)
    nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]

    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    # ``int`` instead of the removed deprecated alias ``np.int``.
    labels = np.zeros(n_samples, dtype=int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Finds seeds by first binning data onto a grid whose lines are
    spaced bin_size apart, and then choosing those bins with at least
    min_bin_freq points.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Bin points: snap every point to the nearest grid vertex and count
    # how many points land in each bin.
    bin_sizes = defaultdict(int)
    for point in X:
        binned_point = np.round(point / bin_size)
        bin_sizes[tuple(binned_point)] += 1

    # Select only those bins as seeds which have enough members.
    # dict.items() works on both Python 2 and 3 -- six.iteritems is
    # unnecessary here.
    bin_seeds = np.array([point for point, freq in bin_sizes.items() if
                          freq >= min_bin_freq], dtype=np.float32)
    if len(bin_seeds) == len(X):
        # Binning compressed nothing; fall back to the raw data points.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    # Scale the grid indices back into data coordinates.
    bin_seeds = bin_seeds * bin_size
    return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.

    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.

    Seeding is performed using a binning technique for scalability.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).

    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.

    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.

    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.

    labels_ :
        Labels of each point.

    Notes
    -----
    Scalability:

    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will is
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).

    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.

    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.

    References
    ----------

    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Only store the constructor parameters here (scikit-learn
        # convention); all validation/work happens in fit().
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Perform clustering.

        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        """
        X = check_array(X)
        # Delegates to the module-level mean_shift function; sets the
        # cluster_centers_ and labels_ fitted attributes.
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_")
        # Nearest fitted center wins; no bandwidth cutoff is applied here.
        return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
fitoria/askbot-devel | askbot/management/commands/get_tag_stats.py | 19 | 9243 | import sys
import optparse
from django.core.management.base import BaseCommand, CommandError
from askbot import models
from askbot import const
def get_tag_lines(tag_marks, width=25):
    """Greedily wrap tag names into space-separated lines of <= *width* chars.

    A single name longer than *width* still occupies one (overlong) line.
    Always returns at least one line; an empty input yields [''].
    """
    lines = []
    current = ''
    for name in tag_marks:
        if not current:
            current = name
        elif len(current) + len(name) + 1 > width:
            # Adding this name would overflow the line: flush and restart.
            lines.append(current)
            current = name
        else:
            current = current + ' ' + name
    lines.append(current)
    return lines
def get_empty_lines(num_lines):
    """Return a list of *num_lines* empty strings (used for column padding).

    List multiplication replaces the old ``xrange`` loop: same result,
    and it runs on both Python 2 and Python 3.
    """
    return [''] * num_lines
def pad_list(the_list, length):
    """Pad *the_list* in place with empty strings up to *length* items.

    Does nothing when the list is already long enough. Returns None
    (mutates its argument), matching the original in-place contract.
    """
    if len(the_list) < length:
        the_list.extend([''] * (length - len(the_list)))
def format_table_row(*cols, **kwargs):
    """Zip several columns (lists of strings) into formatted output lines.

    Each column is padded in place with empty strings to the length of the
    longest column, then row *i* is rendered with
    ``kwargs['format_string'] % tuple(row)``.

    Raises KeyError if 'format_string' is missing and ValueError when no
    columns are given (same as the original ``max`` call).

    Rewritten with ``zip`` and a comprehension instead of the Python 2-only
    ``xrange`` index loop; padding is inlined so the function is
    self-contained.
    """
    format_string = kwargs['format_string']
    max_len = max(len(col) for col in cols)
    for col in cols:
        if len(col) < max_len:
            # Same in-place padding the old pad_list() helper performed.
            col.extend([''] * (max_len - len(col)))
    return [format_string % tuple(row) for row in zip(*cols)]
class Command(BaseCommand):
help = 'Prints statistics of tag usage'
option_list = BaseCommand.option_list + (
optparse.make_option(
'-t',
'--sub-counts',
action = 'store_true',
default = False,
dest = 'sub_counts',
help = 'Print tag subscription statistics, for all tags, listed alphabetically'
),
optparse.make_option(
'-u',
'--user-sub-counts',
action = 'store_true',
default = False,
dest = 'user_sub_counts',
help = 'Print tag subscription data per user, with users listed alphabetically'
),
optparse.make_option(
'-e',
'--print-empty',
action = 'store_true',
default = False,
dest = 'print_empty',
help = 'Print empty records too (with zero counts)'
),
)
def handle(self, *args, **options):
if not(options['sub_counts'] ^ options['user_sub_counts']):
raise CommandError('Please use either -u or -t (but not both)')
print ''
if options['sub_counts']:
self.print_sub_counts(options['print_empty'])
if options['user_sub_counts']:
self.print_user_sub_counts(options['print_empty'])
print ''
def print_user_sub_counts(self, print_empty):
"""prints list of users and what tags they follow/ignore
"""
users = models.User.objects.all().order_by('username')
item_count = 0
for user in users:
tag_marks = user.tag_selections
#add names of explicitly followed tags
followed_tags = list()
followed_tags.extend(
tag_marks.filter(
reason='good'
).values_list(
'tag__name', flat = True
)
)
#add wildcards to the list of interesting tags
followed_tags.extend(user.interesting_tags.split())
for good_tag in user.interesting_tags.split():
followed_tags.append(good_tag)
ignored_tags = list()
ignored_tags.extend(
tag_marks.filter(
reason='bad'
).values_list(
'tag__name', flat = True
)
)
for bad_tag in user.ignored_tags.split():
ignored_tags.append(bad_tag)
subscribed_tags = list()
subscribed_tags.extend(
tag_marks.filter(
reason='subscribed'
).values_list(
'tag__name', flat = True
)
)
for subscribed_tag in user.subscribed_tags.split():
subscribed_tags.append(subscribed_tag)
followed_count = len(followed_tags)
ignored_count = len(ignored_tags)
subscribed_count = len(subscribed_tags)
total_count = followed_count + ignored_count + subscribed_count
if total_count == 0 and print_empty == False:
continue
if item_count == 0:
print '%-28s %25s %25s %25s' % ('User (id)', 'Interesting tags', 'Ignored tags', 'Subscribed tags')
print '%-28s %25s %25s %25s' % ('=========', '================', '============', '===============')
followed_lines = get_tag_lines(followed_tags, width = 25)
ignored_lines = get_tag_lines(ignored_tags, width = 25)
subscribed_lines = get_tag_lines(subscribed_tags, width = 25)
follow = '*'
if user.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
follow = ''
user_string = '%s (%d)%s' % (user.username, user.id, follow)
output_lines = format_table_row(
[user_string,],
followed_lines,
ignored_lines,
subscribed_lines,
format_string = '%-28s %25s %25s %25s'
)
item_count += 1
for line in output_lines:
print line
print ''
self.print_postamble(item_count)
def get_wildcard_tag_stats(self):
"""This method collects statistics on all tags
that are followed or ignored via a wildcard selection
The return value is a dictionary, where keys are tag names
and values are two element lists with whe first value - follow count
and the second value - ignore count
"""
wild = dict()#the dict that is returned in the end
users = models.User.objects.all().order_by('username')
for user in users:
wk = user.interesting_tags.strip().split()
interesting_tags = models.Tag.objects.get_by_wildcards(wk)
for tag in interesting_tags:
if tag.name not in wild:
wild[tag.name] = [0, 0, 0]
wild[tag.name][0] += 1
wk = user.ignored_tags.strip().split()
ignored_tags = models.Tag.objects.get_by_wildcards(wk)
for tag in ignored_tags:
if tag.name not in wild:
wild[tag.name] = [0, 0, 0]
wild[tag.name][1] += 1
wk = user.subscribed_tags.strip().split()
subscribed_tags = models.Tag.objects.get_by_wildcards(wk)
for tag in subscribed_tags:
if tag.name not in wild:
wild[tag.name] = [0, 0, 0]
wild[tag.name][2] += 1
return wild
def print_sub_counts(self, print_empty):
"""prints subscription counts for
each tag (ignored and favorite counts)
"""
wild_tags = self.get_wildcard_tag_stats()
tags = models.Tag.objects.all().order_by('name')
item_count = 0
for tag in tags:
wild_follow = 0
wild_ignore = 0
wild_sub = 0
if tag.name in wild_tags:
(wild_follow, wild_ignore) = wild_tags[tag.name]
tag_marks = tag.user_selections
follow_count = tag_marks.filter(reason='good').count() \
+ wild_follow
ignore_count = tag_marks.filter(reason='bad').count() \
+ wild_ignore
subscribe_count = tag_marks.filter(reason='subscribe').count() \
+ wild_sub
follow_str = '%d (%d)' % (follow_count, wild_follow)
ignore_str = '%d (%d)' % (ignore_count, wild_ignore)
subscribe_str = '%d (%d)' % (subscribe_count, wild_sub)
counts = (11-len(subscribe_str)) * ' ' + subscribe_str + ' '
counts = (11-len(follow_str)) * ' ' + follow_str + ' '
counts += (11-len(ignore_str)) * ' ' + ignore_str
total_count = follow_count + ignore_count + subscribe_count
if total_count == 0 and print_empty == False:
continue
if item_count == 0:
print '%-32s %12s %12s %12s' % ('', 'Subscribed', 'Ignored ', 'Interesting')
print '%-32s %12s %12s %12s' % ('Tag name', 'Total(wild)', 'Total(wild)', 'Total(wild)')
print '%-32s %12s %12s %12s' % ('========', '===========', '===========', '===========')
print '%-32s %s' % (tag.name, counts)
item_count += 1
self.print_postamble(item_count)
def print_postamble(self, item_count):
print ''
if item_count == 0:
print 'Did not find anything'
else:
print '%d records shown' % item_count
print 'Since -e option was not selected, empty records were hidden'
| gpl-3.0 |
robertmattmueller/sdac-compiler | sympy/core/singleton.py | 29 | 2251 | """Singleton mechanism"""
from __future__ import print_function, division
from .core import Registry
from .assumptions import ManagedProperties
from .sympify import sympify
class SingletonRegistry(Registry):
    """
    A map between singleton classes and the corresponding instances.
    E.g. S.Exp == C.Exp()
    """
    __slots__ = []

    # Calling ``S(obj)`` is shorthand for ``sympify(obj)``.
    __call__ = staticmethod(sympify)

    def __repr__(self):
        return "S"

# The single, global registry instance; the Singleton metaclass below
# attaches each singleton instance to it via setattr(S, name, ...).
S = SingletonRegistry()
class Singleton(ManagedProperties):
    """
    Metaclass for singleton classes.

    A singleton class has only one instance which is returned every time the
    class is instantiated. Additionally, this instance can be accessed through
    the global registry object S as S.<class_name>.

    Examples
    ========

    >>> from sympy import S, Basic
    >>> from sympy.core.singleton import Singleton
    >>> from sympy.core.compatibility import with_metaclass
    >>> class MySingleton(with_metaclass(Singleton, Basic)):
    ...     pass
    >>> Basic() is Basic()
    False
    >>> MySingleton() is MySingleton()
    True
    >>> S.MySingleton is MySingleton()
    True

    ** Developer notes **
        The class is instantiated immediately at the point where it is defined
        by calling cls.__new__(cls). This instance is cached and cls.__new__ is
        rebound to return it directly.

        The original constructor is also cached to allow subclasses to access it
        and have their own instance.
    """

    def __init__(cls, name, bases, dict_):
        # NOTE(review): ``cls`` is passed as an extra positional argument
        # (four args after ``self``); kept byte-identical -- confirm against
        # ManagedProperties.__init__'s signature.
        super(Singleton, cls).__init__(cls, name, bases, dict_)

        # Find the most-derived class in the MRO that defines __new__.
        # Relies on the loop variable ``ancestor`` staying bound after the
        # loop ends.
        for ancestor in cls.mro():
            if '__new__' in ancestor.__dict__:
                break
        if isinstance(ancestor, Singleton) and ancestor is not cls:
            # A Singleton ancestor already cached the original constructor;
            # reuse it so this subclass builds its own distinct instance.
            ctor = ancestor._new_instance
        else:
            ctor = cls.__new__
        # Cache the original constructor for future subclasses (see above).
        cls._new_instance = staticmethod(ctor)

        # Create the one instance eagerly, then rebind __new__ so every
        # later instantiation returns that same cached object.
        the_instance = ctor(cls)

        def __new__(cls):
            return the_instance
        cls.__new__ = staticmethod(__new__)

        # Publish the instance on the global registry as S.<class_name>.
        setattr(S, name, the_instance)

        # Inject pickling support: __getnewargs__ returning () lets pickle
        # call the rebound __new__ with no arguments.
        def __getnewargs__(self):
            return ()
        cls.__getnewargs__ = __getnewargs__
| gpl-3.0 |
mecury421/gmock | scripts/generator/cpp/gmock_class.py | 135 | 6565 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
# ``set`` became a builtin in Python 2.4; on 2.3 referencing it raises
# NameError, so fall back to the ``sets`` module's Set class.
try:
  _dummy = set
except NameError:
  import sets
  set = sets.Set

_VERSION = (1, 0, 1)  # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
  """Append MOCK_METHOD lines for every virtual method of *class_node*.

  Args:
    output_lines: list of strings the generated lines are appended to.
    source: full C++ source text (used to recover parameter text).
    class_node: an ast.Class whose body is scanned for virtual functions.
  """
  function_type = ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL
  ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
  indent = ' ' * _INDENT

  for node in class_node.body:
    # We only care about virtual functions.
    if (isinstance(node, ast.Function) and
        node.modifiers & function_type and
        not node.modifiers & ctor_or_dtor):
      # Pick out all the elements we need from the original function.
      const = ''
      if node.modifiers & ast.FUNCTION_CONST:
        const = 'CONST_'
      return_type = 'void'
      if node.return_type:
        # Add modifiers like 'const'.
        modifiers = ''
        if node.return_type.modifiers:
          modifiers = ' '.join(node.return_type.modifiers) + ' '
        return_type = modifiers + node.return_type.name
        template_args = [arg.name for arg in node.return_type.templated_types]
        if template_args:
          return_type += '<' + ', '.join(template_args) + '>'
          if len(template_args) > 1:
            # MOCK_METHOD macros cannot take a comma-bearing return type;
            # emit a warning comment telling the user to typedef it.
            for line in [
                '// The following line won\'t really compile, as the return',
                '// type has multiple template arguments. To fix it, use a',
                '// typedef for the return type.']:
              output_lines.append(indent + line)
        if node.return_type.pointer:
          return_type += '*'
        if node.return_type.reference:
          return_type += '&'
      mock_method_macro = 'MOCK_%sMETHOD%d' % (const, len(node.parameters))
      args = ''
      if node.parameters:
        # Get the full text of the parameters from the start
        # of the first parameter to the end of the last parameter.
        start = node.parameters[0].start
        end = node.parameters[-1].end
        # Remove // comments.
        args_strings = re.sub(r'//.*', '', source[start:end])
        # Condense multiple spaces and eliminate newlines putting the
        # parameters together on a single line. Ensure there is a
        # space in an argument which is split by a newline without
        # intervening whitespace, e.g.: int\nBar
        args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
      # Create the mock method definition.
      output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
                           '%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
  """Build the Mock<Class> definitions for the selected classes.

  Args:
    filename: source file name, used only in error messages.
    source: full C++ source text.
    ast_list: iterable of top-level AST nodes from the parser.
    desired_class_names: set of class names to mock, or None for all.

  Returns:
    list of output lines. Missing/absent classes are reported to stderr.
  """
  processed_class_names = set()
  lines = []
  for node in ast_list:
    if (isinstance(node, ast.Class) and node.body and
        # desired_class_names being None means that all classes are selected.
        (not desired_class_names or node.name in desired_class_names)):
      class_name = node.name
      processed_class_names.add(class_name)
      class_node = node
      # Add namespace before the class.
      if class_node.namespace:
        lines.extend(['namespace %s {' % n for n in class_node.namespace])  # }
        lines.append('')
      # Add the class prolog.
      lines.append('class Mock%s : public %s {' % (class_name, class_name))  # }
      lines.append('%spublic:' % (' ' * (_INDENT // 2)))
      # Add all the methods.
      _GenerateMethods(lines, source, class_node)
      # Close the class.
      if lines:
        # If there are no virtual methods, no need for a public label.
        # NOTE(review): ``len(lines) == 2`` only detects an empty mock for
        # the *first* class processed, since ``lines`` accumulates across
        # classes -- confirm whether multi-class input needs this too.
        if len(lines) == 2:
          del lines[-1]
        # Only close the class if there really is a class.
        lines.append('};')
        lines.append('')  # Add an extra newline.
      # Close the namespace.
      if class_node.namespace:
        for i in range(len(class_node.namespace)-1, -1, -1):
          lines.append('} // namespace %s' % class_node.namespace[i])
        lines.append('')  # Add an extra newline.
  if desired_class_names:
    # Report any explicitly requested classes we never saw.
    missing_class_name_list = list(desired_class_names - processed_class_names)
    if missing_class_name_list:
      missing_class_name_list.sort()
      sys.stderr.write('Class(es) not found in %s: %s\n' %
                       (filename, ', '.join(missing_class_name_list)))
  elif not processed_class_names:
    sys.stderr.write('No class found in %s\n' % filename)
  return lines
def main(argv=sys.argv):
  """Command-line entry point: generate mocks for argv[1] to stdout.

  Returns 1 on usage error or unreadable input file; None otherwise.
  """
  if len(argv) < 2:
    sys.stderr.write('Google Mock Class Generator v%s\n\n' %
                     '.'.join(map(str, _VERSION)))
    sys.stderr.write(__doc__)
    return 1

  global _INDENT
  try:
    _INDENT = int(os.environ['INDENT'])
  except KeyError:
    # INDENT not set: keep the default.
    pass
  except ValueError:
    # INDENT set but not an integer (environment values are always
    # strings, so int() can only raise ValueError here); warn and keep
    # the default instead of swallowing arbitrary exceptions.
    sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))

  filename = argv[1]
  desired_class_names = None  # None means all classes in the source file.
  if len(argv) >= 3:
    desired_class_names = set(argv[2:])
  source = utils.ReadFile(filename)
  if source is None:
    return 1

  builder = ast.BuilderFromSource(source, filename)
  try:
    entire_ast = filter(None, builder.Generate())
  except KeyboardInterrupt:
    return
  except Exception:
    # An error message was already printed since we couldn't parse.
    # Catch Exception rather than a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are not silently swallowed.
    pass
  else:
    lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
    sys.stdout.write('\n'.join(lines))
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
  main(sys.argv)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.