repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
mutability/dump978
plot_nexrad.py
22
4597
#!/usr/bin/env python2 # # This takes the output of extract_nexrad and generates images. # It isn't very smart at the moment and won't draw anything # until all input has been consumed, so it's not very useful # for realtime continuous generation of maps. # import sys, math import cairo, colorsys intensities = { 0: colorsys.hls_to_rgb(240.0/360.0, 0.15, 1.0), 1: colorsys.hls_to_rgb(240.0/360.0, 0.2, 1.0), 2: colorsys.hls_to_rgb(200.0/360.0, 0.4, 1.0), 3: colorsys.hls_to_rgb(160.0/360.0, 0.4, 1.0), 4: colorsys.hls_to_rgb(120.0/360.0, 0.5, 1.0), 5: colorsys.hls_to_rgb(80.0/360.0, 0.5, 1.0), 6: colorsys.hls_to_rgb(40.0/360.0, 0.6, 1.0), 7: colorsys.hls_to_rgb(0.0/360.0, 0.7, 1.0) } def color_for(intensity): r,g,b = intensities[intensity] return cairo.SolidPattern(r,g,b,1.0) # mercator projection (yeah, it's not great, but it's simple) # lat, lon are in _arcminutes_ def project(lat,lon): lat /= 60.0 lat = math.pi * lat / 180.0 lon /= 60.0 lon -= 360.0 lon = math.pi * lon / 180.0 x = lon y = math.log(math.tan(math.pi/4.0 + lat/2.0)) return (x,y) images = {} while True: line = sys.stdin.readline() if not line: break words = line.strip().split(' ') if words[0] != 'NEXRAD': continue nexrad, maptype, maptime, sf, latN, lonW, latSize, lonSize, blockdata = words sf = int(sf) latN = int(latN) lonW = int(lonW) latSize = int(latSize) lonSize = int(lonSize) key = maptype + '/' + maptime if not key in images: images[key] = { 'type' : maptype, 'time' : maptime, 'lat_min' : latN - latSize, 'lat_max' : latN, 'lon_min' : lonW, 'lon_max' : lonW + lonSize, 'blocks' : { sf : [ (latN, lonW, latSize, lonSize, blockdata) ] } } else: image = images[key] image['lat_min'] = min(image['lat_min'], latN - latSize) image['lat_max'] = max(image['lat_max'], latN) image['lon_min'] = min(image['lon_min'], lonW) image['lon_max'] = max(image['lon_max'], lonW + lonSize) if not sf in image['blocks']: image['blocks'][sf] = [ (latN, lonW, latSize, lonSize, blockdata) ] else: image['blocks'][sf].append( 
(latN, lonW, latSize, lonSize, blockdata) ) for image in images.values(): lat_min = image['lat_min'] lat_max = image['lat_max'] lon_min = image['lon_min'] lon_max = image['lon_max'] # find most detailed scale; scale our image accordingly sf = min(image['blocks'].keys()) if sf == 1: scale = 5.0 elif sf == 2: scale = 9.0 else: scale = 1.0 pixels_per_degree = 80.0 / scale # project, find scale x0,y0 = project(lat_min,lon_min) x1,y1 = project(lat_min,lon_max) x2,y2 = project(lat_max,lon_min) x3,y3 = project(lat_max,lon_max) xmin = min(x0,x1,x2,x3) xmax = max(x0,x1,x2,x3) ymin = min(y0,y1,y2,y3) ymax = max(y0,y1,y2,y3) xsize = int(pixels_per_degree * 180.0 * (xmax - xmin) / math.pi) ysize = int(pixels_per_degree * 180.0 * (ymax - ymin) / math.pi) print image['type'], image['time'], 'dimensions', xsize, ysize, 'layers', len(image['blocks']) surface = cairo.ImageSurface(cairo.FORMAT_RGB24, xsize, ysize) cc = cairo.Context(surface) cc.set_antialias(cairo.ANTIALIAS_NONE) cc.scale(1.0 * xsize / (xmax - xmin), -1.0 * ysize / (ymax - ymin)) cc.translate(-xmin, -ymax) if image['type'] == 'CONUS': cc.set_source(color_for(0)) else: r,g,b = colorsys.hls_to_rgb(270.0/360.0, 0.10, 1.0) cc.set_source(cairo.SolidPattern(r,g,b,1.0)) cc.paint() for sf in sorted(image['blocks'].keys(), reverse=True): # lowest res first for latN, lonW, latSize, lonSize, data in image['blocks'][sf]: for y in xrange(4): for x in xrange(32): intensity = int(data[x+y*32]) lat = latN - y * latSize / 4.0 lon = lonW + x * lonSize / 32.0 cc.move_to(*project(lat,lon)) cc.line_to(*project(lat-latSize/4.0,lon)) cc.line_to(*project(lat-latSize/4.0,lon+lonSize/32.0)) cc.line_to(*project(lat,lon+lonSize/32.0)) cc.close_path() cc.set_source(color_for(intensity)) cc.fill() surface.write_to_png('nexrad_%s_%s.png' % (image['type'], image['time']))
gpl-2.0
alsrgv/tensorflow
tensorflow/python/ops/parallel_for/__init__.py
52
1204
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for pfor, for_loop, jacobian.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.parallel_for import * # pylint: disable=wildcard-import from tensorflow.python.ops.parallel_for.control_flow_ops import for_loop from tensorflow.python.ops.parallel_for.control_flow_ops import pfor from tensorflow.python.ops.parallel_for.gradients import batch_jacobian from tensorflow.python.ops.parallel_for.gradients import jacobian
apache-2.0
sdgdsffdsfff/xunlei-lixian
lixian_query.py
10
11797
__all__ = ['query', 'bt_query', 'user_query', 'Query', 'ExactQuery', 'SearchQuery', 'build_query', 'find_tasks_to_download', 'search_tasks', 'expand_bt_sub_tasks'] import lixian_hash_bt import lixian_hash_ed2k import lixian_encoding def link_normalize(url): from lixian_url import url_unmask, normalize_unicode_link url = url_unmask(url) if url.startswith('magnet:'): return 'bt://'+lixian_hash_bt.magnet_to_infohash(url).encode('hex') elif url.startswith('ed2k://'): return lixian_hash_ed2k.parse_ed2k_id(url) elif url.startswith('bt://'): return url.lower() elif url.startswith('http://') or url.startswith('ftp://'): return normalize_unicode_link(url) return url def link_equals(x1, x2): return link_normalize(x1) == link_normalize(x2) class TaskBase(object): def __init__(self, client, list_tasks, limit=None): self.client = client self.fetch_tasks_unlimited = list_tasks self.limit = limit self.queries = [] self.tasks = None self.files = {} self.commit_jobs = [[], []] self.download_jobs = [] def fetch_tasks(self): if self.limit: with self.client.attr(limit=self.limit): return self.fetch_tasks_unlimited() else: return self.fetch_tasks_unlimited() def register_queries(self, queries): self.queries += queries def unregister_query(self, query): self.queries.remove(query) def get_tasks(self): if self.tasks is None: self.tasks = self.fetch_tasks() return self.tasks def refresh_tasks(self): self.tasks = self.fetch_tasks() return self.tasks def get_files(self, task): assert isinstance(task, dict), task id = task['id'] if id in self.files: return self.files[id] self.files[id] = self.client.list_bt(task) return self.files[id] def find_task_by_id(self, id): assert isinstance(id, basestring), repr(id) for t in self.get_tasks(): if t['id'] == str(id) or t['#'] == int(id): return t def get_task_by_id(self, id): t = self.find_task_by_id(id) if not t: raise Exception('No task found for id '+id) return t def find_task_by_hash(self, hash): for t in self.get_tasks(): if t['type'] == 'bt' and 
t['bt_hash'].lower() == hash: return t def find_task_by_url(self, url): for t in self.get_tasks(): if link_equals(t['original_url'], url): return t def get_task_by_url(self, url): t = self.find_task_by_url(url) if not t: raise Exception('No task found for ' + lixian_encoding.to_native(url)) return t def add_url_task(self, url): self.commit_jobs[0].append(url) def add_bt_task_by_hash(self, hash): self.commit_jobs[1].append(['hash', hash]) def add_bt_task_by_content(self, content, name): self.commit_jobs[1].append(['content', (content, name)]) def add_magnet_task(self, hash): self.commit_jobs[1].append(['magnet', hash]) def commit(self): urls, bts = self.commit_jobs if urls: self.client.add_batch_tasks(map(lixian_encoding.try_native_to_utf_8, urls)) for bt_type, value in bts: if bt_type == 'hash': print 'Adding bt task', value # TODO: print the thing user inputs (may be not hash) self.client.add_torrent_task_by_info_hash(value) elif bt_type == 'content': content, name = value print 'Adding bt task', name self.client.add_torrent_task_by_content(content) elif bt_type == 'magnet': print 'Adding magnet task', value # TODO: print the thing user inputs (may be not hash) self.client.add_task(value) else: raise NotImplementedError(bt_type) self.commit_jobs = [[], []] self.refresh_tasks() def prepare(self): # prepare actions (e.g. 
add tasks) for query in self.queries: query.prepare() # commit and refresh task list self.commit() def query_complete(self): for query in list(self.queries): query.query_complete() def merge_results(self): tasks = merge_tasks(self.download_jobs) for t in tasks: if t['type'] == 'bt': # XXX: a dirty trick to cache requests t['base'] = self self.download_jobs = tasks def query_once(self): self.prepare() # merge results for query in self.queries: self.download_jobs += query.query_once() self.query_complete() self.merge_results() def query_search(self): for query in self.queries: self.download_jobs += query.query_search() self.merge_results() def peek_download_jobs(self): return self.download_jobs def pull_completed(self): completed = [] waiting = [] for t in self.download_jobs: if t['status_text'] == 'completed': completed.append(t) elif t['type'] != 'bt': waiting.append(t) elif 'files' not in t: waiting.append(t) else: i_completed = [] i_waiting = [] for f in t['files']: if f['status_text'] == 'completed': i_completed.append(f) else: i_waiting.append(f) if i_completed: tt = dict(t) tt['files'] = i_completed completed.append(tt) if i_waiting: tt = dict(t) tt['files'] = i_waiting waiting.append(tt) self.download_jobs = waiting return completed def refresh_status(self): self.refresh_tasks() self.files = {} tasks = [] for old_task in self.download_jobs: new_task = dict(self.get_task_by_id(old_task['id'])) if 'files' in old_task: files = self.get_files(new_task) new_task['files'] = [files[f['index']] for f in old_task['files']] tasks.append(new_task) self.download_jobs = tasks class Query(object): def __init__(self, base): self.bind(base) def bind(self, base): self.base = base self.client = base.client return self def unregister(self): self.base.unregister_query(self) def prepare(self): pass def query_once(self): raise NotImplementedError() def query_complete(self): raise NotImplementedError() def query_search(self): raise NotImplementedError() class ExactQuery(Query): def 
__init__(self, base): super(ExactQuery, self).__init__(base) def query_once(self): raise NotImplementedError() def query_complete(self): self.unregister() def query_search(self): raise NotImplementedError() class SearchQuery(Query): def __init__(self, base): super(SearchQuery, self).__init__(base) def query_once(self): return self.query_search() def query_complete(self): pass def query_search(self): raise NotImplementedError() ################################################## # register ################################################## processors = [] bt_processors = [] # 0 # 1 -- builtin -- most # 2 -- subs -- 0/[0-9] # 4 -- magnet # 5 -- user # 6 -- extend url # 7 -- plain url, bt url # 8 -- filter # 9 -- default -- text search def query(priority): assert isinstance(priority, (int, float)) def register(processor): processors.append((priority, processor)) return processor return register def bt_query(priority): assert isinstance(priority, (int, float)) def register(processor): bt_processors.append((priority, processor)) return processor return register def user_query(processor): return query(priority=5)(processor) def load_default_queries(): import lixian_queries ################################################## # query ################################################## def to_list_tasks(client, args): if args.category: return lambda: client.read_all_tasks_by_category(args.category) elif args.deleted: return client.read_all_deleted elif args.expired: return client.read_all_expired elif args.completed: return client.read_all_tasks elif args.failed: return client.read_all_tasks elif args.all: return client.read_all_tasks else: return client.read_all_tasks def to_query(base, arg, processors): for _, process in sorted(processors): q = process(base, arg) if q: return q raise NotImplementedError('No proper query process found for: ' + arg) def merge_files(files1, files2): ids = [] files = [] for f in files1 + files2: if f['id'] not in ids: files.append(f) 
ids.append(f['id']) return files def merge_tasks(tasks): result_tasks = [] task_mapping = {} for task in tasks: assert type(task) == dict, repr(type) id = task['id'] assert 'index' not in task if id in task_mapping: if 'files' in task and 'files' in task_mapping[id]: task_mapping[id]['files'] = merge_files(task_mapping[id]['files'], task['files']) else: if 'files' in task: t = dict(task) result_tasks.append(t) task_mapping[id] = t else: result_tasks.append(task) task_mapping[id] = task return result_tasks class AllQuery(SearchQuery): def __init__(self, base): super(AllQuery, self).__init__(base) def query_search(self): return self.base.get_tasks() class CompletedQuery(SearchQuery): def __init__(self, base): super(CompletedQuery, self).__init__(base) def query_search(self): return filter(lambda x: x['status_text'] == 'completed', self.base.get_tasks()) class FailedQuery(SearchQuery): def __init__(self, base): super(FailedQuery, self).__init__(base) def query_search(self): return filter(lambda x: x['status_text'] == 'failed', self.base.get_tasks()) class NoneQuery(SearchQuery): def __init__(self, base): super(NoneQuery, self).__init__(base) def query_search(self): return [] def default_query(options): if options.category: return AllQuery elif options.deleted: return AllQuery elif options.expired: return AllQuery elif options.completed: return CompletedQuery elif options.failed: return FailedQuery elif options.all: return AllQuery else: return NoneQuery def parse_queries(base, args): return [to_query(base, arg, bt_processors if args.torrent else processors) for arg in args] or [default_query(args)(base)] def parse_limit(args): limit = args.limit if limit: limit = int(limit) ids = [] for x in args: import re if re.match(r'^\d+$', x): ids.append(int(x)) elif re.match(r'^(\d+)/', x): ids.append(int(x.split('/')[0])) elif re.match(r'^(\d+)-(\d+)$', x): ids.extend(map(int, x.split('-'))) else: return limit if ids and limit: return min(max(ids)+1, limit) elif ids: return 
max(ids)+1 else: return limit def build_query(client, args): if args.input: import fileinput args._left.extend(line.strip() for line in fileinput.input(args.input) if line.strip()) load_default_queries() # IMPORTANT: init default queries limit = parse_limit(args) base = TaskBase(client, to_list_tasks(client, args), limit) base.register_queries(parse_queries(base, args)) return base ################################################## # compatible APIs ################################################## def find_tasks_to_download(client, args): base = build_query(client, args) base.query_once() return base.peek_download_jobs() def search_tasks(client, args): base = build_query(client, args) base.query_search() return base.peek_download_jobs() def expand_bt_sub_tasks(task): files = task['base'].get_files(task) # XXX: a dirty trick to cache requests not_ready = [] single_file = False if len(files) == 1 and files[0]['name'] == task['name']: single_file = True if 'files' in task: ordered_files = [] for t in task['files']: assert isinstance(t, dict) if t['status_text'] != 'completed': not_ready.append(t) else: ordered_files.append(t) files = ordered_files return files, not_ready, single_file ################################################## # simple helpers ################################################## def get_task_by_id(client, id): base = TaskBase(client, client.read_all_tasks) return base.get_task_by_id(id) def get_task_by_any(client, arg): import lixian_cli_parser tasks = search_tasks(client, lixian_cli_parser.parse_command_line([arg])) if not tasks: raise LookupError(arg) if len(tasks) > 1: raise LookupError('Too many results for ' + arg) return tasks[0]
mit
denny820909/builder
lib/python2.7/site-packages/SQLAlchemy-0.7.0-py2.7-linux-x86_64.egg/sqlalchemy/dialects/mysql/mysqldb.py
8
2818
# mysql/mysqldb.py # Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Support for the MySQL database via the MySQL-python adapter. MySQL-Python is available at: http://sourceforge.net/projects/mysql-python At least version 1.2.1 or 1.2.2 should be used. Connecting ----------- Connect string format:: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname> Character Sets -------------- Many MySQL server installations default to a ``latin1`` encoding for client connections. All data sent through the connection will be converted into ``latin1``, even if you have ``utf8`` or another character set on your tables and columns. With versions 4.1 and higher, you can change the connection character set either through server configuration or by including the ``charset`` parameter in the URL used for ``create_engine``. The ``charset`` option is passed through to MySQL-Python and has the side-effect of also enabling ``use_unicode`` in the driver by default. For regular encoded strings, also pass ``use_unicode=0`` in the connection arguments:: # set client encoding to utf8; all strings come back as unicode create_engine('mysql+mysqldb:///mydb?charset=utf8') # set client encoding to utf8; all strings come back as utf8 str create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0') Known Issues ------------- MySQL-python version 1.2.2 has a serious memory leak related to unicode conversion, a feature which is disabled via ``use_unicode=0``. Using a more recent version of MySQL-python is recommended. 
The recommended connection form with SQLAlchemy is:: engine = create_engine('mysql://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600) """ from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer) from sqlalchemy.connectors.mysqldb import ( MySQLDBExecutionContext, MySQLDBCompiler, MySQLDBIdentifierPreparer, MySQLDBConnector ) class MySQLExecutionContext_mysqldb(MySQLDBExecutionContext, MySQLExecutionContext): pass class MySQLCompiler_mysqldb(MySQLDBCompiler, MySQLCompiler): pass class MySQLIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, MySQLIdentifierPreparer): pass class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect): execution_ctx_cls = MySQLExecutionContext_mysqldb statement_compiler = MySQLCompiler_mysqldb preparer = MySQLIdentifierPreparer_mysqldb dialect = MySQLDialect_mysqldb
mit
martinmaly/service-catalog
vendor/github.com/ugorji/go/codec/test.py
1516
4019
#!/usr/bin/env python # This will create golden files in a directory passed to it. # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). # Ensure msgpack-python and cbor are installed first, using: # sudo apt-get install python-dev # sudo apt-get install python-pip # pip install --user msgpack-python msgpack-rpc-python cbor # Ensure all "string" keys are utf strings (else encoded as bytes) import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type l0 = [ -8, -1616, -32323232, -6464646464646464, 192, 1616, 32323232, 6464646464646464, 192, -3232.0, -6464646464.0, 3232.0, 6464.0, 6464646464.0, False, True, u"null", None, u"someday", 1328176922000002000, u"", -2206187877999998000, u"bytestring", 270, u"none", -2013855847999995777, #-6795364578871345152, ] l1 = [ { "true": True, "false": False }, { "true": u"True", "false": False, "uint16(1616)": 1616 }, { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], "int32":32323232, "bool": True, "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", "SHORT STRING": u"1234567890" }, { True: "true", 138: False, "false": 200 } ] l = [] l.extend(l0) l.append(l0) l.append(1) l.extend(l1) return l def build_test_data(destdir): l = get_test_data_list() for i in range(len(l)): # packer = msgpack.Packer() serialized = msgpack.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') f.write(serialized) f.close() serialized = cbor.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') f.write(serialized) f.close() def doRpcServer(port, stopTimeSec): class EchoHandler(object): def Echo123(self, msg1, msg2, msg3): return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) def EchoStruct(self, msg): return ("%s" % msg) addr = msgpackrpc.Address('localhost', port) server = msgpackrpc.Server(EchoHandler()) 
server.listen(addr) # run thread to stop it after stopTimeSec seconds if > 0 if stopTimeSec > 0: def myStopRpcServer(): server.stop() t = threading.Timer(stopTimeSec, myStopRpcServer) t.start() server.start() def doRpcClientToPythonSvc(port): address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doMain(args): if len(args) == 2 and args[0] == "testdata": build_test_data(args[1]) elif len(args) == 3 and args[0] == "rpc-server": doRpcServer(int(args[1]), int(args[2])) elif len(args) == 2 and args[0] == "rpc-client-python-service": doRpcClientToPythonSvc(int(args[1])) elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": doMain(sys.argv[1:])
apache-2.0
SWE574-Groupago/heritago
heritago/heritages/serializers.py
1
9002
import django.dispatch from rest_framework import serializers from heritages.models import Heritage, BasicInformation, Origin, Tag, Multimedia, Selector, AnnotationTarget, \ AnnotationBody, Annotation, User class BasicInformationSerializer(serializers.ModelSerializer): class Meta: model = BasicInformation fields = ("name", "value") class OriginSerializer(serializers.ModelSerializer): class Meta: model = Origin fields = ("name",) class TagSerializer(serializers.ModelSerializer): class Meta: model = Tag fields = ("name",) class MultimediaSerializer(serializers.ModelSerializer): # Field specification for "write_only" attribute is a duplicate of "extra_kwargs" option in Meta class. # Consider removing the following line: # file = serializers.FileField(write_only=True, required=False) class Meta: model = Multimedia fields = ("createdAt", "url", "type", "id", "file", "meta") read_only_fields = ("id", "url") # "write_only_fields" is a PendingDeprecation and it is replaced with "extra_kwargs" / # Source: http://www.django-rest-framework.org/topics/3.0-announcement/#the-extra_kwargs-option extra_kwargs = {'file': {'write_only': True}} def create(self, validated_data): multimedia = Multimedia.objects.create(**validated_data) if multimedia.file: multimedia.url = "/heritages/{}/{}/{}.png".format( multimedia.heritage.id, multimedia.type, multimedia.id) multimedia.save() heritage_created.send(sender=HeritageSerializer, instance=multimedia.heritage) return multimedia heritage_created = django.dispatch.Signal(providing_args=["instance"]) class HeritageSerializer(serializers.ModelSerializer): basicInformation = BasicInformationSerializer(many=True) origin = OriginSerializer(many=True) tags = TagSerializer(many=True) multimedia = MultimediaSerializer(many=True, read_only=True) class Meta: model = Heritage fields = ( "id", "title", "description", "basicInformation", "createdAt", "updatedAt", "tags", "startDate", "endDate", "exactDate", "origin", "multimedia") read_only_fields = 
("id",) def create(self, validated_data): basic_information = validated_data.pop("basicInformation") tags = validated_data.pop("tags") origin = validated_data.pop("origin") heritage = Heritage.objects.create(**validated_data) for entry in basic_information: BasicInformation.objects.create(heritage=heritage, **entry) for entry in origin: Origin.objects.create(heritage=heritage, **entry) for entry in tags: existing_tags = Tag.objects.filter(name=entry) if not existing_tags: Tag.objects.create(name=entry) heritage_tags = heritage.tags.filter(name=entry) if not heritage_tags: heritage.tags.add(*Tag.objects.get_or_create(**entry)) heritage_created.send(sender=self.__class__, instance=heritage) return heritage class SelectorSerializer(serializers.ModelSerializer): class Meta: model = Selector fields = ("type", "conformsTo", "value") class AnnotationTargetSerializer(serializers.ModelSerializer): selector = SelectorSerializer(many=True) class Meta: model = AnnotationTarget fields = ("target_id", "type", "format", "selector") def __init__(self, *args, **kwargs): super(AnnotationTargetSerializer, self).__init__(*args, **kwargs) self.fields["target_id"].label = "id" def create(self, validated_data): validated_selector = validated_data.pop("selector") target = AnnotationTarget.objects.create(**validated_data) for entry in validated_selector: Selector.objects.create(target=target, **entry) return target class AnnotationBodySerializer(serializers.ModelSerializer): class Meta: model = AnnotationBody fields = ("type", "value", "format") class AnnotationSerializer(serializers.ModelSerializer): body = AnnotationBodySerializer(many=True) target = AnnotationTargetSerializer(many=True) class Meta: model = Annotation fields = ("context", "annotation_id", "type", "motivation", "creator", "created", "body", "target") def to_representation(self, instance): data = super(AnnotationSerializer, self).to_representation(instance) data["@context"] = instance.context data["id"] = 
instance.annotation_id del data["context"] del data["annotation_id"] return data def create(self, validated_data): validated_body = validated_data.pop("body") validated_target = validated_data.pop("target") annotation = Annotation.objects.create(heritage=Heritage.objects.get(pk=self.context["heritage_id"]), **validated_data) for entry in validated_body: AnnotationBody.objects.create(annotation=annotation, **entry) for entry in validated_target: target = AnnotationTarget.objects.create(annotation=annotation, target_id=self.context["target_id"], type=entry["type"], format=entry["format"]) selector_data = entry.pop("selector") for data in selector_data: Selector.objects.create(target=target, type=data["type"], conformsTo=data["conformsTo"], value=data["value"]) return annotation class AnnotationPaleSerializer(serializers.ModelSerializer): body = AnnotationBodySerializer(many=True) target = AnnotationTargetSerializer(many=True) class Meta: model = Annotation fields = ("context", "annotation_id", "type", "motivation", "creator", "created", "body", "target") def to_representation(self, instance): data = super(AnnotationPaleSerializer, self).to_representation(instance) data["@context"] = instance.context data["id"] = instance.annotation_id del data["context"] del data["annotation_id"] return data def create(self, validated_data): validated_body = validated_data.pop("body") validated_target = validated_data.pop("target") annotation = Annotation.objects.create(**validated_data) for entry in validated_body: AnnotationBody.objects.create(annotation=annotation, **entry) for entry in validated_target: target = AnnotationTarget.objects.create(annotation=annotation, target_id=self.context["target_id"], type=entry["type"], format=entry["format"]) selector_data = entry.pop("selector") for data in selector_data: Selector.objects.create(target=target, type=data["type"], conformsTo=data["conformsTo"], value=data["value"]) return annotation class 
UserSerializer(serializers.ModelSerializer): password = serializers.CharField(write_only=True) class Meta: model = User fields = ("id", "first_name", "last_name", "username", "email", "password") read_only_fields = ["id"] def create(self, validated_data): password = validated_data["password"] user = User.objects.create(**validated_data) user.set_password(password) user.save() return user def update(self, instance, validated_data): instance.email = validated_data["email"] instance.first_name = validated_data["first_name"] instance.last_name = validated_data["last_name"] instance.set_password(validated_data["password"]) instance.save() return instance
mit
vied12/photoautomat-circus
sources/webassets/filter/cssutils.py
15
1507
from __future__ import absolute_import import logging import logging.handlers from webassets.filter import Filter __all__ = ('CSSUtils',) class CSSUtils(Filter): """Minifies CSS by removing whitespace, comments etc., using the Python `cssutils <http://cthedot.de/cssutils/>`_ library. Note that since this works as a parser on the syntax level, so invalid CSS input could potentially result in data loss. """ name = 'cssutils' def setup(self): import cssutils self.cssutils = cssutils try: # cssutils is unaware of so many new CSS3 properties, # vendor-prefixes etc., that it's diagnostic messages are rather # useless. Disable them. log = logging.getLogger('assets.cssutils') log.addHandler(logging.handlers.MemoryHandler(10)) # Newer versions of cssutils print a deprecation warning # for 'setlog'. if hasattr(cssutils.log, 'setLog'): func = cssutils.log.setLog else: func = cssutils.log.setlog func(log) except ImportError: # During doc generation, Django is not going to be setup and will # fail when the settings object is accessed. That's ok though. pass def output(self, _in, out, **kw): sheet = self.cssutils.parseString(_in.read()) self.cssutils.ser.prefs.useMinified() out.write(sheet.cssText.decode('utf-8'))
gpl-3.0
windedge/odoomrp-wip
stock_lock_lot/models/stock_production_lot.py
14
2870
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in root directory ############################################################################## from openerp import models, fields, api, exceptions, _ class StockProductionLot(models.Model): _name = 'stock.production.lot' _inherit = ['stock.production.lot', 'mail.thread'] _mail_post_access = 'read' _track = { 'locked': { 'stock_lock_lot.mt_lock_lot': lambda self, cr, uid, obj, ctx=None: obj.locked, 'stock_lock_lot.mt_unlock_lot': lambda self, cr, uid, obj, ctx=None: not obj.locked, }, } def _get_product_locked(self, product): """Should create locked? (including categories and parents) @param product: browse-record for product.product @return True when the category of the product or one of the parents demand new lots to be locked""" _locked = product.categ_id.lot_default_locked categ = product.categ_id.parent_id while categ and not _locked: _locked = categ.lot_default_locked categ = categ.parent_id return _locked @api.one def _get_locked_value(self): return self._get_product_locked(self.product_id) locked = fields.Boolean(string='Blocked', default='_get_locked_value', readonly=True) @api.one @api.onchange('product_id') def onchange_product_id(self): self.locked = self._get_product_locked(self.product_id) @api.multi def button_lock(self): stock_quant_obj = self.env['stock.quant'] for lot in self: cond = [('lot_id', '=', lot.id), ('reservation_id', '!=', False)] for quant in stock_quant_obj.search(cond): if quant.reservation_id.state not in ('cancel', 'done'): raise exceptions.Warning( _('Error! 
Serial Number/Lot "%s" currently has ' 'reservations.') % (lot.name)) return self.write({'locked': True}) @api.multi def button_unlock(self): return self.write({'locked': False}) @api.model def create(self, vals): product = self.env['product.product'].browse(vals.get('product_id')) vals['locked'] = self._get_product_locked(product) return super(StockProductionLot, self).create(vals) @api.one def write(self, values): if 'product_id' in values: product = self.env['product.product'].browse( values.get('product_id')) values['locked'] = self._get_product_locked(product) return super(StockProductionLot, self).write(values)
agpl-3.0
ZachRiegel/scriptbin
pypyjs/modules/distutils/tests/test_cmd.py
54
3840
"""Tests for distutils.cmd.""" import unittest import os from test.test_support import captured_stdout, run_unittest from distutils.cmd import Command from distutils.dist import Distribution from distutils.errors import DistutilsOptionError from distutils import debug class MyCmd(Command): def initialize_options(self): pass class CommandTestCase(unittest.TestCase): def setUp(self): dist = Distribution() self.cmd = MyCmd(dist) def test_ensure_string_list(self): cmd = self.cmd cmd.not_string_list = ['one', 2, 'three'] cmd.yes_string_list = ['one', 'two', 'three'] cmd.not_string_list2 = object() cmd.yes_string_list2 = 'ok' cmd.ensure_string_list('yes_string_list') cmd.ensure_string_list('yes_string_list2') self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list') self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list2') cmd.option1 = 'ok,dok' cmd.ensure_string_list('option1') self.assertEqual(cmd.option1, ['ok', 'dok']) cmd.option2 = ['xxx', 'www'] cmd.ensure_string_list('option2') cmd.option3 = ['ok', 2] self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'option3') def test_make_file(self): cmd = self.cmd # making sure it raises when infiles is not a string or a list/tuple self.assertRaises(TypeError, cmd.make_file, infiles=1, outfile='', func='func', args=()) # making sure execute gets called properly def _execute(func, args, exec_msg, level): self.assertEqual(exec_msg, 'generating out from in') cmd.force = True cmd.execute = _execute cmd.make_file(infiles='in', outfile='out', func='func', args=()) def test_dump_options(self): msgs = [] def _announce(msg, level): msgs.append(msg) cmd = self.cmd cmd.announce = _announce cmd.option1 = 1 cmd.option2 = 1 cmd.user_options = [('option1', '', ''), ('option2', '', '')] cmd.dump_options() wanted = ["command options for 'MyCmd':", ' option1 = 1', ' option2 = 1'] self.assertEqual(msgs, wanted) def test_ensure_string(self): cmd = self.cmd cmd.option1 = 'ok' 
cmd.ensure_string('option1') cmd.option2 = None cmd.ensure_string('option2', 'xxx') self.assertTrue(hasattr(cmd, 'option2')) cmd.option3 = 1 self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3') def test_ensure_filename(self): cmd = self.cmd cmd.option1 = __file__ cmd.ensure_filename('option1') cmd.option2 = 'xxx' self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2') def test_ensure_dirname(self): cmd = self.cmd cmd.option1 = os.path.dirname(__file__) or os.curdir cmd.ensure_dirname('option1') cmd.option2 = 'xxx' self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2') def test_debug_print(self): cmd = self.cmd with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: cmd.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), 'xxx\n') finally: debug.DEBUG = False def test_suite(): return unittest.makeSuite(CommandTestCase) if __name__ == '__main__': run_unittest(test_suite())
gpl-3.0
Vaidyanath/tempest
tempest/openstack/common/gettextutils.py
11
18039
# Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ gettext for openstack-common modules. Usual usage in an openstack.common module: from tempest.openstack.common.gettextutils import _ """ import copy import gettext import locale from logging import handlers import os from babel import localedata import six _AVAILABLE_LANGUAGES = {} # FIXME(dhellmann): Remove this when moving to oslo.i18n. USE_LAZY = False class TranslatorFactory(object): """Create translator functions """ def __init__(self, domain, localedir=None): """Establish a set of translation functions for the domain. :param domain: Name of translation domain, specifying a message catalog. :type domain: str :param lazy: Delays translation until a message is emitted. Defaults to False. :type lazy: Boolean :param localedir: Directory with translation catalogs. :type localedir: str """ self.domain = domain if localedir is None: localedir = os.environ.get(domain.upper() + '_LOCALEDIR') self.localedir = localedir def _make_translation_func(self, domain=None): """Return a new translation function ready for use. Takes into account whether or not lazy translation is being done. The domain can be specified to override the default from the factory, but the localedir from the factory is always used because we assume the log-level translation catalogs are installed in the same directory as the main application catalog. 
""" if domain is None: domain = self.domain t = gettext.translation(domain, localedir=self.localedir, fallback=True) # Use the appropriate method of the translation object based # on the python version. m = t.gettext if six.PY3 else t.ugettext def f(msg): """oslo.i18n.gettextutils translation function.""" if USE_LAZY: return Message(msg, domain=domain) return m(msg) return f @property def primary(self): "The default translation function." return self._make_translation_func() def _make_log_translation_func(self, level): return self._make_translation_func(self.domain + '-log-' + level) @property def log_info(self): "Translate info-level log messages." return self._make_log_translation_func('info') @property def log_warning(self): "Translate warning-level log messages." return self._make_log_translation_func('warning') @property def log_error(self): "Translate error-level log messages." return self._make_log_translation_func('error') @property def log_critical(self): "Translate critical-level log messages." return self._make_log_translation_func('critical') # NOTE(dhellmann): When this module moves out of the incubator into # oslo.i18n, these global variables can be moved to an integration # module within each application. # Create the global translation functions. _translators = TranslatorFactory('tempest') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical # NOTE(dhellmann): End of globals that will move to the application's # integration module. 
def enable_lazy(): """Convenience function for configuring _() to use lazy gettext Call this at the start of execution to enable the gettextutils._ function to use lazy gettext functionality. This is useful if your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. """ global USE_LAZY USE_LAZY = True def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). Note that to enable lazy translation, enable_lazy must be called. :param domain: the translation domain """ from six import moves tf = TranslatorFactory(domain) moves.builtins.__dict__['_'] = tf.primary class Message(six.text_type): """A Message object is a unicode object that can be translated. Translation of Message is done explicitly using the translate() method. For all non-translation intents and purposes, a Message is simply unicode, and can be treated as such. """ def __new__(cls, msgid, msgtext=None, params=None, domain='tempest', *args): """Create a new Message object. In order for translation to work gettext requires a message ID, this msgid will be used as the base unicode text. It is also possible for the msgid and the base unicode text to be different by passing the msgtext parameter. """ # If the base msgtext is not given, we use the default translation # of the msgid (which is in English) just in case the system locale is # not English, so that the base text will be in that locale by default. if not msgtext: msgtext = Message._translate_msgid(msgid, domain) # We want to initialize the parent unicode with the actual object that # would have been plain unicode if 'Message' was not enabled. 
msg = super(Message, cls).__new__(cls, msgtext) msg.msgid = msgid msg.domain = domain msg.params = params return msg def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message @staticmethod def _translate_msgid(msgid, domain, desired_locale=None): if not desired_locale: system_locale = locale.getdefaultlocale() # If the system locale is not available to the runtime use English if not system_locale[0]: desired_locale = 'en_US' else: desired_locale = system_locale[0] locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') lang = gettext.translation(domain, localedir=locale_dir, languages=[desired_locale], fallback=True) if six.PY3: translator = lang.gettext else: translator = lang.ugettext translated_message = translator(msgid) return translated_message def __mod__(self, other): # When we mod a Message we want the actual operation to be performed # by the parent class (i.e. 
unicode()), the only thing we do here is # save the original msgid and the parameters in case of a translation params = self._sanitize_mod_params(other) unicode_mod = super(Message, self).__mod__(params) modded = Message(self.msgid, msgtext=unicode_mod, params=params, domain=self.domain) return modded def _sanitize_mod_params(self, other): """Sanitize the object being modded with this Message. - Add support for modding 'None' so translation supports it - Trim the modded object, which can be a large dictionary, to only those keys that would actually be used in a translation - Snapshot the object being modded, in case the message is translated, it will be used as it was when the Message was created """ if other is None: params = (other,) elif isinstance(other, dict): # Merge the dictionaries # Copy each item in case one does not support deep copy. params = {} if isinstance(self.params, dict): for key, val in self.params.items(): params[key] = self._copy_param(val) for key, val in other.items(): params[key] = self._copy_param(val) else: params = self._copy_param(other) return params def _copy_param(self, param): try: return copy.deepcopy(param) except Exception: # Fallback to casting to unicode this will handle the # python code-like objects that can't be deep-copied return six.text_type(param) def __add__(self, other): msg = _('Message objects do not support addition.') raise TypeError(msg) def __radd__(self, other): return self.__add__(other) if six.PY2: def __str__(self): # NOTE(luisg): Logging in python 2.6 tries to str() log records, # and it expects specifically a UnicodeError in order to proceed. msg = _('Message objects do not support str() because they may ' 'contain non-ascii characters. ' 'Please use unicode() or translate() instead.') raise UnicodeError(msg) def get_available_languages(domain): """Lists the available languages for the given translation domain. 
:param domain: the domain to get languages for """ if domain in _AVAILABLE_LANGUAGES: return copy.copy(_AVAILABLE_LANGUAGES[domain]) localedir = '%s_LOCALEDIR' % domain.upper() find = lambda x: gettext.find(domain, localedir=os.environ.get(localedir), languages=[x]) # NOTE(mrodden): en_US should always be available (and first in case # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was # renamed to locale_identifiers() in >=1.0, the requirements master list # requires >=0.9.6, uncapped, so defensively work with both. We can remove # this check when the master list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() for i in locale_identifiers: if find(i) is not None: language_list.append(i) # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they # are perfectly legitimate locales: # https://github.com/mitsuhiko/babel/issues/37 # In Babel 1.3 they fixed the bug and they support these locales, but # they are still not explicitly "listed" by locale_identifiers(). # That is why we add the locales here explicitly if necessary so that # they are listed as supported. aliases = {'zh': 'zh_CN', 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} for (locale_, alias) in six.iteritems(aliases): if locale_ in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list) def translate(obj, desired_locale=None): """Gets the translated unicode representation of the given object. If the object is not translatable it is returned as-is. If the locale is None the object is translated to the system locale. 
:param obj: the object to translate :param desired_locale: the locale to translate the message to, if None the default system locale will be used :returns: the translated object in unicode, or the original object if it could not be translated """ message = obj if not isinstance(message, Message): # If the object to translate is not already translatable, # let's first get its unicode representation message = six.text_type(obj) if isinstance(message, Message): # Even after unicoding() we still need to check if we are # running with translatable unicode before translating return message.translate(desired_locale) return obj def _translate_args(args, desired_locale=None): """Translates all the translatable elements of the given arguments object. This method is used for translating the translatable values in method arguments which include values of tuples or dictionaries. If the object is not a tuple or a dictionary the object itself is translated if it is translatable. If the locale is None the object is translated to the system locale. :param args: the args to translate :param desired_locale: the locale to translate the args to, if None the default system locale will be used :returns: a new args object with the translated contents of the original """ if isinstance(args, tuple): return tuple(translate(v, desired_locale) for v in args) if isinstance(args, dict): translated_dict = {} for (k, v) in six.iteritems(args): translated_v = translate(v, desired_locale) translated_dict[k] = translated_v return translated_dict return translate(args, desired_locale) class TranslationHandler(handlers.MemoryHandler): """Handler that translates records before logging them. The TranslationHandler takes a locale and a target logging.Handler object to forward LogRecord objects to after translating them. This handler depends on Message objects being logged, instead of regular strings. 
The handler can be configured declaratively in the logging.conf as follows: [handlers] keys = translatedlog, translator [handler_translatedlog] class = handlers.WatchedFileHandler args = ('/var/log/api-localized.log',) formatter = context [handler_translator] class = openstack.common.log.TranslationHandler target = translatedlog args = ('zh_CN',) If the specified locale is not available in the system, the handler will log in the default locale. """ def __init__(self, locale=None, target=None): """Initialize a TranslationHandler :param locale: locale to use for translating messages :param target: logging.Handler object to forward LogRecord objects to after translation """ # NOTE(luisg): In order to allow this handler to be a wrapper for # other handlers, such as a FileHandler, and still be able to # configure it using logging.conf, this handler has to extend # MemoryHandler because only the MemoryHandlers' logging.conf # parsing is implemented such that it accepts a target handler. handlers.MemoryHandler.__init__(self, capacity=0, target=target) self.locale = locale def setFormatter(self, fmt): self.target.setFormatter(fmt) def emit(self, record): # We save the message from the original record to restore it # after translation, so other handlers are not affected by this original_msg = record.msg original_args = record.args try: self._translate_and_log_record(record) finally: record.msg = original_msg record.args = original_args def _translate_and_log_record(self, record): record.msg = translate(record.msg, self.locale) # In addition to translating the message, we also need to translate # arguments that were passed to the log method that were not part # of the main message e.g., log.info(_('Some message %s'), this_one)) record.args = _translate_args(record.args, self.locale) self.target.emit(record)
apache-2.0
svanscho/weatherlight
weatherlight-server/openweathermap.py
1
5038
import urllib2 import json from datetime import datetime import pytz import logging class Openweathermap(object): def __init__(self, appid): self.appid = appid #the openweathermaps application id you got when signing up for the API #return codes according to this definition http://openweathermap.org/weather-conditions def getForecastForDay(self, latitude, longitude, day): json_object = self.__forecast(latitude, longitude) days = json_object["list"] days = sorted(days, key=lambda days: days["dt"]) #sort by date return days[day]["weather"][0]["id"], int(round(days[day]["temp"]["max"])) def getForecastForDayByCityAndCountry(self, city, country_code, day): json_object = self.__forecastByCityAndCountry(city, country_code) days = json_object["list"] days = sorted(days, key=lambda days: days["dt"]) #sort by date return days[day]["weather"][0]["id"], int(round(days[day]["temp"]["max"])) def getSunset(self, latitude, longitude): json_object = self.__weather(latitude, longitude) sys_object = json_object["sys"] unix_time = int(sys_object["sunset"]) return datetime.fromtimestamp(unix_time, pytz.utc) def getSunsetByCityAndCountry(self, city, country_code): json_object = self.__weatherByCityAndCountry(city, country_code) sys_object = json_object["sys"] return datetime.fromtimestamp(int(sys_object["sunset"]), pytz.utc) #helper methods def __get_json(self, url): response = urllib2.urlopen(url) json_string = response.read() return json.loads(json_string) # { # "city": { # "id": 2174003, # "name": "Brisbane", # "coord": { # "lon": 153.028091, # "lat": -27.467939 # }, # "country": "AU", # "population": 0 # }, # "cod": "200", # "message": 0.0115, # "cnt": 2, # "list": [ # { # "dt": 1475802000, # "temp": { # "day": 302.05, # "min": 291.11, # "max": 303.66, # "night": 291.11, # "eve": 301.04, # "morn": 296.67 # }, # "pressure": 1022.44, # "humidity": 63, # "weather": [ # { # "id": 800, # "main": "Clear", # "description": "clear sky", # "icon": "01d" # } # ], # "speed": 1.61, # "deg": 0, 
# "clouds": 0 # }, # { # "dt": 1475888400, # "temp": { # "day": 298.05, # "min": 288.61, # "max": 302.57, # "night": 294.27, # "eve": 301.71, # "morn": 288.61 # }, # "pressure": 1019.83, # "humidity": 59, # "weather": [ # { # "id": 801, # "main": "Clouds", # "description": "few clouds", # "icon": "02d" # } # ], # "speed": 2.46, # "deg": 297, # "clouds": 12 # } # ] # } def __forecastByCityAndCountry(self, city, country_code): url = "http://api.openweathermap.org/data/2.5/forecast/daily?q=%s,%s&APPID=%s&cnt=16" % (city, country_code, self.appid) return self.__get_json(url) def __forecast(self, latitude, longitude): url = "http://api.openweathermap.org/data/2.5/forecast/daily?lat=%s&lon=%s&APPID=%s&cnt=16" % (latitude,longitude, self.appid) return self.__get_json(url) #e.g. weather response # { # "coord": { # "lon": 153.03, # "lat": -27.47 # }, # "weather": [ # { # "id": 800, # "main": "Clear", # "description": "clear sky", # "icon": "01n" # } # ], # "base": "stations", # "main": { # "temp": 296.694, # "pressure": 1025.52, # "humidity": 60, # "temp_min": 296.694, # "temp_max": 296.694, # "sea_level": 1037.97, # "grnd_level": 1025.52 # }, # "wind": { # "speed": 1.86, # "deg": 352.501 # }, # "clouds": { # "all": 0 # }, # "dt": 1475796100, # "sys": { # "message": 0.0083, # "country": "AU", # "sunrise": 1475695231, # "sunset": 1475740278 # }, # "id": 2174003, # "name": "Brisbane", # "cod": 200 # } def __weatherByCityAndCountry(self, city, country_code): url = "http://api.openweathermap.org/data/2.5/weather?q=%s,%s&APPID=%s" % (city, country_code, self.appid) return self.__get_json(url) def __weather(self, latitude, longitude): url = "http://api.openweathermap.org/data/2.5/weather?lat=%s&lon=%s&APPID=%s" % (latitude,longitude, self.appid) return self.__get_json(url)
mit
fabaff/ansible
lib/ansible/cli/doc.py
17
11990
# (c) 2014, James Tanner <tanner.jc@gmail.com> # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import traceback import textwrap from ansible.compat.six import iteritems from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins import module_loader from ansible.cli import CLI from ansible.utils import module_docs try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class DocCLI(CLI): """ Vault command line class """ BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt') IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"] def __init__(self, args): super(DocCLI, self).__init__(args) self.module_list = [] def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [options] [module...]', epilog='Show Ansible module documentation', module_opts=True, ) self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available modules') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for 
specified module(s)') self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity def run(self): super(DocCLI, self).run() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) # list modules if self.options.list_dir: paths = module_loader._get_paths() for path in paths: self.find_modules(path) self.pager(self.get_module_list_text()) return 0 if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line module list text = '' for module in self.args: try: # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0)) except: display.vvv(traceback.print_exc()) display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) continue if doc is not None: all_keys = [] for (k,v) in iteritems(doc['options']): all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = filename doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs if self.options.show_snippet: text += self.get_snippet_text(doc) else: text += self.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. 
raise AnsibleError("Parsing produced an empty object.") except Exception as e: display.vvv(traceback.print_exc()) raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) self.pager(text) return 0 def find_modules(self, path): if os.path.isdir(path): for module in os.listdir(path): if module.startswith('.'): continue elif os.path.isdir(module): self.find_modules(module) elif any(module.endswith(x) for x in self.BLACKLIST_EXTS): continue elif module.startswith('__'): continue elif module in self.IGNORE_FILES: continue elif module.startswith('_'): fullpath = '/'.join([path,module]) if os.path.islink(fullpath): # avoids aliases continue module = os.path.splitext(module)[0] # removes the extension self.module_list.append(module) def get_module_list_text(self): columns = display.columns displace = max(len(x) for x in self.module_list) linelimit = columns - displace - 5 text = [] deprecated = [] for module in sorted(set(self.module_list)): if module in module_docs.BLACKLIST_MODULES: continue # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue if filename.endswith(".ps1"): continue if os.path.isdir(filename): continue try: doc, plainexamples, returndocs = module_docs.get_docstring(filename) desc = self.tty_ify(doc.get('short_description', '?')).strip() if len(desc) > linelimit: desc = desc[:linelimit] + '...' 
if module.startswith('_'): # Handle deprecated deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module) if len(deprecated) > 0: text.append("\nDEPRECATED:") text.extend(deprecated) return "\n".join(text) @staticmethod def print_paths(finder): ''' Returns a string suitable for printing of the search path ''' # Uses a list to get the order right ret = [] for i in finder._get_paths(): if i not in ret: ret.append(i) return os.pathsep.join(ret) def get_snippet_text(self, doc): text = [] desc = CLI.tty_ify(doc['short_description']) text.append("- name: %s" % (desc)) text.append(" action: %s" % (doc['module'])) pad = 31 subdent = ''.join([" " for a in xrange(pad)]) limit = display.columns - pad for o in sorted(doc['options'].keys()): opt = doc['options'][o] desc = CLI.tty_ify(" ".join(opt['description'])) if opt.get('required', False): s = o + "=" else: s = o text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent))) text.append('') return "\n".join(text) def get_man_text(self, doc): opt_indent=" " text = [] text.append("> %s\n" % doc['module'].upper()) pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) if isinstance(doc['description'], list): desc = " ".join(doc['description']) else: desc = doc['description'] text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" ")) if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0: text.append("DEPRECATED: \n%s\n" % doc['deprecated']) if 'option_keys' in doc and len(doc['option_keys']) > 0: text.append("Options (= is mandatory):\n") for o in sorted(doc['option_keys']): opt = doc['options'][o] if opt.get('required', False): opt_leadin = "=" else: opt_leadin = "-" text.append("%s 
%s" % (opt_leadin, o)) if isinstance(opt['description'], list): desc = " ".join(opt['description']) else: desc = opt['description'] if 'choices' in opt: choices = ", ".join(str(i) for i in opt['choices']) desc = desc + " (Choices: " + choices + ")" if 'default' in opt: default = str(opt['default']) desc = desc + " [Default: " + default + "]" text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0: notes = " ".join(doc['notes']) text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent)) if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: req = ", ".join(doc['requirements']) text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent)) if 'examples' in doc and len(doc['examples']) > 0: text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) for ex in doc['examples']: text.append("%s\n" % (ex['code'])) if 'plainexamples' in doc and doc['plainexamples'] is not None: text.append("EXAMPLES:") text.append(doc['plainexamples']) if 'returndocs' in doc and doc['returndocs'] is not None: text.append("RETURN VALUES:") text.append(doc['returndocs']) text.append('') maintainers = set() if 'author' in doc: if isinstance(doc['author'], basestring): maintainers.add(doc['author']) else: maintainers.update(doc['author']) if 'maintainers' in doc: if isinstance(doc['maintainers'], basestring): maintainers.add(doc['author']) else: maintainers.update(doc['author']) text.append('MAINTAINERS: ' + ', '.join(maintainers)) text.append('') return "\n".join(text)
gpl-3.0
jkliff/project_walker
project-walker-core/src/python/main/Checkers.py
1
9326
#!/usr/bin/python # -*- coding: utf-8 -*- import os.path import re import subprocess try: from lxml import etree except ImportError: try: import xml.etree.cElementTree as etree except: import elementtree.ElementTree as etree import ProjectWalker import GlobMatch from interpol import interpol class MavenPomChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.addOption('mavenVersion', default=2) self.addOption('useParent', default=False) self.addOption('usePackaging', default=False) self.addOption('versionInDependencies', default=True) self.addOption('dependencyVersions', default=[]) self.addAcceptRule(lambda f: f.file_attrs['file_name'] == 'pom.xml') def eval(self, node): ns = {'p': 'http://maven.apache.org/POM/4.0.0'} path = node.file_attrs['full_path'] with open(path) as f: doc = etree.parse(f) if self.useParent: parent = doc.getroot().xpath('/p:project/p:parent', namespaces=ns) if parent == []: self.addResult('No parent pom defined in [{}]!'.format(path)) if self.usePackaging: packaging = doc.getroot().xpath('/p:project/p:packaging', namespaces=ns) if packaging == []: self.addResult('No packaging defined in [{}]!'.format(path)) if self.dependencyVersions and not self.dependencyVersions['versionsAllowed']: ex_res = [] for d in doc.getroot().xpath('/p:project/p:dependencies/p:dependency', namespaces=ns): artifact = d.xpath('p:artifactId', namespaces=ns)[0].text if self.__isExcluded(artifact): continue if d.xpath('p:version', namespaces=ns): self.addResult('Version used in [{}] for artifact [{}]!'.format(path, artifact)) def __isExcluded(self, artifact): if 'excludeArtifact' not in self.dependencyVersions: return False if 'excludeRegexes' in self.dependencyVersions: res = self.dependencyVersions['excludeRegexes'] else: res = [] patterns = [] ea = self.dependencyVersions['excludeArtifact'] if type(ea) == list: patterns = ea else: patterns.append(ea) for p in patterns: 
res.append(re.compile('^{}$'.format(p))) self.dependencyVersions['excludeRegexes'] = res for r in res: if r.match(artifact): return True return False class ExternalChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.addOption('files', isList=True, default=[]) self.addOption('excludeFiles', isList=True, default=[]) self.addOption('command', isList=True) self.parseOptions() self.setUpIncludesExcludes(self.files, self.excludeFiles) self.cmds = [] for tcmd in self.command: self.cmds.append(interpol(vars, tcmd)) def eval(self, node): for cmd in self.cmds: self.addResult(self.callCommand(interpol(node.file_attrs, cmd))) def callCommand(self, cmd): try: subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) return None except subprocess.CalledProcessError, e: return e.output except: return 'Unknown error occured on calling [{}]'.format(cmd) class FileExistsChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.fileCount = {} self.addOption('requiredFiles', isList=True) self.addOption('count', default=-1) self.parseOptions() for f in self.requiredFiles: self.fileCount[GlobMatch.prepare(self.interpolatePathExpression(f))] = 0 def eval(self, node): result = [] for gb in self.fileCount.iterkeys(): # handle absolute and relative paths differently if gb.match(node.file_attrs['full_path']): self.fileCount[gb] = self.fileCount[gb] + 1 return None def evalOnEnd(self): for (f, c) in self.fileCount.iteritems(): if c < 1 and self.count == -1: self.addResult('Could not find file [{}]'.format(f)) elif c != self.count and self.count != -1: self.addResult('Found file [{}] {} time(s), required {}.'.format(f, c, self.count)) class FileContainsChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.addOption('files', isList=True, 
default=[]) self.addOption('excludeFiles', isList=True, default=[]) self.addOption('caseSensitive', default=True) self.parseOptions() self.setUpIncludesExcludes(self.files, self.excludeFiles) if self.caseSensitive: self.reOption = 0 else: self.reOption = re.IGNORECASE def eval(self, node): current_config = self.interpolateNode(node) fpath = node.file_attrs['full_path'] contains = {} for c in current_config['contains']: contains[c] = {} contains[c]['re'] = re.compile(c, self.reOption) contains[c]['found'] = False try: f = open(fpath, 'r') for l in f: for (c_line, c_vals) in contains.iteritems(): if c_vals['re'].search(l): c_vals['found'] = True continue for (c_line, c_vals) in contains.iteritems(): if not c_vals['found']: self.addResult('Could not find line [{}] in file [{}].'.format(c_line, fpath)) except IOError: return self.addResult('Could not open file [{}]'.format(fpath)) class FileNameChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.addOption('files', isList=True, default=[]) self.addOption('excludeFiles', isList=True, default=[]) self.addOption('matches') self.parseOptions() self.setUpIncludesExcludes(self.files, self.excludeFiles) self.r = re.compile(self.matches) def eval(self, node): result = [] n = node.file_attrs['file_name'] p = node.file_attrs['full_path'] if not self.r.match(n): self.addResult('File [{}] does not match [{}]!'.format(p, self.matches)) class FilePropertyChecker(ProjectWalker.Checker): def __init__(self, vars, config): ProjectWalker.Checker.__init__(self, self.__class__, vars, config) self.addOption('files', isList=True, default=[]) self.addOption('excludeFiles', isList=True, default=[]) self.addOption('encoding', default='utf8') self.addOption('lineEnding', default='unix') self.addOption('whitespace', default='space') self.addOption('trailingWhitespace', default=True) self.addOption('lineLength', default=120) self.parseOptions() 
self.setUpIncludesExcludes(self.files, self.excludeFiles) if self.lineEnding == 'unix': self.er = re.compile('.*[^\r]\n$') else: self.er = re.compile('.*\r\n$') if self.whitespace == 'space': self.sp = '\t' self.wopp = 'tab' else: self.sp = ' ' self.wopp = 'space' self.trr = re.compile('\S*[ \t]+$') self.wrongEndingFound = False self.addDenyRule(lambda f: f.file_attrs['type'] == 'd') def eval(self, node): path = node.file_attrs['full_path'] try: with open(path, 'r') as f: i = 0 for l in f: i = i + 1 ll = len(l) if ll > 1 and not self.wrongEndingFound and not self.er.match(l): self.addResult('Line ending of file [{}] is not [{}]!'.format(path, self.ending)) self.wrongEndingFound = True chrp = l.find(self.sp) if chrp > -1: self.addResult('[{}] found instead of [{}] in file [{}] in line [{}] at char position [{}]!'.format(self.wopp, self.whitespace, path, i, chrp)) if not self.trailingWhitespace and ll > 1 and self.trr.match(l): self.addResult('Trailing whitespace found in file [{}] in line [{}]!'.format(path, i)) if ll > self.lineLength: self.addResult('Line [{}] in file [{}] is longer than [{}]!'.format(i, path, self.lineLength)) except IOError: self.addResult('Encoding of file [{}] is not [{}]!'.format(path, self.encoding))
bsd-3-clause
apache/airflow
airflow/providers/google/cloud/hooks/text_to_speech.py
2
5609
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module contains a Google Cloud Text to Speech Hook.""" from typing import Dict, Optional, Sequence, Union from google.api_core.retry import Retry from google.cloud.texttospeech_v1 import TextToSpeechClient from google.cloud.texttospeech_v1.types import ( AudioConfig, SynthesisInput, SynthesizeSpeechResponse, VoiceSelectionParams, ) from airflow.providers.google.common.hooks.base_google import GoogleBaseHook class CloudTextToSpeechHook(GoogleBaseHook): """ Hook for Google Cloud Text to Speech API. All the methods in the hook where project_id is used must be called with keyword arguments rather than positional. :param gcp_conn_id: The connection ID to use when fetching connection info. :type gcp_conn_id: str :param delegate_to: The account to impersonate using domain-wide delegation of authority, if any. For this to work, the service account making the request must have domain-wide delegation enabled. :type delegate_to: str :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. 
If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account. :type impersonation_chain: Union[str, Sequence[str]] """ def __init__( self, gcp_conn_id: str = "google_cloud_default", delegate_to: Optional[str] = None, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, ) -> None: super().__init__( gcp_conn_id=gcp_conn_id, delegate_to=delegate_to, impersonation_chain=impersonation_chain, ) self._client = None # type: Optional[TextToSpeechClient] def get_conn(self) -> TextToSpeechClient: """ Retrieves connection to Cloud Text to Speech. :return: Google Cloud Text to Speech client object. :rtype: google.cloud.texttospeech_v1.TextToSpeechClient """ if not self._client: self._client = TextToSpeechClient( credentials=self._get_credentials(), client_info=self.client_info ) return self._client @GoogleBaseHook.quota_retry() def synthesize_speech( self, input_data: Union[Dict, SynthesisInput], voice: Union[Dict, VoiceSelectionParams], audio_config: Union[Dict, AudioConfig], retry: Optional[Retry] = None, timeout: Optional[float] = None, ) -> SynthesizeSpeechResponse: """ Synthesizes text input :param input_data: text input to be synthesized. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput :type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput :param voice: configuration of voice to be used in synthesis. 
See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams :type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams :param audio_config: configuration of the synthesized audio. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig :type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: SynthesizeSpeechResponse See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse :rtype: object """ client = self.get_conn() self.log.info("Synthesizing input: %s", input_data) return client.synthesize_speech( input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout )
apache-2.0
MST-MRR/DroneKit
Flight/AirTrafficControl.py
1
21189
############################################ # This file contains a wrapper class # for DroneKit related operations # for our drone. ############################################ # Multi-Rotor Robot Design Team # Missouri University of Science Technology # Spring 2017 # Lucas Coon, Mark Raymond Jr. # pylint: disable=C, F, I, R, W from datetime import datetime, timedelta from os import system import sys from time import sleep from copy import deepcopy from Scanse import LIDAR # from Vision import * # import "serverClient as SC"#import the client side of the server to recieve the distance data import dronekit import math import os import time import threading import serial import RealSense class DroneAttitude(): def __init__(self, roll, pitch, yaw): self.pitch_deg = pitch self.yaw_deg = yaw self.roll_deg = roll self.pitch = math.radians(pitch) self.yaw = math.radians(yaw) self.roll = math.radians(roll) self.quaternion = self.get_quaternion() def get_quaternion(self): q = [] t0 = math.cos(self.yaw * 0.5) t1 = math.sin(self.yaw * 0.5) t2 = math.cos(self.roll * 0.5) t3 = math.sin(self.roll * 0.5) t4 = math.cos(self.pitch * 0.5) t5 = math.sin(self.pitch * 0.5) w = t0 * t2 * t4 + t1 * t3 * t5 x = t0 * t3 * t4 - t1 * t2 * t5 y = t0 * t2 * t5 + t1 * t3 * t4 z = t1 * t2 * t4 - t0 * t3 * t5 q.append(w) q.append(x) q.append(y) q.append(z) return q class StandardAttitudes(object): level = DroneAttitude(0,0,0) forward = DroneAttitude(0,-5,0) backward = DroneAttitude(0,5,0) left = DroneAttitude(-5, 0, 0) right = DroneAttitude(5, 0, 0) class StandardThrusts(object): none = 0.00 low = 0.25 land = 0.25 hover = 0.525 takeoff = 0.75 full = 1.00 class VehicleStates(object): hover = "HOVER" flying = "FLYING" takeoff = "TAKEOFF" unknown = "UNKNOWN" avoidance = "AVOIDANCE" landing = "LANDING" landed = "LANDED" class Tower(object): SIMULATOR = "tcp:127.0.0.1:5760" USB = "/dev/serial/by-id/usb-3D_Robotics_PX4_FMU_v2.x_0-if00" USB_DEV = "/dev/cu.usbmodem1" BEBOP = "tcp:192.168.42.1:14550" 
STANDARD_ATTITUDE_BIT_FLAGS = 0b00111111 NED_VELOCITY_BIT_FLAGS = 0b0000111111000111 FLIP_ATTITUDE_BIT_FLAGS = 0b00111000 STANDARD_THRUST_CHANGE = 0.05 MAX_TURN_TIME = 5 LAND_ALTITUDE = 0.5 TURN_START_VELOCITY = 3 TURN_RADIUS = 0.5 # Meters STANDARD_ANGLE_ADJUSTMENT = 1.0 MESSAGE_WAIT_TIME = 0.01 ACCEL_NOISE_THRESHOLD = 0.05 MAX_ANGLE_ALL_AXIS = 15.0 BATTERY_FAILSAFE_VOLTAGE = 9.25 STANDARD_SLEEP_TIME = 1 STANDARD_MATCH_ALTITUDE = 2.0 MAV_FRAME_LOCAL_NED = 1 MIN_REALSENSE_DISTANCE_CM = 30 MAX_REALSENSE_DISTANCE_CM = 1000 MIN_LIDAR_DISTANCE = 50 MAX_LIDAR_DISTANCE = 40000 MAV_SENSOR_ROTATION_PITCH_270 = 25 MAV_RANGEFINDER = 10 MAV_PERIPHERAL_ID = 195 GIMBAL_PORTRAIT = "86 0 " TAKEOFF_HEIGHT = 1; def __init__(self): self.start_time = 0 self.flight_log = None self.vehicle_initialized = False self.vehicle = None self.initial_yaw = 0 self.scanField = False self.realsense_range_finder = None self.scanse = None self.LAST_ATTITUDE = StandardAttitudes.level self.LAST_THRUST = StandardThrusts.none self.STATE = VehicleStates.unknown def initialize(self, should_write_to_file=False, enable_realsense=False, enable_lidar=False): """ @purpose: Connect to the flight controller, start the failsafe thread, switch to GUIDED_NOGPS, and open a file to begin logging. 
@args: @returns: """ if(not self.vehicle_initialized): if(should_write_to_file): self.flight_log = open('flight_log.txt', 'w') sys.stdout = self.flight_log print("\nConnecting via USB to PixHawk...") self.vehicle = dronekit.connect(self.BEBOP, wait_ready=True) self.vehicle = dronekit.connect(self.USB, wait_ready=True) if not self.vehicle: print("\nUnable to connect to vehicle.") return self.vehicle.mode = dronekit.VehicleMode("STABILIZE") self.STATE = VehicleStates.landed self.vehicle_initialized = True if(enable_realsense): self.realsense_range_finder = RealSense.RangeFinder() self.realsense_range_finder.initialize_camera() self.vehicle.parameters['RNGFND_TYPE'] = self.MAV_RANGEFINDER if(enable_lidar): self.scanse = LIDAR() self.scanse.connect_to_lidar() self.failsafes = FailsafeController(self) self.failsafes.start() self.start_time = time.time() self.switch_control() print("\nSuccessfully connected to vehicle.") def shutdown(self): """ @purpose: Stop all operations and cleanup the vehicle object. @args: @returns: """ self.failsafes.join() self.vehicle.close() if(self.flight_log): self.flight_log.close() self.vehicle_initialized = False self.start_time = 0 def arm_drone(self): """ @purpose: Arm the vehicle. @args: @returns: """ self.vehicle.armed = True while(not self.vehicle.armed): sleep(self.STANDARD_SLEEP_TIME) def disarm_drone(self): """ @purpose: Disarm the vehicle. @args: @returns: """ self.vehicle.armed = False while(self.vehicle.armed): sleep(self.STANDARD_SLEEP_TIME) def switch_control(self, mode_name="GUIDED_NOGPS"): """ @purpose: Switch the mode to GUIDED_NOGPS and make sure that the failsafe thread is running. @args: @returns: """ if not self.failsafes: self.failsafes = FailsafeController(self) self.failsafes.start() if self.vehicle.mode.name != mode_name: self.vehicle.mode = dronekit.VehicleMode(mode_name) while(self.vehicle.mode.name != mode_name): sleep(self.STANDARD_SLEEP_TIME) def get_uptime(self): """ @purpose: Get up time of this object. 
@args: @returns: """ uptime = time.time() - self.start_time return uptime def map(self, x, in_min, in_max, out_min, out_max): """ @purpose: Re-maps a number from one range to another. @args: x: the number to map in_min: the lower bound of the value's current range in_max: the upper bound of the value's current range out_min: the lower bound of the value's target range out_max: the upper bound of the value's target range @returns: The mapped value. """ return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min def set_angle_thrust(self, attitude, thrust): """ @purpose: Send a specified attitude message to the flight controller. For more information, see http://mavlink.org/messages/common#SET_ATTITUDE_TARGET. @args: attitude: A DroneAtittude object containing a target attitude. thrust: A collective thrust from 0 to 1. Thrust is converted to a climb rate internally by the flight controller. Therefore, thrusts from 0.51 to 1 are climb rates and thrusts from 0.49 to 0 are descent rates. 0.50 attempts to maintain a hover. @returns: """ while(self.vehicle.mode.name != "GUIDED_NOGPS"): sleep(self.STANDARD_SLEEP_TIME) message = self.vehicle.message_factory.set_attitude_target_encode( 0, # Timestamp in milliseconds since system boot (not used). 0, # System ID 0, # Component ID self.STANDARD_ATTITUDE_BIT_FLAGS, # Bit flags. attitude.quaternion, # Attitude quaternion. 0, # Body roll rate. 0, # Body pitch rate. 0, # Body yaw rate. thrust # Collective thrust, from 0-1. 
) self.vehicle.send_mavlink(message) self.vehicle.commands.upload() self.last_attitude = attitude self.last_thrust = thrust #mission one uses the drone, x, y, or z direction, desired speed, distance desired and the height that is wanted #Utilizes "send_ned_velocity()" so this requires an Optical Flow sensor or GPS to be enabled to utilize this command def smo_attitudes(self,distance,height): self.STATE = VehicleStates.takeoff self.arm_drone() self.switch_control() initial_alt = self.vehicle.location.global_relative_frame.alt while((self.vehicle.location.global_relative_frame.alt - initial_alt) < height): self.set_angle_thrust(StandardAttitudes.level, StandardThrusts.takeoff) sleep(self.STANDARD_SLEEP_TIME) print('Reached target altitude:{0:.2f}m'.format(self.vehicle.location.global_relative_frame.alt)) #utilizes desired direction that was input to move self.vehicle.mode = dronekit.VehicleMode("GUIDED_NoGPS") #for X axis initial_lat = self.vehicle.location.global_relative_frame.lat #for Y axis initial_lon = self.vehicle.location.global_relative_frame.lon while((self.vehicle.location.global_relative_frame.lon - initial_lon) < distance): self.set_angle_thrust(StandardAttitudes.forward, StandardThrusts.hover) print "Reached target distance. \nNow Landing!" 
self.land() def smo_guided(self): self.switch_control(mode_name="GUIDED") self.arm_drone() self.vehicle.simple_takeoff(self.STANDARD_MATCH_ALTITUDE) sleep(5) self.send_ned_velocity(0.5, 0, -0.1) self.send_ned_velocity(0.5, 0, -0.1) self.send_ned_velocity(0.5, 0, -0.1) self.send_ned_velocity(0.5, 0, -0.1) self.hover() sleep(5) self.land() # def follow_guided(self, distance, angle): # self.switch_control(mode_name="GUIDED") # initial_angle = 'angle' # initial_distance = 'distance' # #for X axis # initial_lat = self.vehicle.location.global_relative_frame.lat #Left and Right # #for Y axis # initial_lon = self.vehicle.location.global_relative_frame.lon #Forward and Back # initial_hypot = sqrt((initial_angle*initial_angle)+(initial_distance*initial_distance)) # while((sqrt((self.vehicle.location.global_relative_frame.lon*self.vehicle.location.global_relative_frame.lon)+(self.vehicle.location.global_relative_frame.lat*self.vehicle.location.global_relative_frame.lat))) < initial_hypot) # self.send_ned_velocity() # distance = () def send_ned_velocity(self, velocity_x, velocity_y, velocity_z): """ Move vehicle in direction based on specified velocity vectors. 
""" message = self.vehicle.message_factory.set_position_target_local_ned_encode( 0, # time_boot_ms (not used) 0, 0, # target system, target component self.MAV_FRAME_LOCAL_NED, # frame self.NED_VELOCITY_BIT_FLAGS, # type_mask (only speeds enabled) 1, 1, 1, # x, y, z positions velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink) 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink) self.vehicle.send_mavlink(message) self.vehicle.commands.upload() sleep(0.1) def send_distance_message(self): distance = self.realsense_range_finder.get_average_depth() message = self.vehicle.message_factory.distance_sensor_encode( 0, # time since system boot, not used self.MIN_REALSENSE_DISTANCE_CM, # min distance cm self.MAX_REALSENSE_DISTANCE_CM, # max distance cm distance, # current distance, must be int 0, # type = laser 0, # onboard id, not used self.MAV_SENSOR_ROTATION_PITCH_270, # must be set to MAV_SENSOR_ROTATION_PITCH_270 0 # covariance, not used ) self.vehicle.send_mavlink(message) self.vehicle.commands.upload() sleep(0.1) def send_distance_lidar_message(self): distance = None sensor_rotation = None for data in self.scanse.get_lidar_data(): distance = data[0] sensor_rotation = data[1] message = self.vehicle.message_factory.distance_sensor_encode( 0, # time since system boot, not used self.MIN_LIDAR_DISTANCE, # min distance cm self.MAX_LIDAR_DISTANCE, # max distance cm distance, # current distance, must be int 0, # type = laser 0, # onboard id, not used self.MAV_PERIPHERAL_ID, # onboard id, not used sensor_rotation, # sensor rotation 0 # covariance, not used ) self.vehicle.send_mavlink(message) self.vehicle.commands.upload() def hover(self): self.switch_control("GUIDED") self.send_ned_velocity(0, 0, 0) def takeoff(self, target_altitude): self.STATE = VehicleStates.takeoff self.initial_yaw = self.vehicle.attitude.yaw self.arm_drone() self.switch_control() initial_alt = 
self.vehicle.location.global_relative_frame.alt while((self.vehicle.location.global_relative_frame.alt - initial_alt) < target_altitude): self.set_angle_thrust(DroneAttitude(0,0, math.radians(self.initial_yaw)), StandardThrusts.takeoff) sleep(self.STANDARD_SLEEP_TIME) print('Reached target altitude:{0:.2f}m'.format(self.vehicle.location.global_relative_frame.alt)) def guided_takeoff(self, target_altitude): self.initial_yaw = self.vehicle.attitude.yaw self.switch_control(mode_name="GUIDED") self.arm_drone() self.vehicle.simple_takeoff(target_altitude) self.hover() def roomba_takeoff(self): initial_alt = self.vehicle.location.global_relative_frame.alt while((self.vehicle.location.global_relative_frame.alt - initial_alt) < TAKEOFF_HEIGHT): self.set_angle_thrust(DroneAttitude(0,0, math.radians(self.initial_yaw)), StandardThrusts.takeoff) sleep(self.STANDARD_SLEEP_TIME) print('Reached target altitude:{0:.2f}m'.format(self.vehicle.location.global_relative_frame.alt)) def fly_for_time(self, duration, direction, target_velocity, should_hover_on_finish): end_manuever = datetime.now() + timedelta(seconds=duration) self.STATE = VehicleStates.flying self.set_angle_thrust(direction, StandardThrusts.hover) while(datetime.now() < end_manuever): print(self.vehicle.airspeed,) print(self.vehicle.velocity,) updated_attitude = deepcopy(self.LAST_ATTITUDE) if(self.vehicle.airspeed < target_velocity): updated_attitude.pitch_deg -= 1 elif(self.vehicle.airspeed > target_velocity): updated_attitude.pitch_deg += 1 else: updated_attitude.pitch_deg = direction.pitch_deg if(updated_attitude.pitch_deg < -self.MAX_ANGLE_ALL_AXIS): updated_attitude.pitch_deg = -self.MAX_ANGLE_ALL_AXIS if(updated_attitude.pitch_deg > self.MAX_ANGLE_ALL_AXIS): updated_attitude.pitch_deg = self.MAX_ANGLE_ALL_AXIS if(updated_attitude.roll_deg < -self.MAX_ANGLE_ALL_AXIS): updated_attitude.roll_deg = -self.MAX_ANGLE_ALL_AXIS if(updated_attitude.roll_deg > self.MAX_ANGLE_ALL_AXIS): updated_attitude.roll_deg = 
self.MAX_ANGLE_ALL_AXIS if(updated_attitude.yaw_deg < -self.MAX_ANGLE_ALL_AXIS): updated_attitude.yaw_deg = -self.MAX_ANGLE_ALL_AXIS if(updated_attitude.yaw_deg > self.MAX_ANGLE_ALL_AXIS): updated_attitude.yaw_deg = self.MAX_ANGLE_ALL_AXIS updated_attitude.pitch = math.radians(updated_attitude.pitch_deg) updated_attitude.quaternion = updated_attitude.get_quaternion() self.set_angle_thrust(updated_attitude, self.LAST_THRUST) print(updated_attitude.pitch_deg,) if(should_hover_on_finish): self.hover() pass def land(self): self.vehicle.mode = dronekit.VehicleMode("LAND") self.STATE = VehicleStates.landing while((self.vehicle.location.global_relative_frame.alt) >= self.LAND_ALTITUDE): sleep(self.STANDARD_SLEEP_TIME) else: self.STATE = VehicleStates.landed def land_attitude(self): initial_alt = self.vehicle.location.global_relative_frame.alt while((initial_alt - self.vehicle.location.global_relative_frame.alt) >= self.LAND_ALTITUDE): self.set_angle_thrust(StandardAttitudes.level, StandardThrusts.land) print "Disarming Drone" self.disarm_drone() def land_guided(self): initial_alt = self.vehicle.location.global_relative_frame.alt self.switch_control(mode_name="GUIDED") while((initial_alt - self.vehicle.location.global_relative_frame.alt) >= self.LAND_ALTITUDE): self.send_ned_velocity(0, 0, 0.3) print "Disarming Drone" self.disarm_drone() def do_circle_turn(self, desired_angle, direction, duration): if(duration > self.MAX_TURN_TIME): return self.STATE = VehicleStates.flying max_angle = math.radians(desired_angle) altitude_to_hold = self.vehicle.location.global_relative_frame.alt duration = timedelta(seconds=duration) end_manuever = datetime.now() + duration self.fly_for_time(1, StandardAttitudes.forward, self.TURN_START_VELOCITY, False) while(end_manuever <= datetime.now()): change_in_time = end_manuever - datetime.now() current_altitude = self.vehicle.location.global_relative_frame.alt roll_angle = max_angle * (math.cos(self.vehicle.airspeed * change_in_time.seconds) / 
self.TURN_RADIUS) pitch_angle = max_angle * (math.sin(self.vehicle.airspeed * change_in_time.seconds) / self.TURN_RADIUS) roll_angle = math.degrees(roll_angle) pitch_angle = math.degrees(pitch_angle) self.last_attitude.yaw = math.degrees(self.last_attitude.yaw) updated_attitude = DroneAttitude(pitch_angle, self.last_attitude.yaw, roll_angle) self.set_angle_thrust(updated_attitude, StandardThrusts.hover) print("Sent message.") if(current_altitude > altitude_to_hold): max_angle = math.radians(desired_angle + self.STANDARD_ANGLE_ADJUSTMENT) elif(current_altitude < altitude_to_hold): max_angle = math.radians(desired_angle - self.STANDARD_ANGLE_ADJUSTMENT) else: max_angle = math.radians(desired_angle) sleep(self.STANDARD_SLEEP_TIME) self.fly_for_time(1, StandardAttitudes.forward, self.vehicle.airspeed, True) def switch_gimbal_mode(self): gimbal = serial.Serial("/dev/ttyS1", 115200, timeout=10) if self.scanField == False: gimbal.write("86 0 ") gimbal.write(self.GIMBAL_PORTRAIT) gimbal.close() self.scanField = True else: gimbal.write("s") gimbal.close() self.scanField = False def check_battery_voltage(self): if(self.vehicle.battery.voltage < self.BATTERY_FAILSAFE_VOLTAGE): self.land() class FailsafeController(threading.Thread): def __init__(self, atc_instance): self.atc = atc_instance self.stoprequest = threading.Event() super(FailsafeController, self).__init__() def run(self): while not self.stoprequest.isSet(): if self.atc.STATE == VehicleStates.hover or self.atc.STATE == VehicleStates.flying: self.atc.check_battery_voltage() if(self.atc.realsense_range_finder != None): self.atc.send_distance_message() if(self.atc.scanse != None): self.atc.send_distance_lidar_message() try: self.send_frame_to_drone() except: qwertyuiop = 1234 sleep(0.01) def join(self, timeout=None): if self.atc.vehicle.armed: if self.atc.STATE != VehicleStates.landed: self.atc.land() if(self.atc.realsense_range_finder != None): self.atc.realsense_range_finder.shutdown() if(self.atc.scanse != None): 
self.atc.scanse.shutdown() self.stoprequest.set() super(FailsafeController, self).join(timeout)
mit
PlayUAV/MissionPlanner
Lib/encodings/cp1250.py
93
14249
""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1250', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE 
u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL 
LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u20ac' # 0x80 -> EURO SIGN u'\ufffe' # 0x81 -> UNDEFINED u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK u'\ufffe' # 0x83 -> UNDEFINED u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS u'\u2020' # 0x86 -> DAGGER u'\u2021' # 0x87 -> DOUBLE DAGGER u'\ufffe' # 0x88 -> UNDEFINED u'\u2030' # 0x89 -> PER MILLE SIGN u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK u'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE u'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE u'\ufffe' # 0x90 -> UNDEFINED 
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK u'\u2022' # 0x95 -> BULLET u'\u2013' # 0x96 -> EN DASH u'\u2014' # 0x97 -> EM DASH u'\ufffe' # 0x98 -> UNDEFINED u'\u2122' # 0x99 -> TRADE MARK SIGN u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK u'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE u'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON u'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u02c7' # 0xA1 -> CARON u'\u02d8' # 0xA2 -> BREVE u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE u'\xa4' # 0xA4 -> CURRENCY SIGN u'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xa8' # 0xA8 -> DIAERESIS u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\u02db' # 0xB2 -> OGONEK u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE u'\xb4' # 0xB4 -> ACUTE ACCENT u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xb8' # 0xB8 -> CEDILLA u'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT u'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE u'\xc1' # 
0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE u'\u0107' # 0xE6 
-> LATIN SMALL LETTER C WITH ACUTE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA u'\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-3.0
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/docutils/languages/he.py
148
2683
# Author: Meir Kriheli # Id: $Id: he.py 4837 2006-12-26 09:59:41Z sfcben $ # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Hebrew-language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': u'\u05de\u05d7\u05d1\u05e8', 'authors': u'\u05de\u05d7\u05d1\u05e8\u05d9', 'organization': u'\u05d0\u05e8\u05d2\u05d5\u05df', 'address': u'\u05db\u05ea\u05d5\u05d1\u05ea', 'contact': u'\u05d0\u05d9\u05e9 \u05e7\u05e9\u05e8', 'version': u'\u05d2\u05e8\u05e1\u05d4', 'revision': u'\u05de\u05d4\u05d3\u05d5\u05e8\u05d4', 'status': u'\u05e1\u05d8\u05d8\u05d5\u05e1', 'date': u'\u05ea\u05d0\u05e8\u05d9\u05da', 'copyright': u'\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e9\u05de\u05d5\u05e8\u05d5\u05ea', 'dedication': u'\u05d4\u05e7\u05d3\u05e9\u05d4', 'abstract': u'\u05ea\u05e7\u05e6\u05d9\u05e8', 'attention': u'\u05ea\u05e9\u05d5\u05de\u05ea \u05dc\u05d1', 'caution': u'\u05d6\u05d4\u05d9\u05e8\u05d5\u05ea', 'danger': u'\u05e1\u05db\u05e0\u05d4', 'error': u'\u05e9\u05d2\u05d9\u05d0\u05d4' , 'hint': u'\u05e8\u05de\u05d6', 'important': u'\u05d7\u05e9\u05d5\u05d1', 'note': u'\u05d4\u05e2\u05e8\u05d4', 'tip': u'\u05d8\u05d9\u05e4', 'warning': u'\u05d0\u05d6\u05d4\u05e8\u05d4', 'contents': u'\u05ea\u05d5\u05db\u05df'} """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed u'\u05de\u05d7\u05d1\u05e8': 'author', u'\u05de\u05d7\u05d1\u05e8\u05d9': 'authors', u'\u05d0\u05e8\u05d2\u05d5\u05df': 'organization', u'\u05db\u05ea\u05d5\u05d1\u05ea': 'address', u'\u05d0\u05d9\u05e9 \u05e7\u05e9\u05e8': 'contact', u'\u05d2\u05e8\u05e1\u05d4': 'version', u'\u05de\u05d4\u05d3\u05d5\u05e8\u05d4': 'revision', 
u'\u05e1\u05d8\u05d8\u05d5\u05e1': 'status', u'\u05ea\u05d0\u05e8\u05d9\u05da': 'date', u'\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e9\u05de\u05d5\u05e8\u05d5\u05ea': 'copyright', u'\u05d4\u05e7\u05d3\u05e9\u05d4': 'dedication', u'\u05ea\u05e7\u05e6\u05d9\u05e8': 'abstract'} """Hebrew to canonical name mapping for bibliographic fields.""" author_separators = [';', ','] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
agpl-3.0
jcoady9/python-for-android
python3-alpha/python3-src/Lib/encodings/utf_32.py
180
5128
""" Python 'utf-32' Codec """ import codecs, sys ### Codec APIs encode = codecs.utf_32_encode def decode(input, errors='strict'): return codecs.utf_32_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): codecs.IncrementalEncoder.__init__(self, errors) self.encoder = None def encode(self, input, final=False): if self.encoder is None: result = codecs.utf_32_encode(input, self.errors)[0] if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result return self.encoder(input, self.errors)[0] def reset(self): codecs.IncrementalEncoder.reset(self) self.encoder = None def getstate(self): # state info we return to the caller: # 0: stream is in natural order for this platform # 2: endianness hasn't been determined yet # (we're never writing in unnatural order) return (2 if self.encoder is None else 0) def setstate(self, state): if state: self.encoder = None else: if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode class IncrementalDecoder(codecs.BufferedIncrementalDecoder): def __init__(self, errors='strict'): codecs.BufferedIncrementalDecoder.__init__(self, errors) self.decoder = None def _buffer_decode(self, input, errors, final): if self.decoder is None: (output, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, final) if byteorder == -1: self.decoder = codecs.utf_32_le_decode elif byteorder == 1: self.decoder = codecs.utf_32_be_decode elif consumed >= 4: raise UnicodeError("UTF-32 stream does not start with BOM") return (output, consumed) return self.decoder(input, self.errors, final) def reset(self): codecs.BufferedIncrementalDecoder.reset(self) self.decoder = None def getstate(self): # additonal state info from the base class must be None here, # as it isn't passed along to the caller state = codecs.BufferedIncrementalDecoder.getstate(self)[0] # additional 
state info we pass to the caller: # 0: stream is in natural order for this platform # 1: stream is in unnatural order # 2: endianness hasn't been determined yet if self.decoder is None: return (state, 2) addstate = int((sys.byteorder == "big") != (self.decoder is codecs.utf_32_be_decode)) return (state, addstate) def setstate(self, state): # state[1] will be ignored by BufferedIncrementalDecoder.setstate() codecs.BufferedIncrementalDecoder.setstate(self, state) state = state[1] if state == 0: self.decoder = (codecs.utf_32_be_decode if sys.byteorder == "big" else codecs.utf_32_le_decode) elif state == 1: self.decoder = (codecs.utf_32_le_decode if sys.byteorder == "big" else codecs.utf_32_be_decode) else: self.decoder = None class StreamWriter(codecs.StreamWriter): def __init__(self, stream, errors='strict'): self.encoder = None codecs.StreamWriter.__init__(self, stream, errors) def reset(self): codecs.StreamWriter.reset(self) self.encoder = None def encode(self, input, errors='strict'): if self.encoder is None: result = codecs.utf_32_encode(input, errors) if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result else: return self.encoder(input, errors) class StreamReader(codecs.StreamReader): def reset(self): codecs.StreamReader.reset(self) try: del self.decode except AttributeError: pass def decode(self, input, errors='strict'): (object, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, False) if byteorder == -1: self.decode = codecs.utf_32_le_decode elif byteorder == 1: self.decode = codecs.utf_32_be_decode elif consumed>=4: raise UnicodeError("UTF-32 stream does not start with BOM") return (object, consumed) ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
apache-2.0
Leila20/django
tests/multiple_database/models.py
282
2472
from django.contrib.auth.models import User from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Review(models.Model): source = models.CharField(max_length=100) content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() def __str__(self): return self.source class Meta: ordering = ('source',) class PersonManager(models.Manager): def get_by_natural_key(self, name): return self.get(name=name) @python_2_unicode_compatible class Person(models.Model): objects = PersonManager() name = models.CharField(max_length=100) def __str__(self): return self.name class Meta: ordering = ('name',) # This book manager doesn't do anything interesting; it just # exists to strip out the 'extra_arg' argument to certain # calls. This argument is used to establish that the BookManager # is actually getting used when it should be. 
class BookManager(models.Manager): def create(self, *args, **kwargs): kwargs.pop('extra_arg', None) return super(BookManager, self).create(*args, **kwargs) def get_or_create(self, *args, **kwargs): kwargs.pop('extra_arg', None) return super(BookManager, self).get_or_create(*args, **kwargs) @python_2_unicode_compatible class Book(models.Model): objects = BookManager() title = models.CharField(max_length=100) published = models.DateField() authors = models.ManyToManyField(Person) editor = models.ForeignKey(Person, models.SET_NULL, null=True, related_name='edited') reviews = GenericRelation(Review) pages = models.IntegerField(default=100) def __str__(self): return self.title class Meta: ordering = ('title',) @python_2_unicode_compatible class Pet(models.Model): name = models.CharField(max_length=100) owner = models.ForeignKey(Person, models.CASCADE) def __str__(self): return self.name class Meta: ordering = ('name',) class UserProfile(models.Model): user = models.OneToOneField(User, models.SET_NULL, null=True) flavor = models.CharField(max_length=100) class Meta: ordering = ('flavor',)
bsd-3-clause
oberstet/autobahn-python
inject-build-id.py
2
1677
#!python import os if __name__ == '__main__': _EVAR = "AUTOBAHN_BUILD_ID" _SEARCH = "__build__ = u'00000'" _REPLACE = "__build__ = u'{}'" if _EVAR in os.environ: files = [] try: from autobahn import _version except ImportError: pass else: files.append(os.path.abspath(_version.__file__)) fn = 'autobahn/_version.py' if os.path.exists(fn): files.append(os.path.abspath(fn)) done = [] for fn in files: if fn in done: print('Skipping file "{}": already processed'.format(fn)) else: with open(fn) as f: contents = f.read() build_id_stmt = _REPLACE.format(os.environ[_EVAR]) if contents.find(_SEARCH): contents = contents.replace(_SEARCH, build_id_stmt) print(contents) with open(fn, 'w') as f: f.write(contents) f.flush() print('Ok: replaced placeholder build ID in file "{}" with "{}"'.format(fn, build_id_stmt)) done.append(fn) else: if contents.find(build_id_stmt): print('Skipping file "{}": build ID already correct') else: error_msg = 'Error: could not find search string "{}" to inject build ID in file "{}"'.format(_SEARCH, _version.__file__) raise Exception(error_msg) else: print('Skipping injection of build ID: AUTOBAHN_BUILD_ID not set')
mit
Ichimonji10/robottelo
robottelo/ui/hardwaremodel.py
5
2511
"""Implements Hardware Models CRUD in UI""" from robottelo.ui.base import Base, UINoSuchElementError from robottelo.ui.locators import common_locators, locators from robottelo.ui.navigator import Navigator class HardwareModel(Base): """Provides the CRUD functionality for Hardware-Models.""" def create(self, name, hw_model=None, vendor_class=None, info=None): """Creates the Hardware-Models. :param str name: Hardware-Model name. :param str hw_model: The Hardware-Model type. :param str vendor_class: The Hardware-Model's vendor-class. :param str info: some information related to Hardware-Models. """ self.click(locators['hwmodels.new']) if self.wait_until_element(locators['hwmodels.name']): self.find_element(locators['hwmodels.name']).send_keys(name) if hw_model: self.find_element( locators['hwmodels.model']).send_keys(hw_model) if vendor_class: self.find_element( locators['hwmodels.vclass']).send_keys(vendor_class) if info: self.find_element( locators['hwmodels.info']).send_keys(info) self.click(common_locators['submit']) def update(self, old_name, new_name=None): """Updates the Hardware-Models. :param str old_name: Hardware-Model's old-name. :param str new_name: The Hardware-Model's new-name. """ element = self.search(old_name) if element: element.click() if (self.wait_until_element(locators['hwmodels.name']) and new_name): self.field_update('hwmodels.name', new_name) self.click(common_locators['submit']) else: raise UINoSuchElementError( "Could not find hardware-model '%s'" % old_name) def navigate_to_entity(self): """Navigate to Hardware-Models entity page""" Navigator(self.browser).go_to_hardware_models() def _search_locator(self): """Specify locator for Hardware-Models entity search procedure""" return locators['hwmodels.select_name'] def delete(self, name, really=True): """Deletes the Hardware-Models. :param str name: Hardware-Model's name to search. :param bool really: Value required for negative tests. 
""" self.delete_entity( name, really, locators['hwmodels.delete'], )
gpl-3.0
miptliot/edx-platform
common/djangoapps/request_cache/middleware.py
20
4782
""" An implementation of a RequestCache. This cache is reset at the beginning and end of every request. """ import threading import crum from django.utils.encoding import force_text class _RequestCache(threading.local): """ A thread-local for storing the per-request cache. """ def __init__(self): super(_RequestCache, self).__init__() self.data = {} REQUEST_CACHE = _RequestCache() class RequestCache(object): @classmethod def get_request_cache(cls, name=None): """ This method is deprecated. Please use :func:`request_cache.get_cache`. """ if name is None: return REQUEST_CACHE else: return REQUEST_CACHE.data.setdefault(name, {}) @classmethod def get_current_request(cls): """ This method is deprecated. Please use :func:`request_cache.get_request`. """ return crum.get_current_request() @classmethod def clear_request_cache(cls, name=None): """ Empty the request cache. """ if name is None: REQUEST_CACHE.data = {} elif REQUEST_CACHE.data.get(name): REQUEST_CACHE.data[name] = {} def process_request(self, request): self.clear_request_cache() return None def process_response(self, request, response): self.clear_request_cache() return response def process_exception(self, request, exception): # pylint: disable=unused-argument """ Clear the RequestCache after a failed request. """ self.clear_request_cache() return None def request_cached(f): """ A decorator for wrapping a function and automatically handles caching its return value, as well as returning that cached value for subsequent calls to the same function, with the same parameters, within a given request. Notes: - we convert arguments and keyword arguments to their string form to build the cache key, so if you have args/kwargs that can't be converted to strings, you're gonna have a bad time (don't do it) - cache key cardinality depends on the args/kwargs, so if you're caching a function that takes five arguments, you might have deceptively low cache efficiency. prefer function with fewer arguments. 
- we use the default request cache, not a named request cache (this shouldn't matter, but just mentioning it) - benchmark, benchmark, benchmark! if you never measure, how will you know you've improved? or regressed? Arguments: f (func): the function to wrap Returns: func: a wrapper function which will call the wrapped function, passing in the same args/kwargs, cache the value it returns, and return that cached value for subsequent calls with the same args/kwargs within a single request """ return ns_request_cached()(f) def ns_request_cached(namespace=None): """ Same as request_cached above, except an optional namespace can be passed in to compartmentalize the cache. Arguments: namespace (string): An optional namespace to use for the cache. Useful if the caller wants to manage their own sub-cache by, for example, calling RequestCache.clear_request_cache for their own namespace. """ def outer_wrapper(f): """ Outer wrapper that decorates the given function Arguments: f (func): the function to wrap """ def inner_wrapper(*args, **kwargs): """ Wrapper function to decorate with. """ # Check to see if we have a result in cache. If not, invoke our wrapped # function. Cache and return the result to the caller. rcache = RequestCache.get_request_cache(namespace) rcache = rcache.data if namespace is None else rcache cache_key = func_call_cache_key(f, *args, **kwargs) if cache_key in rcache: return rcache.get(cache_key) else: result = f(*args, **kwargs) rcache[cache_key] = result return result return inner_wrapper return outer_wrapper def func_call_cache_key(func, *args, **kwargs): """ Returns a cache key based on the function's module the function's name, and a stringified list of arguments and a query string-style stringified list of keyword arguments. 
""" converted_args = map(force_text, args) converted_kwargs = map(force_text, reduce(list.__add__, map(list, sorted(kwargs.iteritems())), [])) cache_keys = [func.__module__, func.func_name] + converted_args + converted_kwargs return u'.'.join(cache_keys)
agpl-3.0
catapult-project/catapult-csm
third_party/google-endpoints/google/api/control/wsgi.py
7
22713
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """wsgi implement behaviour that provides service control as wsgi middleware. It provides the :class:`Middleware`, which is a WSGI middleware implementation that wraps another WSGI application to uses a provided :class:`google.api.control.client.Client` to provide service control. """ #pylint: disable=too-many-arguments from __future__ import absolute_import from datetime import datetime import httplib import logging import os import socket import uuid import urllib2 import urlparse import wsgiref.util from google.api.auth import suppliers, tokens from . 
import check_request, messages, report_request, service logger = logging.getLogger(__name__) _CONTENT_LENGTH = 'content-length' _DEFAULT_LOCATION = 'global' _METADATA_SERVER_URL = 'http://metadata.google.internal' def _running_on_gce(): headers = {'Metadata-Flavor': 'Google'} try: request = urllib2.Request(_METADATA_SERVER_URL, headers=headers) response = urllib2.urlopen(request) if response.info().getheader('Metadata-Flavor') == 'Google': return True except (urllib2.URLError, socket.error): pass return False def _get_platform(): server_software = os.environ.get('SERVER_SOFTWARE', '') if server_software.startswith('Development'): return report_request.ReportedPlatforms.DEVELOPMENT elif os.environ.get('KUBERNETES_SERVICE_HOST'): return report_request.ReportedPlatforms.GKE elif _running_on_gce(): # We're either in GAE Flex or GCE if os.environ.get('GAE_MODULE_NAME'): return report_request.ReportedPlatforms.GAE_FLEX else: return report_request.ReportedPlatforms.GCE elif os.environ.get('GAE_MODULE_NAME'): return report_request.ReportedPlatforms.GAE_STANDARD return report_request.ReportedPlatforms.UNKNOWN platform = _get_platform() def running_on_devserver(): return platform == report_request.ReportedPlatforms.DEVELOPMENT def add_all(application, project_id, control_client, loader=service.Loaders.FROM_SERVICE_MANAGEMENT): """Adds all endpoints middleware to a wsgi application. Sets up application to use all default endpoints middleware. 
Example: >>> application = MyWsgiApp() # an existing WSGI application >>> >>> # the name of the controlled service >>> service_name = 'my-service-name' >>> >>> # A GCP project with service control enabled >>> project_id = 'my-project-id' >>> >>> # wrap the app for service control >>> from google.api.control import wsgi >>> control_client = client.Loaders.DEFAULT.load(service_name) >>> control_client.start() >>> wrapped_app = add_all(application, project_id, control_client) >>> >>> # now use wrapped_app in place of app Args: application: the wrapped wsgi application project_id: the project_id thats providing service control support control_client: the service control client instance loader (:class:`google.api.control.service.Loader`): loads the service instance that configures this instance's behaviour """ a_service = loader.load() if not a_service: raise ValueError("Failed to load service config") authenticator = _create_authenticator(a_service) wrapped_app = Middleware(application, project_id, control_client) if authenticator: wrapped_app = AuthenticationMiddleware(wrapped_app, authenticator) return EnvironmentMiddleware(wrapped_app, a_service) def _next_operation_uuid(): return uuid.uuid4().hex class EnvironmentMiddleware(object): """A WSGI middleware that sets related variables in the environment. It attempts to add the following vars: - google.api.config.service - google.api.config.service_name - google.api.config.method_registry - google.api.config.reporting_rules - google.api.config.method_info """ # pylint: disable=too-few-public-methods SERVICE = 'google.api.config.service' SERVICE_NAME = 'google.api.config.service_name' METHOD_REGISTRY = 'google.api.config.method_registry' METHOD_INFO = 'google.api.config.method_info' REPORTING_RULES = 'google.api.config.reporting_rules' def __init__(self, application, a_service): """Initializes a new Middleware instance. 
Args: application: the wrapped wsgi application a_service (:class:`google.api.gen.servicecontrol_v1_messages.Service`): a service instance """ if not isinstance(a_service, messages.Service): raise ValueError("service is None or not an instance of Service") self._application = application self._service = a_service method_registry, reporting_rules = self._configure() self._method_registry = method_registry self._reporting_rules = reporting_rules def _configure(self): registry = service.MethodRegistry(self._service) logs, metric_names, label_names = service.extract_report_spec(self._service) reporting_rules = report_request.ReportingRules.from_known_inputs( logs=logs, metric_names=metric_names, label_names=label_names) return registry, reporting_rules def __call__(self, environ, start_response): environ[self.SERVICE] = self._service environ[self.SERVICE_NAME] = self._service.name environ[self.METHOD_REGISTRY] = self._method_registry environ[self.REPORTING_RULES] = self._reporting_rules parsed_uri = urlparse.urlparse(wsgiref.util.request_uri(environ)) http_method = environ.get('REQUEST_METHOD') method_info = self._method_registry.lookup(http_method, parsed_uri.path) if method_info: environ[self.METHOD_INFO] = method_info return self._application(environ, start_response) class Middleware(object): """A WSGI middleware implementation that provides service control. 
Example: >>> app = MyWsgiApp() # an existing WSGI application >>> >>> # the name of the controlled service >>> service_name = 'my-service-name' >>> >>> # A GCP project with service control enabled >>> project_id = 'my-project-id' >>> >>> # wrap the app for service control >>> from google.api.control import client, wsgi, service >>> control_client = client.Loaders.DEFAULT.load(service_name) >>> control_client.start() >>> wrapped_app = wsgi.Middleware(app, control_client, project_id) >>> env_app = wsgi.EnvironmentMiddleware(wrapped,app) >>> >>> # now use env_app in place of app """ # pylint: disable=too-few-public-methods, fixme _NO_API_KEY_MSG = ( 'Method does not allow callers without established identity.' ' Please use an API key or other form of API consumer identity' ' to call this API.' ) def __init__(self, application, project_id, control_client, next_operation_id=_next_operation_uuid, timer=datetime.utcnow): """Initializes a new Middleware instance. Args: application: the wrapped wsgi application project_id: the project_id thats providing service control support control_client: the service control client instance next_operation_id (func): produces the next operation timer (func[[datetime.datetime]]): a func that obtains the current time """ self._application = application self._project_id = project_id self._next_operation_id = next_operation_id self._control_client = control_client self._timer = timer def __call__(self, environ, start_response): # pylint: disable=too-many-locals method_info = environ.get(EnvironmentMiddleware.METHOD_INFO) if not method_info: # just allow the wrapped application to handle the request logger.debug('method_info not present in the wsgi environment' ', no service control') return self._application(environ, start_response) latency_timer = _LatencyTimer(self._timer) latency_timer.start() # Determine if the request can proceed http_method = environ.get('REQUEST_METHOD') parsed_uri = 
urlparse.urlparse(wsgiref.util.request_uri(environ)) app_info = _AppInfo() # TODO: determine if any of the more complex ways of getting the request size # (e.g) buffering and counting the wsgi input stream is more appropriate here try: app_info.request_size = int(environ.get('CONTENT_LENGTH', report_request.SIZE_NOT_SET)) except ValueError: logger.warn('ignored bad content-length: %s', environ.get('CONTENT_LENGTH')) app_info.http_method = http_method app_info.url = parsed_uri check_info = self._create_check_info(method_info, parsed_uri, environ) if not check_info.api_key and not method_info.allow_unregistered_calls: logger.debug("skipping %s, no api key was provided", parsed_uri) error_msg = self._handle_missing_api_key(app_info, start_response) else: check_req = check_info.as_check_request() logger.debug('checking %s with %s', method_info, check_request) check_resp = self._control_client.check(check_req) error_msg = self._handle_check_response(app_info, check_resp, start_response) if error_msg: # send a report request that indicates that the request failed rules = environ.get(EnvironmentMiddleware.REPORTING_RULES) latency_timer.end() report_req = self._create_report_request(method_info, check_info, app_info, latency_timer, rules) logger.debug('scheduling report_request %s', report_req) self._control_client.report(report_req) return error_msg # update the client with the response latency_timer.app_start() # run the application request in an inner handler that sets the status # and response code on app_info def inner_start_response(status, response_headers, exc_info=None): app_info.response_code = int(status.partition(' ')[0]) for name, value in response_headers: if name.lower() == _CONTENT_LENGTH: app_info.response_size = int(value) break return start_response(status, response_headers, exc_info) result = self._application(environ, inner_start_response) # perform reporting, result must be joined otherwise the latency record # is incorrect result = b''.join(result) 
latency_timer.end() app_info.response_size = len(result) rules = environ.get(EnvironmentMiddleware.REPORTING_RULES) report_req = self._create_report_request(method_info, check_info, app_info, latency_timer, rules) logger.debug('scheduling report_request %s', report_req) self._control_client.report(report_req) return result def _create_report_request(self, method_info, check_info, app_info, latency_timer, reporting_rules): # TODO: determine how to obtain the consumer_project_id and the location # correctly report_info = report_request.Info( api_key=check_info.api_key, api_key_valid=app_info.api_key_valid, api_method=method_info.selector, consumer_project_id=self._project_id, # TODO: see above location=_DEFAULT_LOCATION, # TODO: see above method=app_info.http_method, operation_id=check_info.operation_id, operation_name=check_info.operation_name, backend_time=latency_timer.backend_time, overhead_time=latency_timer.overhead_time, platform=platform, producer_project_id=self._project_id, protocol=report_request.ReportedProtocols.HTTP, request_size=app_info.request_size, request_time=latency_timer.request_time, response_code=app_info.response_code, response_size=app_info.response_size, referer=check_info.referer, service_name=check_info.service_name, url=app_info.url ) return report_info.as_report_request(reporting_rules, timer=self._timer) def _create_check_info(self, method_info, parsed_uri, environ): service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME) operation_id = self._next_operation_id() api_key_valid = False api_key = _find_api_key_param(method_info, parsed_uri) if not api_key: api_key = _find_api_key_header(method_info, environ) if not api_key: api_key = _find_default_api_key_param(parsed_uri) if api_key: api_key_valid = True check_info = check_request.Info( api_key=api_key, api_key_valid=api_key_valid, client_ip=environ.get('REMOTE_ADDR', ''), consumer_project_id=self._project_id, # TODO: switch this to producer_project_id operation_id=operation_id, 
operation_name=method_info.selector, referer=environ.get('HTTP_REFERER', ''), service_name=service_name ) return check_info def _handle_check_response(self, app_info, check_resp, start_response): code, detail, api_key_valid = check_request.convert_response( check_resp, self._project_id) if code == httplib.OK: return None # the check was OK # there was problem; the request cannot proceed logger.warn('Check failed %d, %s', code, detail) error_msg = '%d %s' % (code, detail) start_response(error_msg, []) app_info.response_code = code app_info.api_key_valid = api_key_valid return error_msg # the request cannot continue def _handle_missing_api_key(self, app_info, start_response): code = httplib.UNAUTHORIZED detail = self._NO_API_KEY_MSG logger.warn('Check not performed %d, %s', code, detail) error_msg = '%d %s' % (code, detail) start_response(error_msg, []) app_info.response_code = code app_info.api_key_valid = False return error_msg # the request cannot continue class _AppInfo(object): # pylint: disable=too-few-public-methods def __init__(self): self.api_key_valid = True self.response_code = httplib.INTERNAL_SERVER_ERROR self.response_size = report_request.SIZE_NOT_SET self.request_size = report_request.SIZE_NOT_SET self.http_method = None self.url = None class _LatencyTimer(object): def __init__(self, timer): self._timer = timer self._start = None self._app_start = None self._end = None def start(self): self._start = self._timer() def app_start(self): self._app_start = self._timer() def end(self): self._end = self._timer() if self._app_start is None: self._app_start = self._end @property def request_time(self): if self._start and self._end: return self._end - self._start return None @property def overhead_time(self): if self._start and self._app_start: return self._app_start - self._start return None @property def backend_time(self): if self._end and self._app_start: return self._end - self._app_start return None def _find_api_key_param(info, parsed_uri): params = 
info.api_key_url_query_params if not params: return None param_dict = urlparse.parse_qs(parsed_uri.query) if not param_dict: return None for q in params: value = param_dict.get(q) if value: # param's values are lists, assume the first value # is what's needed return value[0] return None _DEFAULT_API_KEYS = ('key', 'api_key') def _find_default_api_key_param(parsed_uri): param_dict = urlparse.parse_qs(parsed_uri.query) if not param_dict: return None for q in _DEFAULT_API_KEYS: value = param_dict.get(q) if value: # param's values are lists, assume the first value # is what's needed return value[0] return None def _find_api_key_header(info, environ): headers = info.api_key_http_header if not headers: return None for h in headers: value = environ.get('HTTP_' + h.upper()) if value: return value # headers have single values return None def _create_authenticator(a_service): """Create an instance of :class:`google.auth.tokens.Authenticator`. Args: a_service (:class:`google.api.gen.servicecontrol_v1_messages.Service`): a service instance """ if not isinstance(a_service, messages.Service): raise ValueError("service is None or not an instance of Service") authentication = a_service.authentication if not authentication: logger.info("authentication is not configured in service, " "authentication checks will be disabled") return issuers_to_provider_ids = {} issuer_uri_configs = {} for provider in authentication.providers: issuer = provider.issuer jwks_uri = provider.jwksUri # Enable openID discovery if jwks_uri is unset open_id = jwks_uri is None issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri) issuers_to_provider_ids[issuer] = provider.id key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs) jwks_supplier = suppliers.JwksSupplier(key_uri_supplier) authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier) return authenticator class AuthenticationMiddleware(object): """A WSGI middleware that does authentication checks for 
incoming requests. In environments where os.environ is replaced with a request-local and thread-independent copy (e.g. Google Appengine), authentication result is added to os.environ so that the wrapped application can make use of the authentication result. """ # pylint: disable=too-few-public-methods USER_INFO = "google.api.auth.user_info" def __init__(self, application, authenticator): """Initializes an authentication middleware instance. Args: application: a WSGI application to be wrapped authenticator (:class:`google.auth.tokens.Authenticator`): an authenticator that authenticates incoming requests """ if not isinstance(authenticator, tokens.Authenticator): raise ValueError("Invalid authenticator") self._application = application self._authenticator = authenticator def __call__(self, environ, start_response): method_info = environ.get(EnvironmentMiddleware.METHOD_INFO) if not method_info or not method_info.auth_info: # No authentication configuration for this method logger.debug("authentication is not configured") return self._application(environ, start_response) auth_token = _extract_auth_token(environ) user_info = None if not auth_token: logger.debug("No auth token is attached to the request") else: try: service_name = environ.get(EnvironmentMiddleware.SERVICE_NAME) user_info = self._authenticator.authenticate(auth_token, method_info.auth_info, service_name) except Exception: # pylint: disable=broad-except logger.debug("Cannot decode and verify the auth token. The backend " "will not be able to retrieve user info", exc_info=True) environ[self.USER_INFO] = user_info # pylint: disable=protected-access if user_info and not isinstance(os.environ, os._Environ): # Set user info into os.environ only if os.environ is replaced # with a request-local copy os.environ[self.USER_INFO] = user_info response = self._application(environ, start_response) # Erase user info from os.environ for safety and sanity. 
if self.USER_INFO in os.environ: del os.environ[self.USER_INFO] return response _ACCESS_TOKEN_PARAM_NAME = "access_token" _BEARER_TOKEN_PREFIX = "Bearer " _BEARER_TOKEN_PREFIX_LEN = len(_BEARER_TOKEN_PREFIX) def _extract_auth_token(environ): # First try to extract auth token from HTTP authorization header. auth_header = environ.get("HTTP_AUTHORIZATION") if auth_header: if auth_header.startswith(_BEARER_TOKEN_PREFIX): return auth_header[_BEARER_TOKEN_PREFIX_LEN:] return # Then try to read auth token from query. parameters = urlparse.parse_qs(environ.get("QUERY_STRING", "")) if _ACCESS_TOKEN_PARAM_NAME in parameters: auth_token, = parameters[_ACCESS_TOKEN_PARAM_NAME] return auth_token
bsd-3-clause
draperlaboratory/stout
functional_tests/tests.py
2
3909
from selenium import webdriver from selenium.webdriver.common.keys import Keys from django.test import LiveServerTestCase from django.contrib.auth.models import User from django.conf import settings from django.conf.urls.static import static import sys from op_tasks.models import Dataset, Product, OpTask, UserProfile, TaskListItem, Experiment import os os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = 'localhost:9000-9200' class NewVisitorTest(LiveServerTestCase): @classmethod def setUpClass(cls): for arg in sys.argv: if 'liveserver' in arg: cls.server_url = 'http://' + arg.split('=')[1] return super(NewVisitorTest, cls).setUpClass() cls.server_url = cls.live_server_url @classmethod def tearDownClass(cls): if cls.server_url == cls.live_server_url: super(NewVisitorTest, cls).tearDownClass() def setUp(self): # REALLY REALLY should do this soon... # TODO find a way to call populate_db experiment = Experiment(name='Test-exp', task_count=2, task_length=30, has_achievements=True, has_intake=True, has_followup=True, auto_tasking=True) experiment.save() test_tasks = [ {'name': 'Functional-Test-OT1', 'ot_survey_url': 'https://www.surveymonkey.com/s/LR37HZG', 'ot_exit_url': 'https://www.surveymonkey.com/s/VD8NQZT'}, {'name': 'Functional-Test-OT2', 'ot_survey_url': 'https://www.surveymonkey.com/s/LR37HZG', 'ot_exit_url': 'https://www.surveymonkey.com/s/VD8NQZT'}] dataset = Dataset(name='Functional-Test-DS', version='v0.1') dataset.save() Product(dataset=dataset, url='/static/testing/index.html', instructions=settings.STATIC_URL + 'testing/instructions.html', team='functional-test-team', name='functional-test-product', version='v0.1').save() for task in test_tasks: newtask = OpTask(dataset=dataset, name=task['name'], survey_url=task['ot_survey_url'], exit_url=task['ot_exit_url']).save() self.browser = webdriver.Firefox() self.browser.implicitly_wait(3) def tearDown(self): self.browser.close() def test_can_register_a_user_with_tasks(self): # browse to online portal 
self.browser.get(self.server_url) # check the title of the webpage self.assertIn('XDATA', self.browser.title) # click the sign in page self.browser.find_element_by_link_text("Register").click() # click to accept the intro material self.browser.find_element_by_id("intro-complete").submit() # register a new user inputemail = self.browser.find_element_by_name("email") inputemail.send_keys("new@test.com") inputpassword = self.browser.find_element_by_name("password") inputpassword.send_keys("new") inputpassword2 = self.browser.find_element_by_name("password2") inputpassword2.send_keys("new") register_button = self.browser.find_element_by_id("id_register_button") register_button.click() # check successful registration saved_users = User.objects.all() self.assertEqual(saved_users.count(), 1) self.assertEqual(saved_users[0].email, 'new@test.com') saved_products = Product.objects.all() self.assertEqual(saved_products.count(), 1) self.assertEqual(saved_products[0].team, 'functional-test-team') saved_task_list_items = TaskListItem.objects.all() self.assertEqual(saved_task_list_items.count(), 2) first_task = saved_task_list_items[0] second_task = saved_task_list_items[1] self.assertEqual(first_task.userprofile, second_task.userprofile) self.assertEqual(first_task.userprofile.user.email, 'new@test.com') self.assertEqual(second_task.userprofile.user.email, 'new@test.com') # def test_can_show_STOUT_and_ALE_integration(self): # experiment admin page load showing experiment setup # what does experiment setup process look like? # user registers and browses to task list # completes first part of experiment # browse back to experiment admin page to show results
apache-2.0
scottpurdy/NAB
tests/integration/corpus_test.py
10
4895
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import copy import numpy as np import os import pandas import shutil import tempfile import unittest import nab.corpus from nab.util import recur class CorpusTest(unittest.TestCase): @classmethod def setUpClass(cls): depth = 3 cls.root = recur(os.path.dirname, os.path.realpath(__file__), depth) cls.corpusSource = os.path.join(cls.root, "tests", "test_data") def setUp(self): self.corpus = nab.corpus.Corpus(self.corpusSource) def testGetDataFiles(self): """ Test the getDataFiles() function, specifically check if corpus.dataFiles is a dictionary containing DataFile objects containing pandas.DataFrame objects to represent the underlying data. """ for df in self.corpus.dataFiles.values(): self.assertIsInstance(df, nab.corpus.DataFile) self.assertIsInstance(df.data, pandas.DataFrame) self.assertEqual(set(df.data.columns.values), set(["timestamp", "value"])) def testAddColumn(self): """ Test the addColumn() function, specificially check if a new column named "test" is added. 
""" columnData = {} for relativePath, df in self.corpus.dataFiles.iteritems(): rows, _ = df.data.shape columnData[relativePath] = pandas.Series(np.zeros(rows)) self.corpus.addColumn("test", columnData, write=False) for df in self.corpus.dataFiles.values(): self.assertEqual(set(df.data.columns.values), set(["timestamp", "value", "test"])) def testRemoveColumn(self): """ Test the removeColumn() function, specifically check if an added column named "test" is removed. """ columnData = {} for relativePath, df in self.corpus.dataFiles.iteritems(): rows, _ = df.data.shape columnData[relativePath] = pandas.Series(np.zeros(rows)) self.corpus.addColumn("test", columnData, write=False) self.corpus.removeColumn("test", write=False) for df in self.corpus.dataFiles.values(): self.assertEqual(set(df.data.columns.values), set(["timestamp", "value"])) def testCopy(self): """ Test the copy() function, specifically check if it copies the whole corpus to another directory and that the copied corpus is the exact same as the original. """ copyLocation = os.path.join(tempfile.mkdtemp(), "test") self.corpus.copy(copyLocation) copyCorpus = nab.corpus.Corpus(copyLocation) for relativePath in self.corpus.dataFiles.keys(): self.assertIn(relativePath, copyCorpus.dataFiles.keys()) self.assertTrue( all(self.corpus.dataFiles[relativePath].data == \ copyCorpus.dataFiles[relativePath].data)) shutil.rmtree(copyLocation) def testAddDataSet(self): """ Test the addDataSet() function, specifically check if it adds a new data file in the correct location in directory and into the dataFiles attribute. 
""" copyLocation = os.path.join(tempfile.mkdtemp(), "test") copyCorpus = self.corpus.copy(copyLocation) for relativePath, df in self.corpus.dataFiles.iteritems(): newPath = relativePath + "_copy" copyCorpus.addDataSet(newPath, copy.deepcopy(df)) self.assertTrue(all(copyCorpus.dataFiles[newPath].data == df.data)) shutil.rmtree(copyLocation) def testGetDataSubset(self): """ Test the getDataSubset() function, specifically check if it returns only dataFiles with relativePaths that contain the query given. """ query1 = "realAWSCloudwatch" subset1 = self.corpus.getDataSubset(query1) self.assertEqual(len(subset1), 2) for relativePath in subset1.keys(): self.assertIn(query1, relativePath) query2 = "artificialWithAnomaly" subset2 = self.corpus.getDataSubset(query2) self.assertEqual(len(subset2), 1) for relativePath in subset2.keys(): self.assertIn(query2, relativePath) if __name__ == '__main__': unittest.main()
agpl-3.0
cpyou/odoo
addons/l10n_ma/l10n_ma.py
336
1952
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class l10n_ma_report(osv.osv): _name = 'l10n.ma.report' _description = 'Report for l10n_ma_kzc' _columns = { 'code': fields.char('Code', size=64), 'name': fields.char('Name'), 'line_ids': fields.one2many('l10n.ma.line', 'report_id', 'Lines', copy=True), } _sql_constraints = [ ('code_uniq', 'unique (code)','The code report must be unique !') ] class l10n_ma_line(osv.osv): _name = 'l10n.ma.line' _description = 'Report Lines for l10n_ma' _columns = { 'code': fields.char('Variable Name', size=64), 'definition': fields.char('Definition'), 'name': fields.char('Name'), 'report_id': fields.many2one('l10n.ma.report', 'Report'), } _sql_constraints = [ ('code_uniq', 'unique (code)', 'The variable name must be unique !') ] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
alxgu/ansible-modules-core
network/nxos/nxos_ping.py
23
13222
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: nxos_ping version_added: "2.1" short_description: Tests reachability using ping from Nexus switch. description: - Tests reachability using ping from switch to a remote destination. extends_documentation_fragment: nxos author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) options: dest: description: - IP address or hostname (resolvable by switch) of remote node. required: true count: description: - Number of packets to send. required: false default: 2 source: description: - Source IP Address. required: false default: null vrf: description: - Outgoing VRF. 
required: false default: null ''' EXAMPLES = ''' # test reachability to 8.8.8.8 using mgmt vrf - nxos_ping: dest=8.8.8.8 vrf=management host=68.170.147.165 # Test reachability to a few different public IPs using mgmt vrf - nxos_ping: dest=nxos_ping vrf=management host=68.170.147.165 with_items: - 8.8.8.8 - 4.4.4.4 - 198.6.1.4 ''' RETURN = ''' action: description: - Show what action has been performed returned: always type: string sample: "PING 8.8.8.8 (8.8.8.8): 56 data bytes" updates: description: Show the command sent returned: always type: list sample: ["ping 8.8.8.8 count 2 vrf management"] count: description: Show amount of packets sent returned: always type: string sample: "2" dest: description: Show the ping destination returned: always type: string sample: "8.8.8.8" rtt: description: Show RTT stats returned: always type: dict sample: {"avg": "6.264","max":"6.564", "min": "5.978"} packets_rx: description: Packets successfully received returned: always type: string sample: "2" packets_tx: description: Packets successfully transmitted returned: always type: string sample: "2" packet_loss: description: Percentage of packets lost returned: always type: string sample: "0.00%" ''' import json import collections # COMMON CODE FOR MIGRATION import re from ansible.module_utils.basic import get_exception from ansible.module_utils.netcfg import NetworkConfig, ConfigLine from ansible.module_utils.shell import ShellError try: from ansible.module_utils.nxos import get_module except ImportError: from ansible.module_utils.nxos import NetworkModule def to_list(val): if isinstance(val, (list, tuple)): return list(val) elif val is not None: return [val] else: return list() class CustomNetworkConfig(NetworkConfig): def expand_section(self, configobj, S=None): if S is None: S = list() S.append(configobj) for child in configobj.children: if child in S: continue self.expand_section(child, S) return S def get_object(self, path): for item in self.items: if item.text == path[-1]: 
parents = [p.text for p in item.parents] if parents == path[:-1]: return item def to_block(self, section): return '\n'.join([item.raw for item in section]) def get_section(self, path): try: section = self.get_section_objects(path) return self.to_block(section) except ValueError: return list() def get_section_objects(self, path): if not isinstance(path, list): path = [path] obj = self.get_object(path) if not obj: raise ValueError('path does not exist in config') return self.expand_section(obj) def add(self, lines, parents=None): """Adds one or lines of configuration """ ancestors = list() offset = 0 obj = None ## global config command if not parents: for line in to_list(lines): item = ConfigLine(line) item.raw = line if item not in self.items: self.items.append(item) else: for index, p in enumerate(parents): try: i = index + 1 obj = self.get_section_objects(parents[:i])[0] ancestors.append(obj) except ValueError: # add parent to config offset = index * self.indent obj = ConfigLine(p) obj.raw = p.rjust(len(p) + offset) if ancestors: obj.parents = list(ancestors) ancestors[-1].children.append(obj) self.items.append(obj) ancestors.append(obj) # add child objects for line in to_list(lines): # check if child already exists for child in ancestors[-1].children: if child.text == line: break else: offset = len(parents) * self.indent item = ConfigLine(line) item.raw = line.rjust(len(line) + offset) item.parents = ancestors ancestors[-1].children.append(item) self.items.append(item) def get_network_module(**kwargs): try: return get_module(**kwargs) except NameError: return NetworkModule(**kwargs) def get_config(module, include_defaults=False): config = module.params['config'] if not config: try: config = module.get_config() except AttributeError: defaults = module.params['include_defaults'] config = module.config.get_config(include_defaults=defaults) return CustomNetworkConfig(indent=2, contents=config) def load_config(module, candidate): config = get_config(module) commands = 
candidate.difference(config) commands = [str(c).strip() for c in commands] save_config = module.params['save'] result = dict(changed=False) if commands: if not module.check_mode: try: module.configure(commands) except AttributeError: module.config(commands) if save_config: try: module.config.save_config() except AttributeError: module.execute(['copy running-config startup-config']) result['changed'] = True result['updates'] = commands return result # END OF COMMON CODE def get_summary(results_list, reference_point): summary_string = results_list[reference_point+1] summary_list = summary_string.split(',') pkts_tx = summary_list[0].split('packets')[0].strip() pkts_rx = summary_list[1].split('packets')[0].strip() pkt_loss = summary_list[2].split('packet')[0].strip() summary = dict(packets_tx=pkts_tx, packets_rx=pkts_rx, packet_loss=pkt_loss) if 'bytes from' not in results_list[reference_point-2]: ping_pass = False else: ping_pass = True return summary, ping_pass def get_rtt(results_list, packet_loss, location): if packet_loss != '100.00%': rtt_string = results_list[location] base = rtt_string.split('=')[1] rtt_list = base.split('/') min_rtt = rtt_list[0].lstrip() avg_rtt = rtt_list[1] max_rtt = rtt_list[2][:-3] rtt = dict(min=min_rtt, avg=avg_rtt, max=max_rtt) else: rtt = dict(min=None, avg=None, max=None) return rtt def get_statistics_summary_line(response_as_list): for each in response_as_list: if '---' in each: index = response_as_list.index(each) return index def execute_show(cmds, module, command_type=None): command_type_map = { 'cli_show': 'json', 'cli_show_ascii': 'text' } try: if command_type: response = module.execute(cmds, command_type=command_type) else: response = module.execute(cmds) except ShellError: clie = get_exception() module.fail_json(msg='Error sending {0}'.format(cmds), error=str(clie)) except AttributeError: try: if command_type: command_type = command_type_map.get(command_type) module.cli.add_commands(cmds, output=command_type) response = 
module.cli.run_commands() else: module.cli.add_commands(cmds, output=command_type) response = module.cli.run_commands() except ShellError: clie = get_exception() module.fail_json(msg='Error sending {0}'.format(cmds), error=str(clie)) return response def execute_show_command_ping(command, module, command_type='cli_show_ascii'): cmds = [command] if module.params['transport'] == 'cli': body = execute_show(cmds, module) elif module.params['transport'] == 'nxapi': body = execute_show(cmds, module, command_type=command_type) return body def get_ping_results(command, module, transport): ping = execute_show_command_ping(command, module)[0] if not ping: module.fail_json(msg="An unexpected error occurred. Check all params.", command=command, destination=module.params['dest'], vrf=module.params['vrf'], source=module.params['source']) elif "can't bind to address" in ping: module.fail_json(msg="Can't bind to source address.", command=command) elif "bad context" in ping: module.fail_json(msg="Wrong VRF name inserted.", command=command, vrf=module.params['vrf']) else: splitted_ping = ping.split('\n') reference_point = get_statistics_summary_line(splitted_ping) summary, ping_pass = get_summary(splitted_ping, reference_point) rtt = get_rtt(splitted_ping, summary['packet_loss'], reference_point+2) return (splitted_ping, summary, rtt, ping_pass) def main(): argument_spec = dict( dest=dict(required=True), count=dict(required=False, default=2), vrf=dict(required=False), source=dict(required=False), state=dict(required=False, choices=['present', 'absent'], default='present'), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) module = get_network_module(argument_spec=argument_spec, supports_check_mode=True) destination = module.params['dest'] count = module.params['count'] vrf = module.params['vrf'] source = module.params['source'] state = module.params['state'] if count: try: if int(count) < 1 or int(count) > 655350: raise ValueError except 
ValueError: module.fail_json(msg="'count' must be an integer between 1 " "and 655350.", count=count) OPTIONS = { 'vrf': vrf, 'count': count, 'source': source } ping_command = 'ping {0}'.format(destination) for command, arg in OPTIONS.iteritems(): if arg: ping_command += ' {0} {1}'.format(command, arg) ping_results, summary, rtt, ping_pass = get_ping_results( ping_command, module, module.params['transport']) packet_loss = summary['packet_loss'] packets_rx = summary['packets_rx'] packets_tx = summary['packets_tx'] results = {} results['updates'] = [ping_command] results['action'] = ping_results[1] results['dest'] = destination results['count'] = count results['packets_tx'] = packets_tx results['packets_rx'] = packets_rx results['packet_loss'] = packet_loss results['rtt'] = rtt results['state'] = module.params['state'] if ping_pass and state == 'absent': module.fail_json(msg="Ping succeeded unexpectedly", results=results) elif not ping_pass and state == 'present': module.fail_json(msg="Ping failed unexpectedly", results=results) else: module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
LethusTI/supportcenter
vendor/mongoengine/mongoengine/django/shortcuts.py
16
1598
from mongoengine.queryset import QuerySet from mongoengine.base import BaseDocument from mongoengine.errors import ValidationError def _get_queryset(cls): """Inspired by django.shortcuts.*""" if isinstance(cls, QuerySet): return cls else: return cls.objects def get_document_or_404(cls, *args, **kwargs): """ Uses get() to return an document, or raises a Http404 exception if the document does not exist. cls may be a Document or QuerySet object. All other passed arguments and keyword arguments are used in the get() query. Note: Like with get(), an MultipleObjectsReturned will be raised if more than one object is found. Inspired by django.shortcuts.* """ queryset = _get_queryset(cls) try: return queryset.get(*args, **kwargs) except (queryset._document.DoesNotExist, ValidationError): from django.http import Http404 raise Http404('No %s matches the given query.' % queryset._document._class_name) def get_list_or_404(cls, *args, **kwargs): """ Uses filter() to return a list of documents, or raise a Http404 exception if the list is empty. cls may be a Document or QuerySet object. All other passed arguments and keyword arguments are used in the filter() query. Inspired by django.shortcuts.* """ queryset = _get_queryset(cls) obj_list = list(queryset.filter(*args, **kwargs)) if not obj_list: from django.http import Http404 raise Http404('No %s matches the given query.' % queryset._document._class_name) return obj_list
gpl-3.0
YuepengGuo/pyspider
pyspider/scheduler/scheduler.py
53
33344
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<i@binux.me> # http://binux.me # Created on 2014-02-07 17:05:11 import os import json import time import logging import itertools from six.moves import queue as Queue from collections import deque from six import iteritems, itervalues from pyspider.libs import counter, utils from .task_queue import TaskQueue logger = logging.getLogger('scheduler') class Scheduler(object): UPDATE_PROJECT_INTERVAL = 5 * 60 default_schedule = { 'priority': 0, 'retries': 3, 'exetime': 0, 'age': -1, 'itag': None, } LOOP_LIMIT = 1000 LOOP_INTERVAL = 0.1 ACTIVE_TASKS = 100 INQUEUE_LIMIT = 0 EXCEPTION_LIMIT = 3 DELETE_TIME = 24 * 60 * 60 def __init__(self, taskdb, projectdb, newtask_queue, status_queue, out_queue, data_path='./data', resultdb=None): self.taskdb = taskdb self.projectdb = projectdb self.resultdb = resultdb self.newtask_queue = newtask_queue self.status_queue = status_queue self.out_queue = out_queue self.data_path = data_path self._send_buffer = deque() self._quit = False self._exceptions = 0 self.projects = dict() self._force_update_project = False self._last_update_project = 0 self.task_queue = dict() self._last_tick = int(time.time()) self._cnt = { "5m_time": counter.CounterManager( lambda: counter.TimebaseAverageEventCounter(30, 10)), "5m": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(30, 10)), "1h": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(60, 60)), "1d": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)), "all": counter.CounterManager( lambda: counter.TotalCounter()), } self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h')) self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d')) self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all')) self._last_dump_cnt = 0 def _update_projects(self): '''Check project update''' now = time.time() if ( not 
self._force_update_project and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now ): return for project in self.projectdb.check_update(self._last_update_project): self._update_project(project) logger.debug("project: %s updated.", project['name']) self._force_update_project = False self._last_update_project = now def _update_project(self, project): '''update one project''' if project['name'] not in self.projects: self.projects[project['name']] = {} self.projects[project['name']].update(project) self.projects[project['name']]['md5sum'] = utils.md5string(project['script']) if not self.projects[project['name']].get('active_tasks', None): self.projects[project['name']]['active_tasks'] = deque(maxlen=self.ACTIVE_TASKS) # load task queue when project is running and delete task_queue when project is stoped if project['status'] in ('RUNNING', 'DEBUG'): if project['name'] not in self.task_queue: self._load_tasks(project['name']) self.task_queue[project['name']].rate = project['rate'] self.task_queue[project['name']].burst = project['burst'] # update project runtime info from processor by sending a _on_get_info # request, result is in status_page.track.save self.on_select_task({ 'taskid': '_on_get_info', 'project': project['name'], 'url': 'data:,_on_get_info', 'status': self.taskdb.SUCCESS, 'fetch': { 'save': ['min_tick', ], }, 'process': { 'callback': '_on_get_info', }, }) else: if project['name'] in self.task_queue: self.task_queue[project['name']].rate = 0 self.task_queue[project['name']].burst = 0 del self.task_queue[project['name']] scheduler_task_fields = ['taskid', 'project', 'schedule', ] def _load_tasks(self, project): '''load tasks from database''' self.task_queue[project] = TaskQueue(rate=0, burst=0) for task in self.taskdb.load_tasks( self.taskdb.ACTIVE, project, self.scheduler_task_fields ): taskid = task['taskid'] _schedule = task.get('schedule', self.default_schedule) priority = _schedule.get('priority', self.default_schedule['priority']) exetime = 
_schedule.get('exetime', self.default_schedule['exetime']) self.task_queue[project].put(taskid, priority, exetime) logger.debug('project: %s loaded %d tasks.', project, len(self.task_queue[project])) if self.projects[project]['status'] in ('RUNNING', 'DEBUG'): self.task_queue[project].rate = self.projects[project]['rate'] self.task_queue[project].burst = self.projects[project]['burst'] else: self.task_queue[project].rate = 0 self.task_queue[project].burst = 0 if project not in self._cnt['all']: status_count = self.taskdb.status_count(project) self._cnt['all'].value( (project, 'success'), status_count.get(self.taskdb.SUCCESS, 0) ) self._cnt['all'].value( (project, 'failed'), status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0) ) self._cnt['all'].value((project, 'pending'), len(self.task_queue[project])) def task_verify(self, task): ''' return False if any of 'taskid', 'project', 'url' is not in task dict or project in not in task_queue ''' for each in ('taskid', 'project', 'url', ): if each not in task or not task[each]: logger.error('%s not in task: %.200r', each, task) return False if task['project'] not in self.task_queue: logger.error('unknown project: %s', task['project']) return False return True def insert_task(self, task): '''insert task into database''' return self.taskdb.insert(task['project'], task['taskid'], task) def update_task(self, task): '''update task in database''' return self.taskdb.update(task['project'], task['taskid'], task) def put_task(self, task): '''put task to task queue''' _schedule = task.get('schedule', self.default_schedule) self.task_queue[task['project']].put( task['taskid'], priority=_schedule.get('priority', self.default_schedule['priority']), exetime=_schedule.get('exetime', self.default_schedule['exetime']) ) def send_task(self, task, force=True): ''' dispatch task to fetcher out queue may have size limit to prevent block, a send_buffer is used ''' try: self.out_queue.put_nowait(task) except Queue.Full: 
if force: self._send_buffer.appendleft(task) else: raise def _check_task_done(self): '''Check status queue''' cnt = 0 try: while True: task = self.status_queue.get_nowait() # check _on_get_info result here if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task: self.projects[task['project']].update(task['track'].get('save') or {}) logger.info( '%s on_get_info %r', task['project'], task['track'].get('save', {}) ) continue elif not self.task_verify(task): continue self.on_task_status(task) cnt += 1 except Queue.Empty: pass return cnt merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime'] def _check_request(self): '''Check new task queue''' tasks = {} while len(tasks) < self.LOOP_LIMIT: try: task = self.newtask_queue.get_nowait() except Queue.Empty: break if isinstance(task, list): _tasks = task else: _tasks = (task, ) for task in _tasks: if not self.task_verify(task): continue if task['taskid'] in self.task_queue[task['project']]: if not task.get('schedule', {}).get('force_update', False): logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task) continue if task['taskid'] in tasks: if not task.get('schedule', {}).get('force_update', False): continue tasks[task['taskid']] = task for task in itervalues(tasks): if self.INQUEUE_LIMIT and len(self.task_queue[task['project']]) >= self.INQUEUE_LIMIT: logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task) continue oldtask = self.taskdb.get_task(task['project'], task['taskid'], fields=self.merge_task_fields) if oldtask: task = self.on_old_request(task, oldtask) else: task = self.on_new_request(task) return len(tasks) def _check_cronjob(self): """Check projects cronjob tick, return True when a new tick is sended""" now = time.time() self._last_tick = int(self._last_tick) if now - self._last_tick < 1: return False self._last_tick += 1 for project in itervalues(self.projects): if project['status'] not in ('DEBUG', 'RUNNING'): continue if 
project.get('min_tick', 0) == 0: continue if self._last_tick % int(project['min_tick']) != 0: continue self.on_select_task({ 'taskid': '_on_cronjob', 'project': project['name'], 'url': 'data:,_on_cronjob', 'status': self.taskdb.SUCCESS, 'fetch': { 'save': { 'tick': self._last_tick, }, }, 'process': { 'callback': '_on_cronjob', }, }) return True request_task_fields = [ 'taskid', 'project', 'url', 'status', 'schedule', 'fetch', 'process', 'track', 'lastcrawltime' ] def _check_select(self): '''Select task to fetch & process''' while self._send_buffer: _task = self._send_buffer.pop() try: # use force=False here to prevent automatic send_buffer append and get exception self.send_task(_task, False) except Queue.Full: self._send_buffer.append(_task) break if self.out_queue.full(): return {} taskids = [] cnt = 0 cnt_dict = dict() limit = self.LOOP_LIMIT for project, task_queue in iteritems(self.task_queue): if cnt >= limit: break # task queue self.task_queue[project].check_update() project_cnt = 0 # check send_buffer here. when not empty, out_queue may blocked. 
Not sending tasks while cnt < limit and project_cnt < limit / 10: taskid = task_queue.get() if not taskid: break taskids.append((project, taskid)) project_cnt += 1 cnt += 1 cnt_dict[project] = project_cnt for project, taskid in taskids: task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields) if not task: continue task = self.on_select_task(task) return cnt_dict def _print_counter_log(self): # print top 5 active counters keywords = ('pending', 'success', 'retry', 'failed') total_cnt = {} project_actives = [] project_fails = [] for key in keywords: total_cnt[key] = 0 for project, subcounter in iteritems(self._cnt['5m']): actives = 0 for key in keywords: cnt = subcounter.get(key, None) if cnt: cnt = cnt.sum total_cnt[key] += cnt actives += cnt project_actives.append((actives, project)) fails = subcounter.get('failed', None) if fails: project_fails.append((fails.sum, project)) top_2_fails = sorted(project_fails, reverse=True)[:2] top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails], reverse=True)[:5 - len(top_2_fails)] log_str = ("in 5m: new:%(pending)d,success:%(success)d," "retry:%(retry)d,failed:%(failed)d" % total_cnt) for _, project in itertools.chain(top_3_actives, top_2_fails): subcounter = self._cnt['5m'][project].to_dict(get_value='sum') log_str += " %s:%d,%d,%d,%d" % (project, subcounter.get('pending', 0), subcounter.get('success', 0), subcounter.get('retry', 0), subcounter.get('failed', 0)) logger.info(log_str) def _dump_cnt(self): '''Dump counters to file''' self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h')) self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d')) self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all')) def _try_dump_cnt(self): '''Dump counters every 60 seconds''' now = time.time() if now - self._last_dump_cnt > 60: self._last_dump_cnt = now self._dump_cnt() self._print_counter_log() def _check_delete(self): '''Check project delete''' now = time.time() for 
project in list(itervalues(self.projects)): if project['status'] != 'STOP': continue if now - project['updatetime'] < self.DELETE_TIME: continue if 'delete' not in self.projectdb.split_group(project['group']): continue logger.warning("deleting project: %s!", project['name']) if project['name'] in self.task_queue: self.task_queue[project['name']].rate = 0 self.task_queue[project['name']].burst = 0 del self.task_queue[project['name']] del self.projects[project['name']] self.taskdb.drop(project['name']) self.projectdb.drop(project['name']) if self.resultdb: self.resultdb.drop(project['name']) def __len__(self): return sum(len(x) for x in itervalues(self.task_queue)) def quit(self): '''Set quit signal''' self._quit = True def run_once(self): '''comsume queues and feed tasks to fetcher, once''' self._update_projects() self._check_task_done() self._check_request() while self._check_cronjob(): pass self._check_select() self._check_delete() self._try_dump_cnt() def run(self): '''Start scheduler loop''' logger.info("loading projects") while not self._quit: try: time.sleep(self.LOOP_INTERVAL) self.run_once() self._exceptions = 0 except KeyboardInterrupt: break except Exception as e: logger.exception(e) self._exceptions += 1 if self._exceptions > self.EXCEPTION_LIMIT: break continue logger.info("scheduler exiting...") self._dump_cnt() def trigger_on_start(self, project): '''trigger an on_start callback of project''' self.newtask_queue.put({ "project": project, "taskid": "on_start", "url": "data:,on_start", "process": { "callback": "on_start", }, }) def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False): '''Start xmlrpc interface''' try: from six.moves.xmlrpc_server import SimpleXMLRPCServer except ImportError: from SimpleXMLRPCServer import SimpleXMLRPCServer server = SimpleXMLRPCServer((bind, port), allow_none=True, logRequests=logRequests) server.register_introspection_functions() server.register_multicall_functions() server.register_function(self.quit, 
'_quit') server.register_function(self.__len__, 'size') def dump_counter(_time, _type): try: return self._cnt[_time].to_dict(_type) except: logger.exception('') server.register_function(dump_counter, 'counter') def new_task(task): if self.task_verify(task): self.newtask_queue.put(task) return True return False server.register_function(new_task, 'newtask') def send_task(task): '''dispatch task to fetcher''' self.send_task(task) return True server.register_function(send_task, 'send_task') def update_project(): self._force_update_project = True server.register_function(update_project, 'update_project') def get_active_tasks(project=None, limit=100): allowed_keys = set(( 'taskid', 'project', 'status', 'url', 'lastcrawltime', 'updatetime', 'track', )) track_allowed_keys = set(( 'ok', 'time', 'follows', 'status_code', )) iters = [iter(x['active_tasks']) for k, x in iteritems(self.projects) if x and (k == project if project else True)] tasks = [next(x, None) for x in iters] result = [] while len(result) < limit and tasks and not all(x is None for x in tasks): updatetime, task = t = max(tasks) i = tasks.index(t) tasks[i] = next(iters[i], None) for key in list(task): if key == 'track': for k in list(task[key].get('fetch', [])): if k not in track_allowed_keys: del task[key]['fetch'][k] for k in list(task[key].get('process', [])): if k not in track_allowed_keys: del task[key]['process'][k] if key in allowed_keys: continue del task[key] result.append(t) # fix for "<type 'exceptions.TypeError'>:dictionary key must be string" # have no idea why return json.loads(json.dumps(result)) server.register_function(get_active_tasks, 'get_active_tasks') server.timeout = 0.5 while not self._quit: server.handle_request() server.server_close() def on_new_request(self, task): '''Called when a new request is arrived''' task['status'] = self.taskdb.ACTIVE self.insert_task(task) self.put_task(task) project = task['project'] self._cnt['5m'].event((project, 'pending'), +1) 
self._cnt['1h'].event((project, 'pending'), +1) self._cnt['1d'].event((project, 'pending'), +1) self._cnt['all'].event((project, 'pending'), +1) logger.info('new task %(project)s:%(taskid)s %(url)s', task) return task def on_old_request(self, task, old_task): '''Called when a crawled task is arrived''' now = time.time() _schedule = task.get('schedule', self.default_schedule) old_schedule = old_task.get('schedule', {}) restart = False schedule_age = _schedule.get('age', self.default_schedule['age']) if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'): restart = True elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now: restart = True elif _schedule.get('force_update'): restart = True if not restart: logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task) return task['status'] = self.taskdb.ACTIVE self.update_task(task) self.put_task(task) project = task['project'] if old_task['status'] != self.taskdb.ACTIVE: self._cnt['5m'].event((project, 'pending'), +1) self._cnt['1h'].event((project, 'pending'), +1) self._cnt['1d'].event((project, 'pending'), +1) if old_task['status'] == self.taskdb.SUCCESS: self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1) elif old_task['status'] == self.taskdb.FAILED: self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1) logger.info('restart task %(project)s:%(taskid)s %(url)s', task) return task def on_task_status(self, task): '''Called when a status pack is arrived''' try: procesok = task['track']['process']['ok'] if not self.task_queue[task['project']].done(task['taskid']): logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task) return None except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), 
task['track']['fetch']['time']) if task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']]['active_tasks'].appendleft((time.time(), task)) return ret def on_task_done(self, task): '''Called when a task is done and success, called by `on_task_status`''' task['status'] = self.taskdb.SUCCESS task['lastcrawltime'] = time.time() if 'schedule' in task: if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']: task['status'] = self.taskdb.ACTIVE next_exetime = task['schedule'].get('age') task['schedule']['exetime'] = time.time() + next_exetime self.put_task(task) else: del task['schedule'] self.update_task(task) project = task['project'] self._cnt['5m'].event((project, 'success'), +1) self._cnt['1h'].event((project, 'success'), +1) self._cnt['1d'].event((project, 'success'), +1) self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1) logger.info('task done %(project)s:%(taskid)s %(url)s', task) return task def on_task_failed(self, task): '''Called when a task is failed, called by `on_task_status`''' if 'schedule' not in task: old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule']) if old_task is None: logging.error('unknown status pack: %s' % task) return task['schedule'] = old_task.get('schedule', {}) retries = task['schedule'].get('retries', self.default_schedule['retries']) retried = task['schedule'].get('retried', 0) if retried == 0: next_exetime = 0 elif retried == 1: next_exetime = 1 * 60 * 60 else: next_exetime = 6 * (2 ** retried) * 60 * 60 if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']: next_exetime = min(next_exetime, task['schedule'].get('age')) elif retried >= retries: next_exetime = -1 if next_exetime < 0: task['status'] = self.taskdb.FAILED task['lastcrawltime'] = time.time() self.update_task(task) project = task['project'] self._cnt['5m'].event((project, 
'failed'), +1) self._cnt['1h'].event((project, 'failed'), +1) self._cnt['1d'].event((project, 'failed'), +1) self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1) logger.info('task failed %(project)s:%(taskid)s %(url)s' % task) return task else: task['schedule']['retried'] = retried + 1 task['schedule']['exetime'] = time.time() + next_exetime task['lastcrawltime'] = time.time() self.update_task(task) self.put_task(task) project = task['project'] self._cnt['5m'].event((project, 'retry'), +1) self._cnt['1h'].event((project, 'retry'), +1) self._cnt['1d'].event((project, 'retry'), +1) # self._cnt['all'].event((project, 'retry'), +1) logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % ( retried, retries), task) return task def on_select_task(self, task): '''Called when a task is selected to fetch & process''' # inject informations about project logger.info('select %(project)s:%(taskid)s %(url)s', task) project_info = self.projects.get(task['project']) assert project_info, 'no such project' task['group'] = project_info.get('group') task['project_md5sum'] = project_info.get('md5sum') task['project_updatetime'] = project_info.get('updatetime', 0) project_info['active_tasks'].appendleft((time.time(), task)) self.send_task(task) return task from tornado import gen class OneScheduler(Scheduler): """ Scheduler Mixin class for one mode overwirted send_task method call processor.on_task(fetcher.fetch(task)) instead of consuming queue """ def _check_select(self): """ interactive mode of select tasks """ if not self.interactive: return super(OneScheduler, self)._check_select() # waiting for running tasks if self.running_task > 0: return is_crawled = [] def run(project=None): return crawl('on_start', project=project) def crawl(url, project=None, **kwargs): """ Crawl given url, same parameters as BaseHandler.crawl url - url or taskid, parameters will be used if in taskdb project - can be ignored if only one project exists. 
""" # looking up the project instance if project is None: if len(self.projects) == 1: project = list(self.projects.keys())[0] else: raise LookupError('You need specify the project: %r' % list(self.projects.keys())) project_data = self.processor.project_manager.get(project) if not project_data: raise LookupError('no such project: %s' % project) # get task package instance = project_data['instance'] instance._reset() task = instance.crawl(url, **kwargs) if isinstance(task, list): raise Exception('url list is not allowed in interactive mode') # check task in taskdb if not kwargs: dbtask = self.taskdb.get_task(task['project'], task['taskid'], fields=self.request_task_fields) if not dbtask: dbtask = self.taskdb.get_task(task['project'], task['url'], fields=self.request_task_fields) if dbtask: task = dbtask # select the task self.on_select_task(task) is_crawled.append(True) shell.ask_exit() def quit_interactive(): '''Quit interactive mode''' is_crawled.append(True) self.interactive = False shell.ask_exit() def quit_pyspider(): '''Close pyspider''' is_crawled[:] = [] shell.ask_exit() shell = utils.get_python_console() shell.interact( 'pyspider shell - Select task\n' 'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n' 'quit_interactive() - Quit interactive mode\n' 'quit_pyspider() - Close pyspider' ) if not is_crawled: self.ioloop.stop() def __getattr__(self, name): """patch for crawl(url, callback=self.index_page) API""" if self.interactive: return name raise AttributeError(name) def on_task_status(self, task): """Ignore not processing error in interactive mode""" if not self.interactive: super(OneScheduler, self).on_task_status(task) try: procesok = task['track']['process']['ok'] except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), 
task['track']['fetch']['time']) if task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']]['active_tasks'].appendleft((time.time(), task)) return ret def init_one(self, ioloop, fetcher, processor, result_worker=None, interactive=False): self.ioloop = ioloop self.fetcher = fetcher self.processor = processor self.result_worker = result_worker self.interactive = interactive self.running_task = 0 @gen.coroutine def do_task(self, task): self.running_task += 1 result = yield gen.Task(self.fetcher.fetch, task) type, task, response = result.args self.processor.on_task(task, response) # do with message while not self.processor.inqueue.empty(): _task, _response = self.processor.inqueue.get() self.processor.on_task(_task, _response) # do with results while not self.processor.result_queue.empty(): _task, _result = self.processor.result_queue.get() if self.result_worker: self.result_worker.on_result(_task, _result) self.running_task -= 1 def send_task(self, task, force=True): if self.fetcher.http_client.free_size() <= 0: if force: self._send_buffer.appendleft(task) else: raise self.outqueue.Full self.ioloop.add_future(self.do_task(task), lambda x: x.result()) def run(self): import tornado.ioloop tornado.ioloop.PeriodicCallback(self.run_once, 100, io_loop=self.ioloop).start() self.ioloop.start() def quit(self): self.ioloop.stop() logger.info("scheduler exiting...")
apache-2.0
Nikoli/youtube-dl
youtube_dl/extractor/podomatic.py
198
2327
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import int_or_none class PodomaticIE(InfoExtractor): IE_NAME = 'podomatic' _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)' _TESTS = [ { 'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00', 'md5': '84bb855fcf3429e6bf72460e1eed782d', 'info_dict': { 'id': '2009-01-02T16_03_35-08_00', 'ext': 'mp3', 'uploader': 'Science Teaching Tips', 'uploader_id': 'scienceteachingtips', 'title': '64. When the Moon Hits Your Eye', 'duration': 446, } }, { 'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00', 'md5': 'd2cf443931b6148e27638650e2638297', 'info_dict': { 'id': '2013-11-15T16_31_21-08_00', 'ext': 'mp3', 'uploader': 'Ostbahnhof / Techno Mix', 'uploader_id': 'ostbahnhof', 'title': 'Einunddreizig', 'duration': 3799, } }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') channel = mobj.group('channel') json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' + '?permalink=true&rtmp=0') % (mobj.group('proto'), channel, video_id)) data_json = self._download_webpage( json_url, video_id, 'Downloading video info') data = json.loads(data_json) video_url = data['downloadLink'] if not video_url: video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation']) uploader = data['podcast'] title = data['title'] thumbnail = data['imageLocation'] duration = int_or_none(data.get('length'), 1000) return { 'id': video_id, 'url': video_url, 'title': title, 'uploader': uploader, 'uploader_id': channel, 'thumbnail': thumbnail, 'duration': duration, }
unlicense
qifeigit/scikit-learn
sklearn/externals/joblib/pool.py
237
23894
"""Custom implementation of multiprocessing.Pool with custom pickler This module provides efficient ways of working with data stored in shared memory with numpy.memmap arrays without inducing any memory copy between the parent and child processes. This module should not be imported if multiprocessing is not available as it implements subclasses of multiprocessing Pool that uses a custom alternative to SimpleQueue. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Copyright: 2012, Olivier Grisel # License: BSD 3 clause from mmap import mmap import errno import os import stat import sys import threading import atexit import tempfile import shutil try: # Python 2 compat from cPickle import loads from cPickle import dumps except ImportError: from pickle import loads from pickle import dumps import copyreg # Customizable pure Python pickler in Python 2 # customizable C-optimized pickler under Python 3.3+ from pickle import Pickler from pickle import HIGHEST_PROTOCOL from io import BytesIO from ._multiprocessing_helpers import mp, assert_spawning # We need the class definition to derive from it not the multiprocessing.Pool # factory function from multiprocessing.pool import Pool try: import numpy as np from numpy.lib.stride_tricks import as_strided except ImportError: np = None from .numpy_pickle import load from .numpy_pickle import dump from .hashing import hash # Some system have a ramdisk mounted by default, we can use it instead of /tmp # as the default folder to dump big arrays to share with subprocesses SYSTEM_SHARED_MEM_FS = '/dev/shm' # Folder and file permissions to chmod temporary files generated by the # memmaping pool. Only the owner of the Python process can access the # temporary files and folder. 
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR ############################################################################### # Support for efficient transient pickling of numpy data structures def _get_backing_memmap(a): """Recursively look up the original np.memmap instance base if any""" b = getattr(a, 'base', None) if b is None: # TODO: check scipy sparse datastructure if scipy is installed # a nor its descendants do not have a memmap base return None elif isinstance(b, mmap): # a is already a real memmap instance. return a else: # Recursive exploration of the base ancestry return _get_backing_memmap(b) def has_shareable_memory(a): """Return True if a is backed by some mmap buffer directly or not""" return _get_backing_memmap(a) is not None def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides, total_buffer_len): """Reconstruct an array view on a memmory mapped file""" if mode == 'w+': # Do not zero the original data when unpickling mode = 'r+' if strides is None: # Simple, contiguous memmap return np.memmap(filename, dtype=dtype, shape=shape, mode=mode, offset=offset, order=order) else: # For non-contiguous data, memmap the total enclosing buffer and then # extract the non-contiguous view with the stride-tricks API base = np.memmap(filename, dtype=dtype, shape=total_buffer_len, mode=mode, offset=offset, order=order) return as_strided(base, shape=shape, strides=strides) def _reduce_memmap_backed(a, m): """Pickling reduction for memmap backed arrays a is expected to be an instance of np.ndarray (or np.memmap) m is expected to be an instance of np.memmap on the top of the ``base`` attribute ancestry of a. ``m.base`` should be the real python mmap object. 
""" # offset that comes from the striding differences between a and m a_start, a_end = np.byte_bounds(a) m_start = np.byte_bounds(m)[0] offset = a_start - m_start # offset from the backing memmap offset += m.offset if m.flags['F_CONTIGUOUS']: order = 'F' else: # The backing memmap buffer is necessarily contiguous hence C if not # Fortran order = 'C' if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']: # If the array is a contiguous view, no need to pass the strides strides = None total_buffer_len = None else: # Compute the total number of items to map from which the strided # view will be extracted. strides = a.strides total_buffer_len = (a_end - a_start) // a.itemsize return (_strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order, a.shape, strides, total_buffer_len)) def reduce_memmap(a): """Pickle the descriptors of a memmap instance to reopen on same file""" m = _get_backing_memmap(a) if m is not None: # m is a real mmap backed memmap instance, reduce a preserving striding # information return _reduce_memmap_backed(a, m) else: # This memmap instance is actually backed by a regular in-memory # buffer: this can happen when using binary operators on numpy.memmap # instances return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),)) class ArrayMemmapReducer(object): """Reducer callable to dump large arrays to memmap files. Parameters ---------- max_nbytes: int Threshold to trigger memmaping of large arrays to files created a folder. temp_folder: str Path of a folder where files for backing memmaped arrays are created. mmap_mode: 'r', 'r+' or 'c' Mode for the created memmap datastructure. See the documentation of numpy.memmap for more details. Note: 'w+' is coerced to 'r+' automatically to avoid zeroing the data on unpickling. verbose: int, optional, 0 by default If verbose > 0, memmap creations are logged. If verbose > 1, both memmap creations, reuse and array pickling are logged. 
context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. It might not be the case when using the MemmapingPool API directly. prewarm: bool, optional, False by default. Force a read on newly memmaped array to make sure that OS pre-cache it memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. """ def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0, context_id=None, prewarm=True): self._max_nbytes = max_nbytes self._temp_folder = temp_folder self._mmap_mode = mmap_mode self.verbose = int(verbose) self._context_id = context_id self._prewarm = prewarm def __call__(self, a): m = _get_backing_memmap(a) if m is not None: # a is already backed by a memmap file, let's reuse it directly return _reduce_memmap_backed(a, m) if (not a.dtype.hasobject and self._max_nbytes is not None and a.nbytes > self._max_nbytes): # check that the folder exists (lazily create the pool temp folder # if required) try: os.makedirs(self._temp_folder) os.chmod(self._temp_folder, FOLDER_PERMISSIONS) except OSError as e: if e.errno != errno.EEXIST: raise e # Find a unique, concurrent safe filename for writing the # content of this array only once. if self._context_id is not None: marker = self._context_id else: marker = hash(a) basename = "%d-%d-%d-%s.pkl" % ( os.getpid(), id(threading.current_thread()), id(a), marker) filename = os.path.join(self._temp_folder, basename) # In case the same array with the same content is passed several # times to the pool subprocess children, serialize it only once # XXX: implement an explicit reference counting scheme to make it # possible to delete temporary files as soon as the workers are # done processing this data. 
if not os.path.exists(filename): if self.verbose > 0: print("Memmaping (shape=%r, dtype=%s) to new file %s" % ( a.shape, a.dtype, filename)) for dumped_filename in dump(a, filename): os.chmod(dumped_filename, FILE_PERMISSIONS) if self._prewarm: # Warm up the data to avoid concurrent disk access in # multiple children processes load(filename, mmap_mode=self._mmap_mode).max() elif self.verbose > 1: print("Memmaping (shape=%s, dtype=%s) to old file %s" % ( a.shape, a.dtype, filename)) # Let's use the memmap reducer return reduce_memmap(load(filename, mmap_mode=self._mmap_mode)) else: # do not convert a into memmap, let pickler do its usual copy with # the default system pickler if self.verbose > 1: print("Pickling array (shape=%r, dtype=%s)." % ( a.shape, a.dtype)) return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) ############################################################################### # Enable custom pickling in Pool queues class CustomizablePickler(Pickler): """Pickler that accepts custom reducers. HIGHEST_PROTOCOL is selected by default as this pickler is used to pickle ephemeral datastructures for interprocess communication hence no backward compatibility is required. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ # We override the pure Python pickler as its the only way to be able to # customize the dispatch table without side effects in Python 2.6 # to 3.2. For Python 3.3+ leverage the new dispatch_table # feature from http://bugs.python.org/issue14166 that makes it possible # to use the C implementation of the Pickler which is faster. 
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): Pickler.__init__(self, writer, protocol=protocol) if reducers is None: reducers = {} if hasattr(Pickler, 'dispatch'): # Make the dispatch registry an instance level attribute instead of # a reference to the class dictionary under Python 2 self.dispatch = Pickler.dispatch.copy() else: # Under Python 3 initialize the dispatch table with a copy of the # default registry self.dispatch_table = copyreg.dispatch_table.copy() for type, reduce_func in reducers.items(): self.register(type, reduce_func) def register(self, type, reduce_func): if hasattr(Pickler, 'dispatch'): # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. def dispatcher(self, obj): reduced = reduce_func(obj) self.save_reduce(obj=obj, *reduced) self.dispatch[type] = dispatcher else: self.dispatch_table[type] = reduce_func class CustomizablePicklingQueue(object): """Locked Pipe implementation that uses a customizable pickler. This class is an alternative to the multiprocessing implementation of SimpleQueue in order to make it possible to pass custom pickling reducers, for instance to avoid memory copy when passing memmory mapped datastructures. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, context, reducers=None): self._reducers = reducers self._reader, self._writer = context.Pipe(duplex=False) self._rlock = context.Lock() if sys.platform == 'win32': self._wlock = None else: self._wlock = context.Lock() self._make_methods() def __getstate__(self): assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock, self._reducers) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state self._make_methods() def empty(self): return not self._reader.poll() def _make_methods(self): self._recv = recv = self._reader.recv racquire, rrelease = self._rlock.acquire, self._rlock.release def get(): racquire() try: return recv() finally: rrelease() self.get = get if self._reducers: def send(obj): buffer = BytesIO() CustomizablePickler(buffer, self._reducers).dump(obj) self._writer.send_bytes(buffer.getvalue()) self._send = send else: self._send = send = self._writer.send if self._wlock is None: # writes to a message oriented win32 pipe are atomic self.put = send else: wlock_acquire, wlock_release = ( self._wlock.acquire, self._wlock.release) def put(obj): wlock_acquire() try: return send(obj) finally: wlock_release() self.put = put class PicklingPool(Pool): """Pool implementation with customizable pickling reducers. This is useful to control how data is shipped between processes and makes it possible to use shared memory without useless copies induces by the default pickling methods of the original objects passed as arguments to dispatch. `forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() self._forward_reducers = forward_reducers self._backward_reducers = backward_reducers poolargs = dict(processes=processes) poolargs.update(kwargs) super(PicklingPool, self).__init__(**poolargs) def _setup_queues(self): context = getattr(self, '_ctx', mp) self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers) self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers) self._quick_put = self._inqueue._send self._quick_get = self._outqueue._recv def delete_folder(folder_path): """Utility function to cleanup a temporary folder if still existing""" if os.path.exists(folder_path): shutil.rmtree(folder_path) class MemmapingPool(PicklingPool): """Process pool that shares large arrays to avoid memory copy. This drop-in replacement for `multiprocessing.pool.Pool` makes it possible to work efficiently with shared memory in a numpy context. Existing instances of numpy.memmap are preserved: the child suprocesses will have access to the same shared memory in the original mode except for the 'w+' mode that is automatically transformed as 'r+' to avoid zeroing the original data upon instantiation. Furthermore large arrays from the parent process are automatically dumped to a temporary folder on the filesystem such as child processes to access their content via memmaping (file system backed shared memory). Note: it is important to call the terminate method to collect the temporary folder used by the pool. Parameters ---------- processes: int, optional Number of worker processes running concurrently in the pool. initializer: callable, optional Callable executed on worker process creation. initargs: tuple, optional Arguments passed to the initializer callable. 
temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. max_nbytes int or None, optional, 1e6 by default Threshold on the size of arrays passed to the workers that triggers automated memmory mapping in temp_folder. Use None to disable memmaping of large arrays. forward_reducers: dictionary, optional Reducers used to pickle objects passed from master to worker processes: see below. backward_reducers: dictionary, optional Reducers used to pickle return values from workers back to the master process. verbose: int, optional Make it possible to monitor how the communication of numpy arrays with the subprocess is handled (pickling or memmaping) context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. prewarm: bool or str, optional, "auto" by default. If True, force a read on newly memmaped array to make sure that OS pre- cache it in memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. If "auto" (by default), prewarm is set to True, unless the Linux shared memory partition /dev/shm is available and used as temp_folder. 
`forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, mmap_mode='r', forward_reducers=None, backward_reducers=None, verbose=0, context_id=None, prewarm=False, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() # Prepare a sub-folder name for the serialization of this particular # pool instance (do not create in advance to spare FS write access if # no array is to be dumped): use_shared_mem = False pool_folder_name = "joblib_memmaping_pool_%d_%d" % ( os.getpid(), id(self)) if temp_folder is None: temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) if temp_folder is None: if os.path.exists(SYSTEM_SHARED_MEM_FS): try: temp_folder = SYSTEM_SHARED_MEM_FS pool_folder = os.path.join(temp_folder, pool_folder_name) if not os.path.exists(pool_folder): os.makedirs(pool_folder) use_shared_mem = True except IOError: # Missing rights in the the /dev/shm partition, # fallback to regular temp folder. 
temp_folder = None if temp_folder is None: # Fallback to the default tmp folder, typically /tmp temp_folder = tempfile.gettempdir() temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) pool_folder = os.path.join(temp_folder, pool_folder_name) self._temp_folder = pool_folder # Register the garbage collector at program exit in case caller forgets # to call terminate explicitly: note we do not pass any reference to # self to ensure that this callback won't prevent garbage collection of # the pool instance and related file handler resources such as POSIX # semaphores and pipes atexit.register(lambda: delete_folder(pool_folder)) if np is not None: # Register smart numpy.ndarray reducers that detects memmap backed # arrays and that is alse able to dump to memmap large in-memory # arrays over the max_nbytes threshold if prewarm == "auto": prewarm = not use_shared_mem forward_reduce_ndarray = ArrayMemmapReducer( max_nbytes, pool_folder, mmap_mode, verbose, context_id=context_id, prewarm=prewarm) forward_reducers[np.ndarray] = forward_reduce_ndarray forward_reducers[np.memmap] = reduce_memmap # Communication from child process to the parent process always # pickles in-memory numpy.ndarray without dumping them as memmap # to avoid confusing the caller and make it tricky to collect the # temporary folder backward_reduce_ndarray = ArrayMemmapReducer( None, pool_folder, mmap_mode, verbose) backward_reducers[np.ndarray] = backward_reduce_ndarray backward_reducers[np.memmap] = reduce_memmap poolargs = dict( processes=processes, forward_reducers=forward_reducers, backward_reducers=backward_reducers) poolargs.update(kwargs) super(MemmapingPool, self).__init__(**poolargs) def terminate(self): super(MemmapingPool, self).terminate() delete_folder(self._temp_folder)
bsd-3-clause
argonemyth/sentry
tests/sentry/api/endpoints/test_organization_member_details.py
25
4338
from __future__ import absolute_import from django.core.urlresolvers import reverse from mock import patch from sentry.models import ( AuthProvider, OrganizationMember, OrganizationMemberType ) from sentry.testutils import APITestCase class UpdateOrganizationMemberTest(APITestCase): @patch('sentry.models.OrganizationMember.send_invite_email') def test_reinvite_pending_member(self, mock_send_invite_email): self.login_as(user=self.user) organization = self.create_organization(name='foo', owner=self.user) member_om = OrganizationMember.objects.create( organization=organization, email='foo@example.com', type=OrganizationMemberType.MEMBER, has_global_access=False, ) path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id]) self.login_as(self.user) resp = self.client.put(path, data={'reinvite': 1}) assert resp.status_code == 204 mock_send_invite_email.assert_called_once_with() @patch('sentry.models.OrganizationMember.send_sso_link_email') def test_reinvite_sso_link(self, mock_send_sso_link_email): self.login_as(user=self.user) organization = self.create_organization(name='foo', owner=self.user) member = self.create_user('bar@example.com') member_om = OrganizationMember.objects.create( organization=organization, user=member, type=OrganizationMemberType.MEMBER, has_global_access=False, ) AuthProvider.objects.create(organization=organization, provider='dummy') path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id]) self.login_as(self.user) resp = self.client.put(path, data={'reinvite': 1}) assert resp.status_code == 204 mock_send_sso_link_email.assert_called_once_with() @patch('sentry.models.OrganizationMember.send_sso_link_email') def test_cannot_reinvite_normal_member(self, mock_send_sso_link_email): self.login_as(user=self.user) organization = self.create_organization(name='foo', owner=self.user) member = self.create_user('bar@example.com') member_om = OrganizationMember.objects.create( 
organization=organization, user=member, type=OrganizationMemberType.MEMBER, has_global_access=False, ) path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id]) self.login_as(self.user) resp = self.client.put(path, data={'reinvite': 1}) assert resp.status_code == 400 class DeleteOrganizationMemberTest(APITestCase): def test_simple(self): self.login_as(user=self.user) organization = self.create_organization(name='foo', owner=self.user) member = self.create_user('bar@example.com') member_om = OrganizationMember.objects.create( organization=organization, user=member, type=OrganizationMemberType.MEMBER, has_global_access=False, ) path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, member_om.id]) self.login_as(self.user) resp = self.client.delete(path) assert resp.status_code == 204 assert not OrganizationMember.objects.filter(id=member_om.id).exists() def test_cannot_delete_only_owner(self): self.login_as(user=self.user) organization = self.create_organization(name='foo', owner=self.user) # create a pending member, which shouldn't be counted in the checks OrganizationMember.objects.create( organization=organization, type=OrganizationMemberType.OWNER, email='bar@example.com', ) owner_om = OrganizationMember.objects.get( organization=organization, user=self.user, ) path = reverse('sentry-api-0-organization-member-details', args=[organization.slug, owner_om.id]) self.login_as(self.user) resp = self.client.delete(path) assert resp.status_code == 403 assert OrganizationMember.objects.filter(id=owner_om.id).exists()
bsd-3-clause
DES-SL/EasyLens
docs/conf.py
1
8340
# -*- coding: utf-8 -*- # # complexity documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) cwd = os.getcwd() parent = os.path.dirname(cwd) sys.path.insert(0, parent) import lensDES # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'DESlens' copyright = u'2015, ETH Zurich, Institute for Astronomy' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = lensDES.__version__ # The full version, including alpha/beta/rc tags. release = lensDES.__version__ # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
# -- HTML output options (continued) ---------------------------------------

# The commented settings below keep their Sphinx defaults; uncomment one to
# override it for this project.
#html_short_title = None
#html_logo = None
#html_favicon = None

# Custom static files (such as style sheets), relative to this directory.
# They are copied after the builtin static files, so a file named
# "default.css" here would shadow the builtin "default.css".
html_static_path = ['_static']

#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_sidebars = {}
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None

# Output file base name for the HTML help builder.
htmlhelp_basename = 'lensDESdoc'


# -- LaTeX output options --------------------------------------------------

# Every key ('papersize', 'pointsize', 'preamble', ...) is left at its
# default; the dict is intentionally empty.
latex_elements = {}

# One tuple per LaTeX document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'lensDES.tex', u'DESlens Documentation',
     u'Simon Birrer', 'manual'),
]

#latex_logo = None
#latex_use_parts = False
#latex_show_pagerefs = False
#latex_show_urls = False
#latex_appendices = []
#latex_domain_indices = True


# -- Manual page output options --------------------------------------------

# One tuple per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'lensDES', u'DESlens Documentation', [u'Simon Birrer'], 1)
]

#man_show_urls = False


# -- Texinfo output options ------------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'lensDES', u'DESlens Documentation', u'Simon Birrer',
     'lensDES', 'One line description of project.', 'Miscellaneous'),
]

#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False

# Prefer the ETH Sphinx theme when it is installed; otherwise fall back to
# the builtin default theme.
try:
    import sphinx_eth_theme
except ImportError:
    html_theme = 'default'
else:
    html_theme = "sphinx_eth_theme"
    html_theme_path = [sphinx_eth_theme.get_html_theme_path()]
mit
yackj/GameAnalysis
test/gameio_test.py
1
26631
import copy import json import warnings import numpy as np import pytest from gameanalysis import gameio from gameanalysis import rsgame from gameanalysis import utils SERIAL = gameio.gameserializer(['role'], [['strat1', 'strat2']]) SERIAL2 = gameio.gameserializer(['a', 'b'], [['bar', 'foo'], ['baz']]) GAME = rsgame.samplegame( [2], [2], [[2, 0], [1, 1], [0, 2]], [ [[[-1, 0, 1], [0, 0, 0]], [[9, 10, 11], [21, 20, 19]]], [[[0, 0, 0, 0], [32, 28, 30, 30]]], ], ) BASEGAME_JSON = { 'players': { 'role': 2, }, 'strategies': { 'role': [ 'strat1', 'strat2', ], }, } GAME_JSON = { 'players': { 'role': 2, }, 'strategies': { 'role': [ 'strat1', 'strat2', ], }, 'profiles': [ { 'role': [ ('strat1', 2, 0.0), ], }, { 'role': [ ('strat1', 1, 10.0), ('strat2', 1, 20.0), ], }, { 'role': [ ('strat2', 2, 30.0), ], }, ], } SAMPLEGAME_JSON = { 'players': { 'role': 2, }, 'strategies': { 'role': [ 'strat1', 'strat2', ], }, 'profiles': [ { 'role': [ ('strat1', 2, [-1.0, 0.0, 1.0]), ], }, { 'role': [ ('strat1', 1, [9.0, 10.0, 11.0]), ('strat2', 1, [21.0, 20.0, 19.0]), ], }, { 'role': [ ('strat2', 2, [32.0, 28.0, 30.0, 30.0]), ], }, ], } EMPTYGAME_JSON = { 'roles': [ { 'name': 'role', 'strategies': [ 'strat1', 'strat2', ], 'count': 2, }, ], } SUMMARYGAME_JSON = { 'roles': [ { 'name': 'role', 'strategies': [ 'strat1', 'strat2', ], 'count': 2, }, ], 'profiles': [ { 'symmetry_groups': [ { 'payoff': 0, 'count': 2, 'strategy': 'strat1', 'role': 'role', }, ], }, { 'symmetry_groups': [ { 'payoff': 10, 'count': 1, 'strategy': 'strat1', 'role': 'role', }, { 'payoff': 20, 'count': 1, 'strategy': 'strat2', 'role': 'role', }, ], }, { 'symmetry_groups': [ { 'payoff': 30, 'count': 2, 'strategy': 'strat2', 'role': 'role', }, ], }, ], } OBSERVATIONGAME_JSON = { 'roles': [ { 'name': 'role', 'strategies': [ 'strat1', 'strat2', ], 'count': 2, }, ], 'profiles': [ { 'symmetry_groups': [ { 'strategy': 'strat1', 'id': 0, 'role': 'role', 'count': 2, }, ], 'observations': [ { 'symmetry_groups': [ { 'id': 0, 'payoff': 
-1, }, ], }, { 'symmetry_groups': [ { 'id': 0, 'payoff': 0, }, ], }, { 'symmetry_groups': [ { 'id': 0, 'payoff': 1, }, ], }, ], }, { 'symmetry_groups': [ { 'strategy': 'strat1', 'id': 1, 'role': 'role', 'count': 1, }, { 'strategy': 'strat2', 'id': 2, 'role': 'role', 'count': 1, }, ], 'observations': [ { 'symmetry_groups': [ { 'id': 1, 'payoff': 9, }, { 'id': 2, 'payoff': 21, }, ], }, { 'symmetry_groups': [ { 'id': 1, 'payoff': 10, }, { 'id': 2, 'payoff': 20, }, ], }, { 'symmetry_groups': [ { 'id': 1, 'payoff': 11, }, { 'id': 2, 'payoff': 19, }, ], }, ], }, { 'symmetry_groups': [ { 'strategy': 'strat2', 'id': 3, 'role': 'role', 'count': 2, }, ], 'observations': [ { 'symmetry_groups': [ { 'id': 3, 'payoff': 32, }, ], }, { 'symmetry_groups': [ { 'id': 3, 'payoff': 28, }, ], }, { 'symmetry_groups': [ { 'id': 3, 'payoff': 30, }, ], }, { 'symmetry_groups': [ { 'id': 3, 'payoff': 30, }, ], }, ], }, ], } FULLGAME_JSON = { 'roles': [ { 'name': 'role', 'strategies': [ 'strat1', 'strat2', ], 'count': 2, }, ], 'profiles': [ { 'symmetry_groups': [ { 'strategy': 'strat1', 'id': 0, 'role': 'role', 'count': 2, }, ], 'observations': [ { 'players': [ { 'sid': 0, 'p': -2, }, { 'sid': 0, 'p': 0, }, ], }, { 'players': [ { 'sid': 0, 'p': 0, }, { 'sid': 0, 'p': 0, }, ], }, { 'players': [ { 'sid': 0, 'p': 0, }, { 'sid': 0, 'p': 2, }, ], }, ], }, { 'symmetry_groups': [ { 'strategy': 'strat1', 'id': 1, 'role': 'role', 'count': 1, }, { 'strategy': 'strat2', 'id': 2, 'role': 'role', 'count': 1, }, ], 'observations': [ { 'players': [ { 'sid': 1, 'p': 9, }, { 'sid': 2, 'p': 21, }, ], }, { 'players': [ { 'sid': 1, 'p': 10, }, { 'sid': 2, 'p': 20, }, ], }, { 'players': [ { 'sid': 1, 'p': 11, }, { 'sid': 2, 'p': 19, }, ], }, ], }, { 'symmetry_groups': [ { 'strategy': 'strat2', 'id': 3, 'role': 'role', 'count': 2, }, ], 'observations': [ { 'players': [ { 'sid': 3, 'p': 32, }, { 'sid': 3, 'p': 32, }, ], }, { 'players': [ { 'sid': 3, 'p': 30, }, { 'sid': 3, 'p': 26, }, ], }, { 'players': [ { 'sid': 
3, 'p': 34, }, { 'sid': 3, 'p': 26, }, ], }, { 'players': [ { 'sid': 3, 'p': 28, }, { 'sid': 3, 'p': 32, }, ], }, ], }, ], } @pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON, EMPTYGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_basegame_from_json(jgame): gameio.read_basegame(jgame) @pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON, EMPTYGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_game_from_json(jgame): gameio.read_game(jgame) @pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON, EMPTYGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_samplegame_from_json(jgame): gameio.read_samplegame(jgame) @pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON, EMPTYGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_basegame_equality(jgame): game, serial = gameio.read_basegame(jgame) assert game == rsgame.basegame_copy(GAME) assert serial == SERIAL @pytest.mark.parametrize('jgame', [GAME_JSON, SAMPLEGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_game_equality(jgame): game, serial = gameio.read_game(jgame) assert rsgame.game_copy(game) == rsgame.game_copy(GAME) assert serial == SERIAL @pytest.mark.parametrize('jgame', [SAMPLEGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_samplegame_equality(jgame): game, serial = gameio.read_samplegame(jgame) assert game == GAME assert serial == SERIAL def test_output(): EMPTYGAME_JSON = BASEGAME_JSON.copy() EMPTYGAME_JSON['profiles'] = [] SAMPLEDGAME_JSON = copy.deepcopy(GAME_JSON) for prof in SAMPLEDGAME_JSON['profiles']: for pays in prof.values(): pays[:] = [(s, c, [p]) for s, c, p in pays] assert BASEGAME_JSON == SERIAL.to_basegame_json(GAME) assert BASEGAME_JSON == SERIAL.to_basegame_json(rsgame.game_copy(GAME)) assert BASEGAME_JSON == SERIAL.to_basegame_json(rsgame.basegame_copy(GAME)) assert 
GAME_JSON == SERIAL.to_game_json(GAME) assert GAME_JSON == SERIAL.to_game_json(rsgame.game_copy(GAME)) assert EMPTYGAME_JSON == SERIAL.to_game_json(rsgame.basegame_copy(GAME)) assert SAMPLEGAME_JSON == SERIAL.to_samplegame_json(GAME) assert SAMPLEDGAME_JSON == SERIAL.to_samplegame_json( rsgame.game_copy(GAME)) assert EMPTYGAME_JSON == SERIAL.to_samplegame_json( rsgame.basegame_copy(GAME)) expected = """ BaseGame: Roles: role Players: 2x role Strategies: role: strat1 strat2 """[1:-1] assert expected == SERIAL.to_basegame_printstr(GAME) assert expected == SERIAL.to_basegame_printstr(rsgame.game_copy(GAME)) assert expected == SERIAL.to_basegame_printstr(rsgame.basegame_copy(GAME)) expected = """ BaseGame: Roles: a, b Players: 3x a 4x b Strategies: a: bar foo b: baz """[1:-1] assert expected == SERIAL2.to_basegame_printstr(rsgame.basegame( [3, 4], SERIAL2.num_strategies)) expected = """ Game: Roles: role Players: 2x role Strategies: role: strat1 strat2 payoff data for 3 out of 3 profiles """[1:-1] assert expected == SERIAL.to_game_printstr(GAME) assert expected == SERIAL.to_game_printstr(rsgame.game_copy(GAME)) expected = """ Game: Roles: role Players: 2x role Strategies: role: strat1 strat2 payoff data for 0 out of 3 profiles """[1:-1] assert expected == SERIAL.to_game_printstr(rsgame.basegame_copy(GAME)) expected = """ SampleGame: Roles: role Players: 2x role Strategies: role: strat1 strat2 payoff data for 3 out of 3 profiles 3 to 4 observations per profile """[1:-1] assert expected == SERIAL.to_samplegame_printstr(GAME) expected = """ SampleGame: Roles: role Players: 2x role Strategies: role: strat1 strat2 payoff data for 3 out of 3 profiles 1 observation per profile """[1:-1] assert expected == SERIAL.to_samplegame_printstr(rsgame.game_copy(GAME)) expected = """ SampleGame: Roles: role Players: 2x role Strategies: role: strat1 strat2 payoff data for 0 out of 3 profiles no observations """[1:-1] assert expected == SERIAL.to_samplegame_printstr( 
rsgame.basegame_copy(GAME)) @pytest.mark.parametrize('_', range(20)) def test_sorted_strategy_loading(_): with open('test/hard_nash_game_1.json') as f: _, serial = gameio.read_basegame(json.load(f)) assert utils.is_sorted(serial.role_names), \ "loaded json game didn't have sorted roles" assert all(utils.is_sorted(strats) for strats in serial.strat_names), \ "loaded json game didn't have sorted strategies" def test_to_from_prof_json(): prof = [6, 5, 3] json_prof = {'a': {'foo': 5, 'bar': 6}, 'b': {'baz': 3}} assert SERIAL2.to_prof_json(prof) == json_prof new_prof = SERIAL2.from_prof_json(json_prof) assert np.all(new_prof == prof) assert new_prof.dtype == int player_prof = {'players': [ {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'b', 'strategy': 'baz', 'payoff': 0}, {'role': 'b', 'strategy': 'baz', 'payoff': 0}, {'role': 'b', 'strategy': 'baz', 'payoff': 0}, ]} new_prof = SERIAL2.from_prof_json(player_prof) assert np.all(new_prof == prof) assert new_prof.dtype == int def test_to_from_payoff_json_roles(): pay = [1.0, 2.0, 3.0] json_pay = {'a': {'foo': 2.0, 'bar': 1.0}, 'b': {'baz': 3.0}} assert SERIAL2.to_payoff_json(pay) == json_pay new_pay = SERIAL2.from_payoff_json(json_pay) assert np.allclose(new_pay, pay) assert new_pay.dtype == float player_pay = {'players': [ {'role': 'a', 'strategy': 'foo', 'payoff': 4}, {'role': 'a', 'strategy': 'foo', 'payoff': 2}, {'role': 'a', 'strategy': 'foo', 'payoff': 0}, {'role': 'a', 'strategy': 'foo', 'payoff': 4}, {'role': 'a', 'strategy': 'foo', 'payoff': 
0}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 2}, {'role': 'a', 'strategy': 'bar', 'payoff': 2}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'a', 'strategy': 'bar', 'payoff': 2}, {'role': 'a', 'strategy': 'bar', 'payoff': 0}, {'role': 'b', 'strategy': 'baz', 'payoff': 0}, {'role': 'b', 'strategy': 'baz', 'payoff': 6}, {'role': 'b', 'strategy': 'baz', 'payoff': 3}, ]} new_pay = SERIAL2.from_payoff_json(player_pay) assert np.allclose(new_pay, pay) assert new_pay.dtype == float def test_to_from_mix_json(): mix = [.6, .4, 1] json_mix = {'a': {'foo': .4, 'bar': .6}, 'b': {'baz': 1}} assert SERIAL2.to_mix_json(mix) == json_mix new_mix = SERIAL2.from_mix_json(json_mix) assert np.all(new_mix == mix) assert new_mix.dtype == float def test_to_from_subgame_json(): sub = [True, False, True] json_sub = {'a': ['bar'], 'b': ['baz']} assert SERIAL2.to_subgame_json(sub) == json_sub new_sub = SERIAL2.from_subgame_json(json_sub) assert np.all(new_sub == sub) assert new_sub.dtype == bool def test_to_from_prof_str(): prof = [6, 5, 3] prof_str = 'a: 5 foo, 6 bar; b: 3 baz' assert np.all(SERIAL2.from_prof_str(prof_str) == prof) assert set(SERIAL2.to_prof_str(prof)) == set(prof_str) def test_to_from_samplepay_json(): prof = [3, 0, 4] spay = [[3, 0, 7], [4, 0, 8], [5, 0, 9]] json_spay = {'a': {'bar': [3, 4, 5]}, 'b': {'baz': [7, 8, 9]}} json_spay_0 = {'a': {'bar': [3, 4, 5], 'foo': [0, 0, 0]}, 'b': {'baz': [7, 8, 9]}} assert SERIAL2.to_samplepay_json(spay, prof) == json_spay assert SERIAL2.to_samplepay_json(spay) == json_spay_0 assert np.allclose(SERIAL2.from_samplepay_json(json_spay), spay) with pytest.raises(AssertionError): SERIAL2.from_samplepay_json( json_spay, np.empty((0, SERIAL2.num_role_strats))) json_prof_spay = {'a': [('bar', 3, [3, 4, 5])], 'b': [('baz', 4, [7, 8, 9])]} with pytest.raises(AssertionError): SERIAL2.from_samplepay_json( json_prof_spay, np.empty((0, SERIAL2.num_role_strats))) def 
test_to_from_profsamplepay_json(): prof = [3, 0, 4] spay = [[3, 0, 7], [4, 0, 8], [5, 0, 9]] json_profspay = {'a': [('bar', 3, [3, 4, 5])], 'b': [('baz', 4, [7, 8, 9])]} assert SERIAL2.to_profsamplepay_json(spay, prof) == json_profspay p, sp = SERIAL2.from_profsamplepay_json(json_profspay) assert np.all(p == prof) assert np.allclose(sp, spay) def test_to_prof_printstr(): prof = [6, 5, 3] expected = """ a: bar: 6 foo: 5 b: baz: 3 """[1:] assert SERIAL2.to_prof_printstr(prof) == expected def test_to_from_mix_printstr(): mix = [0.3, 0.7, 1] expected = """ a: bar: 30.00% foo: 70.00% b: baz: 100.00% """[1:] assert SERIAL2.to_mix_printstr(mix) == expected def test_to_from_subgame_printstr(): sub = [True, False, True] expected = """ a: bar b: baz """[1:] assert SERIAL2.to_subgame_printstr(sub) == expected def test_to_from_role_json(): role = [6, 3] json_role = {'a': 6, 'b': 3} assert SERIAL2.to_role_json(role) == json_role assert np.all(SERIAL2.from_role_json(json_role) == role) assert SERIAL2.from_role_json(json_role).dtype == float def test_deviation_payoff_json(): prof = [3, 0, 4] devpay = [5] json_devpay = {'a': {'bar': {'foo': 5}}, 'b': {'baz': {}}} assert SERIAL2.to_deviation_payoff_json(devpay, prof) == json_devpay prof = [2, 1, 4] devpay = [5, 4] json_devpay = {'a': {'bar': {'foo': 5}, 'foo': {'bar': 4}}, 'b': {'baz': {}}} assert SERIAL2.to_deviation_payoff_json(devpay, prof) == json_devpay def test_to_pay_json(): jprof = SERIAL.to_payoff_json(GAME.payoffs[0], GAME.profiles[0]) assert jprof == {'role': {'strat1': 0}} jprof = SERIAL.to_payoff_json(GAME.payoffs[0]) assert jprof == {'role': {'strat1': 0, 'strat2': 0}} jprof = SERIAL.to_payoff_json(GAME.payoffs[1], GAME.profiles[1]) assert jprof == {'role': {'strat1': 10, 'strat2': 20}} jprof = SERIAL.to_payoff_json(GAME.payoffs[1]) assert jprof == {'role': {'strat1': 10, 'strat2': 20}} jprof = SERIAL.to_payoff_json(GAME.payoffs[2], GAME.profiles[2]) assert jprof == {'role': {'strat2': 30}} jprof = 
SERIAL.to_payoff_json(GAME.payoffs[2]) assert jprof == {'role': {'strat1': 0, 'strat2': 30}} jprof = SERIAL.to_profpay_json(GAME.payoffs[0], GAME.profiles[0]) assert jprof == {'role': [('strat1', 2, 0)]} jprof = {k: set(v) for k, v in SERIAL.to_profpay_json( GAME.payoffs[1], GAME.profiles[1]).items()} assert jprof == {'role': set([('strat1', 1, 10), ('strat2', 1, 20)])} jprof = SERIAL.to_profpay_json(GAME.payoffs[2], GAME.profiles[2]) assert jprof == {'role': [('strat2', 2, 30)]} @pytest.mark.parametrize('jgame', [GAME_JSON, SAMPLEGAME_JSON, SUMMARYGAME_JSON, OBSERVATIONGAME_JSON, FULLGAME_JSON]) def test_to_from_payoff_json(jgame): _, serial = gameio.read_basegame(jgame) payoffs = np.concatenate([serial.from_payoff_json(p)[None] for p in jgame['profiles']]) expected = [[0, 0], [10, 20], [0, 30]] assert np.allclose(expected, payoffs) def test_load_empty_observations(): serial = gameio.gameserializer(['a', 'b'], [['bar', 'foo'], ['baz']]) profile = { 'symmetry_groups': [ { 'strategy': 'bar', 'id': 0, 'role': 'a', 'count': 1, }, { 'strategy': 'baz', 'id': 1, 'role': 'b', 'count': 1, }, ], 'observations': [], } payoff = serial.from_payoff_json(profile) assert np.allclose(payoff, [np.nan, 0, np.nan], equal_nan=True) profile = { 'a': { 'bar': [] }, 'b': { 'baz': [] }, } payoff = serial.from_payoff_json(profile) assert np.allclose(payoff, [np.nan, 0, np.nan], equal_nan=True) def test_sorted_strategy_warning(): with pytest.raises(UserWarning), warnings.catch_warnings(): warnings.simplefilter('error') gameio.gameserializer(['role'], [['b', 'a']]) def test_invalid_game(): with pytest.raises(ValueError): SERIAL.from_basegame_json({}) with pytest.raises(ValueError): gameio.read_basegame({}) def test_repr(): assert repr(SERIAL) is not None def test_strat_name(): serial = gameio.gameserializer(['a', 'b'], [['e', 'q', 'w'], ['r', 't']]) for i, s in enumerate(['e', 'q', 'w', 'r', 't']): assert s == serial.strat_name(i) def test_index(): serial = gameio.gameserializer(['a', 'b'], 
[['e', 'q', 'w'], ['r', 't']]) assert 0 == serial.role_index('a') assert 1 == serial.role_index('b') assert 0 == serial.role_strat_index('a', 'e') assert 1 == serial.role_strat_index('a', 'q') assert 2 == serial.role_strat_index('a', 'w') assert 3 == serial.role_strat_index('b', 'r') assert 4 == serial.role_strat_index('b', 't') def test_serialization(): json.dumps(SERIAL.to_basegame_json(GAME)) json.dumps(SERIAL.to_game_json(GAME)) json.dumps(SERIAL.to_samplegame_json(GAME))
apache-2.0
fbradyirl/home-assistant
homeassistant/components/panasonic_viera/media_player.py
2
6691
"""Support for interface with a Panasonic Viera TV.""" import logging import voluptuous as vol from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA from homeassistant.components.media_player.const import ( MEDIA_TYPE_URL, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, ) from homeassistant.const import ( CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_APP_POWER = "app_power" DEFAULT_NAME = "Panasonic Viera TV" DEFAULT_PORT = 55000 DEFAULT_APP_POWER = False SUPPORT_VIERATV = ( SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA | SUPPORT_STOP ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_MAC): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_APP_POWER, default=DEFAULT_APP_POWER): cv.boolean, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Panasonic Viera TV platform.""" from panasonic_viera import RemoteControl mac = config.get(CONF_MAC) name = config.get(CONF_NAME) port = config.get(CONF_PORT) app_power = config.get(CONF_APP_POWER) if discovery_info: _LOGGER.debug("%s", discovery_info) name = discovery_info.get("name") host = discovery_info.get("host") port = discovery_info.get("port") udn = discovery_info.get("udn") if udn and udn.startswith("uuid:"): uuid = udn[len("uuid:") :] else: uuid = None remote = RemoteControl(host, port) add_entities([PanasonicVieraTVDevice(mac, name, remote, host, app_power, uuid)]) return True host = config.get(CONF_HOST) remote = 
RemoteControl(host, port) add_entities([PanasonicVieraTVDevice(mac, name, remote, host, app_power)]) return True class PanasonicVieraTVDevice(MediaPlayerDevice): """Representation of a Panasonic Viera TV.""" def __init__(self, mac, name, remote, host, app_power, uuid=None): """Initialize the Panasonic device.""" import wakeonlan # Save a reference to the imported class self._wol = wakeonlan self._mac = mac self._name = name self._uuid = uuid self._muted = False self._playing = True self._state = None self._remote = remote self._host = host self._volume = 0 self._app_power = app_power @property def unique_id(self) -> str: """Return the unique ID of this Viera TV.""" return self._uuid def update(self): """Retrieve the latest data.""" try: self._muted = self._remote.get_mute() self._volume = self._remote.get_volume() / 100 self._state = STATE_ON except OSError: self._state = STATE_OFF def send_key(self, key): """Send a key to the tv and handles exceptions.""" try: self._remote.send_key(key) self._state = STATE_ON except OSError: self._state = STATE_OFF return False return True @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._state @property def volume_level(self): """Volume level of the media player (0..1).""" return self._volume @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._muted @property def supported_features(self): """Flag media player features that are supported.""" if self._mac or self._app_power: return SUPPORT_VIERATV | SUPPORT_TURN_ON return SUPPORT_VIERATV def turn_on(self): """Turn on the media player.""" if self._mac: self._wol.send_magic_packet(self._mac, ip_address=self._host) self._state = STATE_ON elif self._app_power: self._remote.turn_on() self._state = STATE_ON def turn_off(self): """Turn off media player.""" if self._state != STATE_OFF: self._remote.turn_off() self._state = STATE_OFF def 
volume_up(self): """Volume up the media player.""" self._remote.volume_up() def volume_down(self): """Volume down media player.""" self._remote.volume_down() def mute_volume(self, mute): """Send mute command.""" self._remote.set_mute(mute) def set_volume_level(self, volume): """Set volume level, range 0..1.""" volume = int(volume * 100) try: self._remote.set_volume(volume) self._state = STATE_ON except OSError: self._state = STATE_OFF def media_play_pause(self): """Simulate play pause media player.""" if self._playing: self.media_pause() else: self.media_play() def media_play(self): """Send play command.""" self._playing = True self._remote.media_play() def media_pause(self): """Send media pause command to media player.""" self._playing = False self._remote.media_pause() def media_next_track(self): """Send next track command.""" self._remote.media_next_track() def media_previous_track(self): """Send the previous track command.""" self._remote.media_previous_track() def play_media(self, media_type, media_id, **kwargs): """Play media.""" _LOGGER.debug("Play media: %s (%s)", media_id, media_type) if media_type == MEDIA_TYPE_URL: try: self._remote.open_webpage(media_id) except (TimeoutError, OSError): self._state = STATE_OFF else: _LOGGER.warning("Unsupported media_type: %s", media_type) def media_stop(self): """Stop playback.""" self.send_key("NRC_CANCEL-ONOFF")
apache-2.0
tinfoil/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/thread/messagepump.py
151
2482
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class MessagePumpDelegate(object):
    """Interface that MessagePump clients must implement.

    The delegate owns scheduling (e.g. a timer or run loop) and is handed
    each message as it is pumped off the queue.
    """

    def schedule(self, interval, callback):
        """Arrange for callback() to be invoked after interval seconds."""
        # Fixed: the old `raise NotImplementedError, "..."` statement form is
        # Python-2-only syntax and made this module unimportable on Python 3.
        raise NotImplementedError("subclasses must implement")

    def message_available(self, message):
        """Handle one message taken from the queue."""
        raise NotImplementedError("subclasses must implement")

    def final_message_delivered(self):
        """Called once, after the last message of a stopped queue is delivered."""
        raise NotImplementedError("subclasses must implement")


class MessagePump(object):
    """Periodically drain a message queue and feed it to a delegate.

    On each tick (every `interval` seconds, scheduled through the delegate)
    all pending messages are delivered.  Once the queue reports it is no
    longer running, the delegate gets a final notification and no further
    ticks are scheduled.
    """

    interval = 10  # seconds between drains of the queue

    def __init__(self, delegate, message_queue):
        self._delegate = delegate
        self._message_queue = message_queue
        self._schedule()

    def _schedule(self):
        # Ask the delegate to call us back after the polling interval.
        self._delegate.schedule(self.interval, self._callback)

    def _callback(self):
        (messages, is_running) = self._message_queue.take_all()
        for message in messages:
            self._delegate.message_available(message)
        if not is_running:
            # Queue has shut down: notify the delegate and stop rescheduling.
            self._delegate.final_message_delivered()
            return
        self._schedule()
bsd-3-clause
osstech-jp/samba
source4/scripting/devel/speedtest.py
35
8192
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Unix SMB/CIFS implementation. # This speed test aims to show difference in execution time for bulk # creation of user objects. This will help us compare # Samba4 vs MS Active Directory performance. # Copyright (C) Zahari Zahariev <zahari.zahariev@postpath.com> 2010 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import optparse import sys import time import base64 from decimal import Decimal sys.path.insert(0, "bin/python") import samba from samba.tests.subunitrun import TestProgram, SubunitOptions import samba.getopt as options from ldb import SCOPE_BASE, SCOPE_SUBTREE from samba.ndr import ndr_unpack from samba.dcerpc import security from samba.auth import system_session from samba import gensec, sd_utils from samba.samdb import SamDB from samba.credentials import Credentials import samba.tests from samba.tests import delete_force parser = optparse.OptionParser("speedtest.py [options] <host>") sambaopts = options.SambaOptions(parser) parser.add_option_group(sambaopts) parser.add_option_group(options.VersionOptions(parser)) # use command line creds if available credopts = options.CredentialsOptions(parser) parser.add_option_group(credopts) subunitopts = SubunitOptions(parser) parser.add_option_group(subunitopts) opts, args = parser.parse_args() if len(args) < 1: parser.print_usage() sys.exit(1) host = args[0] lp = sambaopts.get_loadparm() 
creds = credopts.get_credentials(lp) creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL) # # Tests start here # class SpeedTest(samba.tests.TestCase): def find_domain_sid(self, ldb): res = ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE) return ndr_unpack(security.dom_sid,res[0]["objectSid"][0]) def setUp(self): super(SpeedTest, self).setUp() self.ldb_admin = ldb self.base_dn = ldb.domain_dn() self.domain_sid = security.dom_sid(ldb.get_domain_sid()) self.user_pass = "samba123@" print "baseDN: %s" % self.base_dn def create_user(self, user_dn): ldif = """ dn: """ + user_dn + """ sAMAccountName: """ + user_dn.split(",")[0][3:] + """ objectClass: user unicodePwd:: """ + base64.b64encode(("\"%s\"" % self.user_pass).encode('utf-16-le')) + """ url: www.example.com """ self.ldb_admin.add_ldif(ldif) def create_group(self, group_dn, desc=None): ldif = """ dn: """ + group_dn + """ objectClass: group sAMAccountName: """ + group_dn.split(",")[0][3:] + """ groupType: 4 url: www.example.com """ self.ldb_admin.add_ldif(ldif) def create_bundle(self, count): for i in range(count): self.create_user("cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn)) def remove_bundle(self, count): for i in range(count): delete_force(self.ldb_admin, "cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn)) def remove_test_users(self): res = ldb.search(base="cn=Users,%s" % self.base_dn, expression="(objectClass=user)", scope=SCOPE_SUBTREE) dn_list = [item.dn for item in res if "speedtestuser" in str(item.dn)] for dn in dn_list: delete_force(self.ldb_admin, dn) class SpeedTestAddDel(SpeedTest): def setUp(self): super(SpeedTestAddDel, self).setUp() def run_bundle(self, num): print "\n=== Test ADD/DEL %s user objects ===\n" % num avg_add = Decimal("0.0") avg_del = Decimal("0.0") for x in [1, 2, 3]: start = time.time() self.create_bundle(num) res_add = Decimal( str(time.time() - start) ) avg_add += res_add print " Attempt %s ADD: %.3fs" % ( x, 
float(res_add) ) # start = time.time() self.remove_bundle(num) res_del = Decimal( str(time.time() - start) ) avg_del += res_del print " Attempt %s DEL: %.3fs" % ( x, float(res_del) ) print "Average ADD: %.3fs" % float( Decimal(avg_add) / Decimal("3.0") ) print "Average DEL: %.3fs" % float( Decimal(avg_del) / Decimal("3.0") ) print "" def test_00000(self): """ Remove possibly undeleted test users from previous test """ self.remove_test_users() def test_00010(self): self.run_bundle(10) def test_00100(self): self.run_bundle(100) def test_01000(self): self.run_bundle(1000) def _test_10000(self): """ This test should be enabled preferably against MS Active Directory. It takes quite the time against Samba4 (1-2 days). """ self.run_bundle(10000) class AclSearchSpeedTest(SpeedTest): def setUp(self): super(AclSearchSpeedTest, self).setUp() self.ldb_admin.newuser("acltestuser", "samba123@") self.sd_utils = sd_utils.SDUtils(self.ldb_admin) self.ldb_user = self.get_ldb_connection("acltestuser", "samba123@") self.user_sid = self.sd_utils.get_object_sid(self.get_user_dn("acltestuser")) def tearDown(self): super(AclSearchSpeedTest, self).tearDown() delete_force(self.ldb_admin, self.get_user_dn("acltestuser")) def run_search_bundle(self, num, _ldb): print "\n=== Creating %s user objects ===\n" % num self.create_bundle(num) mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid)) for i in range(num): self.sd_utils.dacl_add_ace("cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn), mod) print "\n=== %s user objects created ===\n" % num print "\n=== Test search on %s user objects ===\n" % num avg_search = Decimal("0.0") for x in [1, 2, 3]: start = time.time() res = _ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_SUBTREE) res_search = Decimal( str(time.time() - start) ) avg_search += res_search print " Attempt %s SEARCH: %.3fs" % ( x, float(res_search) ) print "Average Search: %.3fs" % float( Decimal(avg_search) / Decimal("3.0") ) 
self.remove_bundle(num) def get_user_dn(self, name): return "CN=%s,CN=Users,%s" % (name, self.base_dn) def get_ldb_connection(self, target_username, target_password): creds_tmp = Credentials() creds_tmp.set_username(target_username) creds_tmp.set_password(target_password) creds_tmp.set_domain(creds.get_domain()) creds_tmp.set_realm(creds.get_realm()) creds_tmp.set_workstation(creds.get_workstation()) creds_tmp.set_gensec_features(creds_tmp.get_gensec_features() | gensec.FEATURE_SEAL) ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp) return ldb_target def test_search_01000(self): self.run_search_bundle(1000, self.ldb_admin) def test_search2_01000(self): # allow the user to see objects but not attributes, all attributes will be filtered out mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid)) self.sd_utils.dacl_add_ace("CN=Users,%s" % self.base_dn, mod) self.run_search_bundle(1000, self.ldb_user) # Important unit running information if not "://" in host: host = "ldap://%s" % host ldb_options = ["modules:paged_searches"] ldb = SamDB(host, credentials=creds, session_info=system_session(), lp=lp, options=ldb_options) TestProgram(module=__name__, opts=subunitopts)
gpl-3.0
Qusic/ycmd
ycmd/tests/python/subcommands_test.py
3
6804
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import *  # noqa

from hamcrest import assert_that
from nose.tools import eq_
import os.path

from ycmd.utils import ReadFile
from ycmd.tests.python import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import BuildRequest, ErrorMatcher


@SharedYcmd
def RunGoToTest( app, test ):
  # Shared driver: issue a GoTo against the file named in test[ 'request' ]
  # and compare the server's JSON response to test[ 'response' ].
  filepath = PathToTestFile( test[ 'request' ][ 'filename' ] )
  goto_data = BuildRequest( completer_target = 'filetype_default',
                            command_arguments = [ 'GoTo' ],
                            line_num = test[ 'request' ][ 'line_num' ],
                            contents = ReadFile( filepath ),
                            filetype = 'python',
                            filepath = filepath )

  eq_( test[ 'response' ],
       app.post_json( '/run_completer_command', goto_data ).json )


def Subcommands_GoTo_test():
  # Tests taken from https://github.com/Valloric/YouCompleteMe/issues/1236
  tests = [
    {
      'request': { 'filename': 'goto_file1.py', 'line_num': 2 },
      'response': { 'filepath': PathToTestFile( 'goto_file3.py' ),
                    'line_num': 1,
                    'column_num': 5 }
    },
    {
      'request': { 'filename': 'goto_file4.py', 'line_num': 2 },
      'response': { 'filepath': PathToTestFile( 'goto_file4.py' ),
                    'line_num': 1,
                    'column_num': 18 }
    }
  ]

  # Nose generator test: each (driver, test) pair runs as its own test case.
  for test in tests:
    yield RunGoToTest, test


@SharedYcmd
def RunGoToTest_Variation_ZeroBasedLineAndColumn( app, test ):
  # Example taken directly from jedi docs
  # http://jedi.jedidjah.ch/en/latest/docs/plugin-api.html#examples
  # NOTE(review): blank-line placement inside this literal was inferred while
  # restoring lost line breaks — confirm against upstream ycmd if line numbers
  # in the expected responses look off.
  contents = """
def my_func():
  print 'called'

alias = my_func
my_list = [1, None, alias]
inception = my_list[2]
inception()
"""

  goto_data = BuildRequest( completer_target = 'filetype_default',
                            command_arguments = test[ 'command_arguments' ],
                            line_num = 9,
                            contents = contents,
                            filetype = 'python',
                            filepath = '/foo.py' )

  eq_( test[ 'response' ],
       app.post_json( '/run_completer_command', goto_data ).json )


def Subcommands_GoTo_Variation_ZeroBasedLineAndColumn_test():
  # GoToDefinition resolves through the alias chain to my_func's definition;
  # GoToDeclaration stops at the nearest assignment to `inception`.
  tests = [
    {
      'command_arguments': [ 'GoToDefinition' ],
      'response': { 'filepath': os.path.abspath( '/foo.py' ),
                    'line_num': 2,
                    'column_num': 5 }
    },
    {
      'command_arguments': [ 'GoToDeclaration' ],
      'response': { 'filepath': os.path.abspath( '/foo.py' ),
                    'line_num': 7,
                    'column_num': 1 }
    }
  ]

  for test in tests:
    yield RunGoToTest_Variation_ZeroBasedLineAndColumn, test


@SharedYcmd
def Subcommands_GoToDefinition_NotFound_test( app ):
  # When jedi cannot resolve a definition, the server must answer with a
  # RuntimeError payload rather than a bogus location.
  filepath = PathToTestFile( 'goto_file5.py' )
  goto_data = BuildRequest( command_arguments = [ 'GoToDefinition' ],
                            line_num = 4,
                            contents = ReadFile( filepath ),
                            filetype = 'python',
                            filepath = filepath )

  response = app.post_json( '/run_completer_command',
                            goto_data,
                            expect_errors = True ).json
  assert_that( response,
               ErrorMatcher( RuntimeError, "Can\'t jump to definition." ) )


@SharedYcmd
def Subcommands_GetDoc_Method_test( app ):
  # Testcase1
  filepath = PathToTestFile( 'GetDoc.py' )
  contents = ReadFile( filepath )

  event_data = BuildRequest( filepath = filepath,
                             filetype = 'python',
                             line_num = 17,
                             column_num = 9,
                             contents = contents,
                             command_arguments = [ 'GetDoc' ],
                             completer_target = 'filetype_default' )

  response = app.post_json( '/run_completer_command', event_data ).json

  # Docstring must come back dedented, with signature on the first line.
  eq_( response, {
    'detailed_info': '_ModuleMethod()\n\n'
                     'Module method docs\n'
                     'Are dedented, like you might expect',
  } )


@SharedYcmd
def Subcommands_GetDoc_Class_test( app ):
  # Testcase1
  filepath = PathToTestFile( 'GetDoc.py' )
  contents = ReadFile( filepath )

  event_data = BuildRequest( filepath = filepath,
                             filetype = 'python',
                             line_num = 19,
                             column_num = 2,
                             contents = contents,
                             command_arguments = [ 'GetDoc' ],
                             completer_target = 'filetype_default' )

  response = app.post_json( '/run_completer_command', event_data ).json

  eq_( response, {
    'detailed_info': 'Class Documentation',
  } )


@SharedYcmd
def Subcommands_GoToReferences_test( app ):
  # GoToReferences on `f` must list the definition plus every call site.
  filepath = PathToTestFile( 'goto_references.py' )
  contents = ReadFile( filepath )

  event_data = BuildRequest( filepath = filepath,
                             filetype = 'python',
                             line_num = 4,
                             column_num = 5,
                             contents = contents,
                             command_arguments = [ 'GoToReferences' ],
                             completer_target = 'filetype_default' )

  response = app.post_json( '/run_completer_command', event_data ).json

  eq_( response, [
    { 'filepath': PathToTestFile( 'goto_references.py' ),
      'column_num': 5,
      'description': 'def f',
      'line_num': 1 },
    { 'filepath': PathToTestFile( 'goto_references.py' ),
      'column_num': 5,
      'description': 'a = f()',
      'line_num': 4 },
    { 'filepath': PathToTestFile( 'goto_references.py' ),
      'column_num': 5,
      'description': 'b = f()',
      'line_num': 5 },
    { 'filepath': PathToTestFile( 'goto_references.py' ),
      'column_num': 5,
      'description': 'c = f()',
      'line_num': 6 } ] )
gpl-3.0
spaceof7/QGIS
python/plugins/processing/algs/qgis/RandomPointsExtent.py
7
5990
# -*- coding: utf-8 -*-

"""
***************************************************************************
    RandomPointsExtent.py
    ---------------------
    Date                 : April 2014
    Copyright            : (C) 2014 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os
import random

from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsField,
                       QgsFeatureSink,
                       QgsFeature,
                       QgsFields,
                       QgsGeometry,
                       QgsPointXY,
                       QgsWkbTypes,
                       QgsSpatialIndex,
                       QgsProcessing,
                       QgsProcessingException,
                       QgsProcessingParameterExtent,
                       QgsProcessingParameterNumber,
                       QgsProcessingParameterCrs,
                       QgsProcessingParameterFeatureSink,
                       QgsProcessingParameterDefinition)

from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector

pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]


class RandomPointsExtent(QgisAlgorithm):
    """Processing algorithm that scatters random points inside an extent.

    Rejection sampling: candidate points are drawn uniformly inside the
    bounding box and kept only if they respect the minimum-distance
    constraint against all points accepted so far.
    """

    # Parameter / output identifiers used in the processing framework.
    EXTENT = 'EXTENT'
    POINTS_NUMBER = 'POINTS_NUMBER'
    MIN_DISTANCE = 'MIN_DISTANCE'
    TARGET_CRS = 'TARGET_CRS'
    OUTPUT = 'OUTPUT'

    def icon(self):
        """Return the toolbox icon for this algorithm."""
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'random_points.png'))

    def group(self):
        """Return the translated group name shown in the toolbox."""
        return self.tr('Vector creation')

    def groupId(self):
        """Return the non-translated group id."""
        return 'vectorcreation'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's inputs and its point-layer output."""
        self.addParameter(QgsProcessingParameterExtent(self.EXTENT,
                                                       self.tr('Input extent')))
        self.addParameter(QgsProcessingParameterNumber(self.POINTS_NUMBER,
                                                       self.tr('Number of points'),
                                                       QgsProcessingParameterNumber.Integer,
                                                       1, False, 1, 1000000000))
        self.addParameter(QgsProcessingParameterNumber(self.MIN_DISTANCE,
                                                       self.tr('Minimum distance between points'),
                                                       QgsProcessingParameterNumber.Double,
                                                       0, False, 0, 1000000000))
        self.addParameter(QgsProcessingParameterCrs(self.TARGET_CRS,
                                                    self.tr('Target CRS'),
                                                    'ProjectCrs'))

        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
                                                            self.tr('Random points'),
                                                            type=QgsProcessing.TypeVectorPoint))

    def name(self):
        return 'randompointsinextent'

    def displayName(self):
        return self.tr('Random points in extent')

    def processAlgorithm(self, parameters, context, feedback):
        """Generate the points and write them to the output sink.

        Gives up after pointCount * 200 candidate draws, so a tight
        minimum-distance constraint results in fewer points plus a log
        message rather than an endless loop.
        """
        pointCount = self.parameterAsDouble(parameters, self.POINTS_NUMBER, context)
        minDistance = self.parameterAsDouble(parameters, self.MIN_DISTANCE, context)
        crs = self.parameterAsCrs(parameters, self.TARGET_CRS, context)
        bbox = self.parameterAsExtent(parameters, self.EXTENT, context, crs)
        extent = QgsGeometry().fromRect(bbox)

        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))

        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, QgsWkbTypes.Point, crs)
        # FIX: parameterAsSink returns (None, ...) when the output layer cannot
        # be created; previously this crashed later with an AttributeError on
        # sink.addFeature. Fail fast with a proper processing exception instead
        # (QgsProcessingException was already imported for this purpose).
        if sink is None:
            raise QgsProcessingException(
                self.tr('Could not create output layer for {}').format(self.OUTPUT))

        nPoints = 0
        nIterations = 0
        maxIterations = pointCount * 200
        # Guard against division by zero when zero points are requested.
        total = 100.0 / pointCount if pointCount else 1

        index = QgsSpatialIndex()
        points = dict()

        random.seed()

        while nIterations < maxIterations and nPoints < pointCount:
            if feedback.isCanceled():
                break

            # Uniform draw inside the bounding box.
            rx = bbox.xMinimum() + bbox.width() * random.random()
            ry = bbox.yMinimum() + bbox.height() * random.random()

            p = QgsPointXY(rx, ry)
            geom = QgsGeometry.fromPointXY(p)
            # Accept only candidates inside the extent geometry that keep the
            # minimum distance to every previously accepted point.
            if geom.within(extent) and \
                    vector.checkMinDistance(p, index, minDistance, points):
                f = QgsFeature(nPoints)
                f.initAttributes(1)
                f.setFields(fields)
                f.setAttribute('id', nPoints)
                f.setGeometry(geom)
                sink.addFeature(f, QgsFeatureSink.FastInsert)
                index.insertFeature(f)
                points[nPoints] = p
                nPoints += 1
                feedback.setProgress(int(nPoints * total))
            nIterations += 1

        if nPoints < pointCount:
            feedback.pushInfo(self.tr('Could not generate requested number of random points. '
                                      'Maximum number of attempts exceeded.'))

        return {self.OUTPUT: dest_id}
gpl-2.0
ArcherCraftStore/ArcherVMPeridot
Python/Lib/encodings/cp855.py
272
33850
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp855', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE 0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE 0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE 0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE 0x0084: 0x0451, # CYRILLIC SMALL LETTER IO 0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO 0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE 0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE 0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE 0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE 0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I 0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I 0x008c: 0x0457, # CYRILLIC SMALL LETTER YI 0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI 0x008e: 0x0458, # CYRILLIC SMALL LETTER JE 0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE 0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE 0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE 0x0092: 
0x045a, # CYRILLIC SMALL LETTER NJE 0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE 0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE 0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE 0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE 0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE 0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U 0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U 0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE 0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE 0x009c: 0x044e, # CYRILLIC SMALL LETTER YU 0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU 0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN 0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A 0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A 0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE 0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE 0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE 0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE 0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE 0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE 0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE 0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE 0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF 0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF 0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE 0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA 0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA 0x00b7: 0x0438, # CYRILLIC SMALL LETTER I 0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT 
I 0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA 0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x00a4, # CURRENCY SIGN 0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL 0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL 0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM 0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM 0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN 0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN 0x00d6: 0x043e, # CYRILLIC SMALL LETTER O 0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O 0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE 0x00de: 0x044f, # CYRILLIC SMALL LETTER YA 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA 0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER 0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER 0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES 0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES 0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE 0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE 0x00e7: 0x0443, # CYRILLIC SMALL LETTER U 0x00e8: 0x0423, # CYRILLIC 
CAPITAL LETTER U 0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE 0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE 0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE 0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE 0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN 0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x00ef: 0x2116, # NUMERO SIGN 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU 0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU 0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE 0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE 0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA 0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA 0x00f7: 0x044d, # CYRILLIC SMALL LETTER E 0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA 0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA 0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE 0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE 0x00fd: 0x00a7, # SECTION SIGN 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' 
# 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' # 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a 
-> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE '\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE '\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE '\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE '\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO '\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO '\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE '\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE '\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE '\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE '\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I '\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I '\u0457' # 0x008c -> CYRILLIC 
SMALL LETTER YI '\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI '\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE '\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE '\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE '\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE '\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE '\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE '\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE '\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE '\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE '\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE '\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U '\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U '\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE '\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE '\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU '\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU '\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN '\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A '\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A '\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE '\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE '\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE '\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE '\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE '\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE '\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE '\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE '\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF '\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF '\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE '\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA '\u0425' # 
0x00b6 -> CYRILLIC CAPITAL LETTER HA '\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I '\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I '\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA '\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\xa4' # 0x00cf -> CURRENCY SIGN '\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL '\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL '\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM '\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM '\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN '\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN '\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O '\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O '\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u041f' # 0x00dd 
-> CYRILLIC CAPITAL LETTER PE '\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA '\u2580' # 0x00df -> UPPER HALF BLOCK '\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA '\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER '\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER '\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES '\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES '\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE '\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE '\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U '\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U '\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE '\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE '\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE '\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE '\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN '\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN '\u2116' # 0x00ef -> NUMERO SIGN '\xad' # 0x00f0 -> SOFT HYPHEN '\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU '\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU '\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE '\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE '\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA '\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA '\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E '\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E '\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA '\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA '\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE '\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE '\xa7' # 0x00fd -> SECTION SIGN '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # 
FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 
0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 
0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a4: 0x00cf, # CURRENCY SIGN 0x00a7: 0x00fd, # SECTION SIGN 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ad: 0x00f0, # SOFT HYPHEN 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO 0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE 0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE 0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE 0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE 0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I 0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI 0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE 0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE 0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE 0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE 0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE 0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U 0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE 0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A 0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE 0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE 0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE 0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE 0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE 0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE 0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE 0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I 0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I 0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA 0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL 0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM 0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN 0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O 0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE 0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER 0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES 0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE 0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U 0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF 0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA 
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE 0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE 0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA 0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA 0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN 0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU 0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN 0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E 0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU 0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A 0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE 0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE 0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE 0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE 0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE 0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE 0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE 0x0438: 0x00b7, # CYRILLIC SMALL LETTER I 0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I 0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA 0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL 0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM 0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN 0x043e: 0x00d6, # CYRILLIC SMALL LETTER O 0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE 0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER 0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES 0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE 0x0443: 0x00e7, # CYRILLIC SMALL LETTER U 0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF 0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA 0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE 0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE 0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA 0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA 0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN 0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU 0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN 0x044d: 0x00f7, # CYRILLIC SMALL LETTER E 0x044e: 0x009c, # CYRILLIC SMALL LETTER YU 0x044f: 0x00de, # CYRILLIC SMALL LETTER YA 0x0451: 0x0084, # CYRILLIC SMALL LETTER IO 0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE 0x0453: 0x0082, # CYRILLIC SMALL LETTER 
GJE 0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE 0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE 0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I 0x0457: 0x008c, # CYRILLIC SMALL LETTER YI 0x0458: 0x008e, # CYRILLIC SMALL LETTER JE 0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE 0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE 0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE 0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE 0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U 0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE 0x2116: 0x00ef, # NUMERO SIGN 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
apache-2.0
Vyt45/webas
node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
1789
10585
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Xcode-ninja wrapper project file generator.

This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large
number of targets within Xcode.
"""

import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils


def _WriteWorkspace(main_gyp, sources_gyp, params):
  """Create a workspace to wrap main and sources gyp paths.

  Arguments:
    main_gyp: Path of the main (.ninja.gyp) build file.
    sources_gyp: Path of the sources-for-indexing build file.
    params: Dict of global options for gyp (provides generator_output).
  """
  (build_file_root, build_file_ext) = os.path.splitext(main_gyp)
  workspace_path = build_file_root + '.xcworkspace'
  options = params['options']
  if options.generator_output:
    workspace_path = os.path.join(options.generator_output, workspace_path)
  try:
    os.makedirs(workspace_path)
  except OSError as e:
    # The workspace directory may already exist from a prior run;
    # any other OS error is fatal.
    if e.errno != errno.EEXIST:
      raise
  output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
                  '<Workspace version = "1.0">\n'
  for gyp_name in [main_gyp, sources_gyp]:
    name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
    name = xml.sax.saxutils.quoteattr("group:" + name)
    output_string += '  <FileRef location = %s></FileRef>\n' % name
  output_string += '</Workspace>\n'

  workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")

  try:
    with open(workspace_file, 'r') as input_file:
      input_string = input_file.read()
      # Avoid rewriting an identical file so Xcode does not see a
      # spurious modification and reload the workspace.
      if input_string == output_string:
        return
  except IOError:
    # Ignore errors if the file doesn't exist.
    pass

  with open(workspace_file, 'w') as output_file:
    output_file.write(output_string)


def _TargetFromSpec(old_spec, params):
  """Create fake target for xcode-ninja wrapper.

  Builds a minimal target dict whose only action shells out to ninja,
  pointing CONFIGURATION_BUILD_DIR at the ninja output directory.

  Arguments:
    old_spec: The original target's spec dict.
    params: Dict of global options for gyp, or None (used by the
        sources-for-indexing pseudo-target, which needs no ninja action).
  Returns:
    A new target spec dict for the wrapper project.
  """
  # Determine ninja top level build dir (e.g. /path/to/out).
  ninja_toplevel = None
  jobs = 0
  if params:
    options = params['options']
    ninja_toplevel = \
        os.path.join(options.toplevel_dir,
                     gyp.generator.ninja.ComputeOutputDir(params))
    jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)

  target_name = old_spec.get('target_name')
  product_name = old_spec.get('product_name', target_name)
  product_extension = old_spec.get('product_extension')

  ninja_target = {}
  ninja_target['target_name'] = target_name
  ninja_target['product_name'] = product_name
  if product_extension:
    ninja_target['product_extension'] = product_extension
  ninja_target['toolset'] = old_spec.get('toolset')
  ninja_target['default_configuration'] = old_spec.get('default_configuration')
  ninja_target['configurations'] = {}

  # Tell Xcode to look in |ninja_toplevel| for build products.
  new_xcode_settings = {}
  if ninja_toplevel:
    new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
        "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel

  if 'configurations' in old_spec:
    for config in old_spec['configurations']:
      old_xcode_settings = \
          old_spec['configurations'][config].get('xcode_settings', {})
      # iOS targets must carry the deployment target forward and skip
      # code signing (ninja produced the binary already).
      if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
        new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
        new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
            old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
      ninja_target['configurations'][config] = {}
      ninja_target['configurations'][config]['xcode_settings'] = \
          new_xcode_settings

  ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
  ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
  ninja_target['ios_watchkit_extension'] = \
      old_spec.get('ios_watchkit_extension', 0)
  ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
  ninja_target['type'] = old_spec['type']

  if ninja_toplevel:
    ninja_target['actions'] = [
      {
        'action_name': 'Compile and copy %s via ninja' % target_name,
        'inputs': [],
        'outputs': [],
        'action': [
          'env',
          'PATH=%s' % os.environ['PATH'],
          'ninja',
          '-C',
          new_xcode_settings['CONFIGURATION_BUILD_DIR'],
          target_name,
        ],
        'message': 'Compile and copy %s via ninja' % target_name,
      },
    ]
    if jobs > 0:
      # str() keeps the argv a list of strings whatever the type of the
      # xcode_ninja_jobs generator flag (int or string).
      ninja_target['actions'][0]['action'].extend(('-j', str(jobs)))

  return ninja_target


def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
  """Limit targets for Xcode wrapper.

  Xcode sometimes performs poorly with too many targets, so only include
  proper executable targets, with filters to customize.
  Arguments:
    target_extras: Regular expression to always add, matching any target.
    executable_target_pattern: Regular expression limiting executable targets.
    spec: Specifications for target.
  """
  target_name = spec.get('target_name')
  # Always include targets matching target_extras.
  if target_extras is not None and re.search(target_extras, target_name):
    return True

  # Otherwise just show executable targets.
  if spec.get('type', '') == 'executable' and \
     spec.get('product_extension', '') != 'bundle':

    # If there is a filter and the target does not match, exclude the target.
    if executable_target_pattern is not None:
      if not re.search(executable_target_pattern, target_name):
        return False
  else:
    return False
  return True


def CreateWrapper(target_list, target_dicts, data, params):
  """Initialize targets for the ninja wrapper.

  This sets up the necessary variables in the targets to generate Xcode
  projects that use ninja as an external builder.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dict of flattened build files keyed on gyp path.
    params: Dict of global options for gyp.
  Returns:
    Tuple (new_target_list, new_target_dicts, new_data) describing the
    wrapper project plus a sources-for-indexing pseudo-target.
  """
  orig_gyp = params['build_files'][0]
  for gyp_name, gyp_dict in data.items():
    if gyp_name == orig_gyp:
      depth = gyp_dict['_DEPTH']

  # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
  # and prepend .ninja before the .gyp extension.
  generator_flags = params.get('generator_flags', {})
  main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
  if main_gyp is None:
    (build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
    main_gyp = build_file_root + ".ninja" + build_file_ext

  # Create new |target_list|, |target_dicts| and |data| data structures.
  new_target_list = []
  new_target_dicts = {}
  new_data = {}

  # Set base keys needed for |data|.
  new_data[main_gyp] = {}
  new_data[main_gyp]['included_files'] = []
  new_data[main_gyp]['targets'] = []
  new_data[main_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})

  # Normally the xcode-ninja generator includes only valid executable targets.
  # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
  # executable targets that match the pattern. (Default all)
  executable_target_pattern = \
      generator_flags.get('xcode_ninja_executable_target_pattern', None)

  # For including other non-executable targets, add the matching target name
  # to the |xcode_ninja_target_pattern| regular expression. (Default none)
  target_extras = generator_flags.get('xcode_ninja_target_pattern', None)

  for old_qualified_target in target_list:
    spec = target_dicts[old_qualified_target]
    if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
      # Add to new_target_list.
      target_name = spec.get('target_name')
      new_target_name = '%s:%s#target' % (main_gyp, target_name)
      new_target_list.append(new_target_name)

      # Add to new_target_dicts.
      new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)

      # Add to new_data.
      for old_target in data[old_qualified_target.split(':')[0]]['targets']:
        if old_target['target_name'] == target_name:
          new_data_target = {}
          new_data_target['target_name'] = old_target['target_name']
          new_data_target['toolset'] = old_target['toolset']
          new_data[main_gyp]['targets'].append(new_data_target)

  # Create sources target.
  sources_target_name = 'sources_for_indexing'
  sources_target = _TargetFromSpec(
    { 'target_name' : sources_target_name,
      'toolset': 'target',
      'default_configuration': 'Default',
      'mac_bundle': '0',
      'type': 'executable'
    }, None)

  # Tell Xcode to look everywhere for headers.
  sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }

  sources = []
  for target, target_dict in target_dicts.items():
    base = os.path.dirname(target)
    files = target_dict.get('sources', []) + \
            target_dict.get('mac_bundle_resources', [])
    for action in target_dict.get('actions', []):
      files.extend(action.get('inputs', []))
    # Remove files starting with $. These are mostly intermediate files for the
    # build system.
    files = [ file for file in files if not file.startswith('$')]

    # Make sources relative to root build file.
    relative_path = os.path.dirname(main_gyp)
    sources += [ os.path.relpath(os.path.join(base, file), relative_path)
                 for file in files ]

  sources_target['sources'] = sorted(set(sources))

  # Put sources_to_index in it's own gyp.
  sources_gyp = \
      os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
  fully_qualified_target_name = \
      '%s:%s#target' % (sources_gyp, sources_target_name)

  # Add to new_target_list, new_target_dicts and new_data.
  new_target_list.append(fully_qualified_target_name)
  new_target_dicts[fully_qualified_target_name] = sources_target
  new_data_target = {}
  new_data_target['target_name'] = sources_target['target_name']
  new_data_target['_DEPTH'] = depth
  new_data_target['toolset'] = "target"
  new_data[sources_gyp] = {}
  new_data[sources_gyp]['targets'] = []
  new_data[sources_gyp]['included_files'] = []
  new_data[sources_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})
  new_data[sources_gyp]['targets'].append(new_data_target)

  # Write workspace to file.
  _WriteWorkspace(main_gyp, sources_gyp, params)
  return (new_target_list, new_target_dicts, new_data)
mit
hydroshare/hydroshare
hs_file_types/tests/test_raster_metadata.py
1
61525
import os

from django.test import TransactionTestCase
from django.db import IntegrityError
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from rest_framework.exceptions import ValidationError as DRF_ValidationError

from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.models import Coverage, ResourceFile
from hs_core.views.utils import move_or_rename_file_or_folder
from hs_file_types.models import GeoRasterLogicalFile, GeoRasterFileMetaData, GenericLogicalFile
from hs_file_types.models.base import METADATA_FILE_ENDSWITH, RESMAP_FILE_ENDSWITH
from .utils import assert_raster_file_type_metadata, CompositeResourceTestMixin, \
    get_path_with_no_file_extension
from hs_geo_raster_resource.models import OriginalCoverage, CellInformation, BandInformation


class RasterFileTypeTest(MockIRODSTestCaseMixin, TransactionTestCase,
                         CompositeResourceTestMixin):
    """Tests creating GeoRaster aggregations (logical files) from tif/zip
    uploads in a composite resource, including metadata extraction."""

    def setUp(self):
        super(RasterFileTypeTest, self).setUp()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        self.user = hydroshare.create_account(
            'user1@nowhere.com',
            username='user1',
            first_name='Creator_FirstName',
            last_name='Creator_LastName',
            superuser=False,
            groups=[self.group]
        )
        self.res_title = 'Testing Raster File Type'

        # data files to use for testing
        self.logan_tif_1_file_name = 'logan1.tif'
        self.logan_tif_2_file_name = 'logan2.tif'
        self.logan_vrt_file_name = 'logan.vrt'
        self.logan_vrt_file_name2 = 'logan2.vrt'
        self.logan_tif_1_file = 'hs_file_types/tests/{}'.format(self.logan_tif_1_file_name)
        self.logan_tif_2_file = 'hs_file_types/tests/{}'.format(self.logan_tif_2_file_name)
        self.logan_vrt_file = 'hs_file_types/tests/{}'.format(self.logan_vrt_file_name)
        self.logan_vrt_file2 = 'hs_file_types/tests/{}'.format(self.logan_vrt_file_name2)
        self.raster_file_name = 'small_logan.tif'
        self.raster_zip_file_name = 'logan_vrt_small.zip'
        self.invalid_raster_file_name = 'raster_tif_invalid.tif'
        self.invalid_raster_zip_file_name = 'bad_small_vrt.zip'
        self.raster_file = 'hs_file_types/tests/{}'.format(self.raster_file_name)
        self.raster_zip_file = 'hs_file_types/tests/{}'.format(self.raster_zip_file_name)
        self.invalid_raster_file = 'hs_file_types/tests/{}'.format(self.invalid_raster_file_name)
        self.invalid_raster_zip_file = 'hs_file_types/tests/{}'.format(
            self.invalid_raster_zip_file_name)

    def test_create_aggregation_from_tif_file_1(self):
        # here we are using a valid raster tif file that exists at the root of the folder
        # hierarchy for setting it to Geo Raster file type which includes metadata extraction
        # a new folder should be created as part of the aggregation creation where the resource
        # files of the aggregation should live
        # location of raster file before aggregation: small_logan.tif
        # location of raster file after aggregation: small_logan/small_logan.tif
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # set the tif file to GeoRasterLogicalFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
        for res_file in self.composite_resource.files.all():
            print(res_file.short_path)
        self.assertEqual(self.composite_resource.files.all().count(), 2)

        # test extracted raster file type metadata
        assert_raster_file_type_metadata(self, aggr_folder_path='')

        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertTrue(isinstance(logical_file, GeoRasterLogicalFile))
        self.assertTrue(logical_file.metadata, GeoRasterFileMetaData)
        # there should not be any file level keywords at this point
        self.assertEqual(logical_file.metadata.keywords, [])
        self.composite_resource.delete()

    def test_create_aggregation_from_tif_file_2(self):
        # here we are using a valid raster tif file that exists in a folder
        # for setting it to Geo Raster file type - no new
        # folder should be created in this case
        # raster file location before aggregation is created: /raster_aggr/small_logan.tif
        # raster file location after aggregation is created: /raster_aggr/small_logan.tif
        self.create_composite_resource()
        # create a folder to place the tif file before creating an aggregation from the tif file
        new_folder = 'raster_aggr'
        ResourceFile.create_folder(self.composite_resource, new_folder)
        self.add_file_to_resource(file_to_add=self.raster_file, upload_folder=new_folder)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # set the tif file to GeoRasterLogicalFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
        self.assertEqual(self.composite_resource.files.all().count(), 2)

        # test extracted raster file type metadata
        assert_raster_file_type_metadata(self, aggr_folder_path=new_folder)

        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertTrue(isinstance(logical_file, GeoRasterLogicalFile))
        self.assertTrue(logical_file.metadata, GeoRasterFileMetaData)
        # there should not be any file level keywords at this point
        self.assertEqual(logical_file.metadata.keywords, [])
        self.composite_resource.delete()

    def test_create_aggregation_from_tif_file_3(self):
        # here we are using a valid raster tif file that exists in a folder
        # for setting it to Geo Raster file type. The same folder contains another file
        # that is not going to be part of the raster aggregation
        # location raster file before aggregation is created: /my_folder/small_logan.tif
        # location of another file before aggregation is created: /my_folder/raster_tif_invalid.tif
        # location of raster file after aggregation is created:
        # /my_folder/small_logan.tif
        # location of another file after aggregation is created: /my_folder/raster_tif_invalid.tif
        self.create_composite_resource()
        # create a folder to place the tif file before creating an aggregation from the tif file
        new_folder = 'my_folder'
        ResourceFile.create_folder(self.composite_resource, new_folder)
        self.add_file_to_resource(file_to_add=self.raster_file, upload_folder=new_folder)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # add another file to the same folder
        self.add_file_to_resource(file_to_add=self.invalid_raster_file, upload_folder=new_folder)
        self.assertEqual(self.composite_resource.files.all().count(), 2)

        # set the tif file to GeoRasterLogicalFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
        self.assertEqual(self.composite_resource.files.all().count(), 3)

        # test logical file/aggregation
        self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
        logical_file = list(self.composite_resource.logical_files)[0]
        self.assertEqual(logical_file.files.count(), 2)
        base_tif_file_name, _ = os.path.splitext(self.raster_file_name)
        expected_file_folder = new_folder
        for res_file in logical_file.files.all():
            self.assertEqual(res_file.file_folder, expected_file_folder)
        self.assertTrue(isinstance(logical_file, GeoRasterLogicalFile))
        self.assertTrue(logical_file.metadata, GeoRasterFileMetaData)

        # test the location of the file that's not part of the raster aggregation
        other_res_file = None
        for res_file in self.composite_resource.files.all():
            if not res_file.has_logical_file:
                other_res_file = res_file
                break
        self.assertEqual(other_res_file.file_folder, new_folder)
        self.composite_resource.delete()

    def test_create_aggregation_from_tif_file_4(self):
        # here we are using a valid raster tif file that exists in a folder
        # for setting it to Geo Raster file type. The same folder contains another folder
        # a new folder should be created in this case to represent the raster aggregation
        # location raster file before aggregation is created: /my_folder/small_logan.tif
        # location of another file before aggregation is created: /my_folder/another_folder
        # location of raster file after aggregation is created:
        # /my_folder/small_logan/small_logan.tif
        self.create_composite_resource()
        # create a folder to place the tif file before creating an aggregation from the tif file
        new_folder = 'my_folder'
        ResourceFile.create_folder(self.composite_resource, new_folder)
        another_folder = '{}/another_folder'.format(new_folder)
        ResourceFile.create_folder(self.composite_resource, another_folder)
        self.add_file_to_resource(file_to_add=self.raster_file, upload_folder=new_folder)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # set the tif file to GeoRasterLogicalFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
        self.assertEqual(self.composite_resource.files.all().count(), 2)

        # test logical file/aggregation
        self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
        logical_file = list(self.composite_resource.logical_files)[0]
        self.assertEqual(logical_file.files.count(), 2)
        base_tif_file_name, _ = os.path.splitext(self.raster_file_name)
        expected_file_folder = '{}'.format(new_folder)
        for res_file in logical_file.files.all():
            self.assertEqual(res_file.file_folder, expected_file_folder)
        self.assertTrue(isinstance(logical_file, GeoRasterLogicalFile))
        self.assertTrue(logical_file.metadata, GeoRasterFileMetaData)
        self.composite_resource.delete()

    def test_create_aggregation_from_zip_file_1(self):
        # here we are using a valid raster zip file that exist at the root of the folder hierarchy
        # for setting it to Geo Raster file type which includes metadata extraction
        # a new folder should be created in this case where the extracted files that are part of
        # the aggregation should exist
        # location of the zip file before aggregation: logan_vrt_small.zip
        # location of the tif file after aggregation: logan_vrt_small/small_logan.tif
        # location of the zip file after after aggregation: zip file should not exist
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_zip_file)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # set the zip file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)

        # test aggregation
        base_file_name, _ = os.path.splitext(res_file.file_name)
        self._test_aggregation_from_zip_file(aggr_folder_path='')
        self.composite_resource.delete()

    def test_create_aggregation_from_zip_file_2(self):
        # here we are using a valid raster zip file that exist in a folder
        # for setting it to Geo Raster file type which includes metadata extraction
        # no new folder should be created in this case
        # location of the raster file before aggregation: raster-aggr/small_logan.tif
        # location of the raster file after aggregation: raster-aggr/small_logan.tif
        self.create_composite_resource()
        # create a folder to place the zip file before creating an aggregation from the zip file
        new_folder = 'raster_aggr'
        ResourceFile.create_folder(self.composite_resource, new_folder)
        self.add_file_to_resource(file_to_add=self.raster_zip_file, upload_folder=new_folder)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # set the zip file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)

        # test aggregation
        self._test_aggregation_from_zip_file(aggr_folder_path=new_folder)
        self.composite_resource.delete()

    def test_create_aggregation_from_multiple_tif_with_vrt(self):
        """Here we are testing when there are multiple tif files along with a vrt file at the
        same directory location, using one of the tif files to create an aggregation, should
        result in a new aggregation that contains all the tif files and the vrt file"""

        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.logan_tif_1_file)
        self.add_file_to_resource(file_to_add=self.logan_tif_2_file)
        res_file_tif = self.composite_resource.files.first()
        self.add_file_to_resource(file_to_add=self.logan_vrt_file)
        self.assertEqual(self.composite_resource.files.all().count(), 3)

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file_tif.has_logical_file, False)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)

        # set the tif file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file_tif.id)

        # test aggregation
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.has_logical_file, True)
        self.composite_resource.delete()
    def test_aggregation_validation(self):
        """Tests when a tif file is listed by more than one vrt file, validation should
        block the creation of the aggregation with appropriate messaging"""

        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.logan_tif_1_file)
        self.add_file_to_resource(file_to_add=self.logan_tif_2_file)
        res_file_tif = self.composite_resource.files.first()
        self.add_file_to_resource(file_to_add=self.logan_vrt_file)
        self.add_file_to_resource(file_to_add=self.logan_vrt_file2)
        self.assertEqual(self.composite_resource.files.all().count(), 4)
        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file_tif.has_logical_file, False)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        # set the tif file to GeoRasterFile type
        with self.assertRaises(ValidationError) as validation_error:
            GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                               res_file_tif.id)
        # NOTE(review): stray debug print; also `.message` is only set on a
        # single-message ValidationError — consider removing or using str(exception)
        print(validation_error.exception.message)
        self.assertTrue("is listed by more than one vrt file"
                        in validation_error.exception.message)
        # test aggregation does not exist
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        self.composite_resource.delete()

    def test_create_aggregation_from_multiple_tif_without_vrt(self):
        """Here we are testing when there are multiple tif files and no vrt file at the
        same directory location, using one of the tif files to create an aggregation,
        should result in a new aggregation that contains only the selected tif """

        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.logan_tif_1_file)
        res_file_tif = self.composite_resource.files.first()
        self.add_file_to_resource(file_to_add=self.logan_tif_2_file)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file_tif.has_logical_file, False)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        # set the tif file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file_tif.id)
        # test aggregation
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        self.assertEqual(res_file_tif.file_name, self.logan_tif_1_file_name)
        # only the selected tif (plus the generated .vrt) should join the aggregation
        for res_file in self.composite_resource.files.all():
            if res_file_tif.file_name == res_file.file_name or \
                    res_file.extension.lower() == '.vrt':
                self.assertEqual(res_file.has_logical_file, True)
            else:
                self.assertEqual(res_file.has_logical_file, False)
        self.composite_resource.delete()

    def test_create_aggregation_with_missing_tif_with_vrt(self):
        """Here we are testing when there is a vrt file, selecting a tif file from the
        same location for creating aggregation will fail if all tif files referenced in
        vrt file does not exist at that location """

        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.logan_tif_1_file)
        res_file_tif = self.composite_resource.files.first()
        self.add_file_to_resource(file_to_add=self.logan_vrt_file)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file_tif.has_logical_file, False)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        # set the tif file to GeoRasterFile type should raise exception
        with self.assertRaises(ValidationError):
            GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                               res_file_tif.id)
        # test aggregation
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        self.composite_resource.delete()

    def test_create_aggregation_with_extra_tif_with_vrt(self):
        """Here we are testing multiple raster aggregations in the same folder.
        Two aggregations are added to the composite resource in the same folder """

        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.logan_tif_1_file)
        res_file_tif = self.composite_resource.files.first()
        self.add_file_to_resource(file_to_add=self.logan_tif_2_file)
        self.add_file_to_resource(file_to_add=self.logan_vrt_file)
        # add the extra tif file
        lone_tif_file = self.add_file_to_resource(file_to_add=self.raster_file)
        self.assertEqual(self.composite_resource.files.all().count(), 4)
        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file_tif.has_logical_file, False)
        # test that raster aggregations may exist in the same folder next to each other
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file_tif.id)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           lone_tif_file.id)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 2)
        self.composite_resource.delete()

    def test_set_file_type_to_geo_raster_invalid_file_1(self):
        # here we are using an invalid raster tif file for setting it
        # to Geo Raster file type which should fail
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.invalid_raster_file)
        self._test_invalid_file()
        self.composite_resource.delete()

    def test_set_file_type_to_geo_raster_invalid_file_2(self):
        # here we are using a raster tif file for setting it
        # to Geo Raster file type which already been previously set to this file
        # type - should fail
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()
        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)
        # set tif file to GeoRasterFileType
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # check that the resource file is associated with a logical file
        res_file = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, '.tif')[0]
        self.assertEqual(res_file.has_logical_file, True)
        self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
        # trying to set this tif file again to geo raster file type should raise
        # ValidationError
        with self.assertRaises(ValidationError):
            GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                               res_file.id)
        self.composite_resource.delete()

    def test_set_file_type_to_geo_raster_invalid_file_3(self):
        # here we are using an invalid raster zip file for setting it
        # to Geo Raster file type - should fail
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.invalid_raster_zip_file)
        self._test_invalid_file()
        self.composite_resource.delete()

    def test_metadata_CRUD(self):
        # this is test metadata related to GeoRasterLogicalFile
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # extract metadata by setting to geo raster file type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        res_file = self.composite_resource.files.first()

        # test that we can update raster specific metadata at the file level
        # test that we can update dataset_name of the logical file object
        logical_file = res_file.logical_file
        self.assertEqual(logical_file.dataset_name, 'small_logan')
        logical_file.dataset_name = "big_logan"
        logical_file.save()
        logical_file = res_file.logical_file
        self.assertEqual(logical_file.dataset_name, 'big_logan')

        # delete default original coverage metadata
        self.assertNotEqual(logical_file.metadata.originalCoverage, None)
        logical_file.metadata.originalCoverage.delete()

        # create new original coverage metadata with meaningful value
        value = {"northlimit": 12, "projection": "transverse_mercator",
                 "units": "meter", "southlimit": 10, "eastlimit": 23, "westlimit": 2}
        logical_file.metadata.create_element('originalcoverage', value=value)
        self.assertEqual(logical_file.metadata.originalCoverage.value, value)

        # multiple original coverage elements are not allowed - should raise exception
        with self.assertRaises(IntegrityError):
            logical_file.metadata.create_element('originalcoverage', value=value)

        # delete default cell information element
        self.assertNotEqual(logical_file.metadata.cellInformation, None)
        logical_file.metadata.cellInformation.delete()

        # create new cell information metadata with meaningful value
        logical_file.metadata.create_element('cellinformation', name='cellinfo',
                                             cellDataType='Float32',
                                             rows=1660, columns=985,
                                             cellSizeXValue=30.0,
                                             cellSizeYValue=30.0,
                                             )
        cell_info = logical_file.metadata.cellInformation
        self.assertEqual(cell_info.rows, 1660)
        self.assertEqual(cell_info.columns, 985)
        self.assertEqual(cell_info.cellSizeXValue, 30.0)
        self.assertEqual(cell_info.cellSizeYValue, 30.0)
        self.assertEqual(cell_info.cellDataType, 'Float32')

        # multiple cell Information elements are not allowed - should raise exception
        with self.assertRaises(IntegrityError):
            logical_file.metadata.create_element('cellinformation', name='cellinfo',
                                                 cellDataType='Float32',
                                                 rows=1660, columns=985,
                                                 cellSizeXValue=30.0,
                                                 cellSizeYValue=30.0,
                                                 )

        # delete default band information element
        self.assertNotEqual(logical_file.metadata.bandInformations, None)
        logical_file.metadata.bandInformations.first().delete()

        # create band information element with meaningful value
        logical_file.metadata.create_element('bandinformation', name='bandinfo',
                                             variableName='diginal elevation',
                                             variableUnit='meter',
                                             method='this is method',
                                             comment='this is comment',
                                             maximumValue=1000, minimumValue=0,
                                             noDataValue=-9999)
        band_info = logical_file.metadata.bandInformations.first()
        self.assertEqual(band_info.name, 'bandinfo')
        self.assertEqual(band_info.variableName, 'diginal elevation')
        self.assertEqual(band_info.variableUnit, 'meter')
        self.assertEqual(band_info.method, 'this is method')
        self.assertEqual(band_info.comment, 'this is comment')
        # note: numeric band values are stored/compared as strings
        self.assertEqual(band_info.maximumValue, '1000')
        self.assertEqual(band_info.minimumValue, '0')
        self.assertEqual(band_info.noDataValue, '-9999')

        # multiple band information elements are allowed
        logical_file.metadata.create_element('bandinformation', name='bandinfo',
                                             variableName='diginal elevation2',
                                             variableUnit='meter',
                                             method='this is method',
                                             comment='this is comment',
                                             maximumValue=1000, minimumValue=0,
                                             noDataValue=-9999)
        self.assertEqual(logical_file.metadata.bandInformations.all().count(), 2)

        # test metadata delete
        # original coverage deletion is not allowed
        with self.assertRaises(ValidationError):
            logical_file.metadata.delete_element(
                'originalcoverage', logical_file.metadata.originalCoverage.id)

        # cell information deletion is not allowed
        with self.assertRaises(ValidationError):
            logical_file.metadata.delete_element(
                'cellinformation', logical_file.metadata.cellInformation.id)

        # band information deletion is not allowed
        with self.assertRaises(ValidationError):
            logical_file.metadata.delete_element(
                'bandinformation', logical_file.metadata.bandInformations.first().id)

        # test metadata update
        # update original coverage element
        value_2 = {"northlimit": 12.5, "projection": "transverse_mercator",
                   "units": "meter", "southlimit": 10.5, "eastlimit": 23.5,
                   "westlimit": 2.5}
        logical_file.metadata.update_element(
            'originalcoverage', logical_file.metadata.originalCoverage.id,
            value=value_2)
        self.assertEqual(logical_file.metadata.originalCoverage.value, value_2)

        # update cell info element
        logical_file.metadata.update_element(
            'cellinformation', logical_file.metadata.cellInformation.id,
            name='cellinfo', cellDataType='Double',
            rows=166, columns=98,
            cellSizeXValue=3.0, cellSizeYValue=3.0,
            )
        cell_info = logical_file.metadata.cellInformation
        self.assertEqual(cell_info.rows, 166)
        self.assertEqual(cell_info.columns, 98)
        self.assertEqual(cell_info.cellSizeXValue, 3.0)
        self.assertEqual(cell_info.cellSizeYValue, 3.0)
        self.assertEqual(cell_info.cellDataType, 'Double')

        # update band info element
        logical_file.metadata.update_element(
            'bandinformation', logical_file.metadata.bandInformations.first().id,
            name='bandinfo',
            variableName='precipitation',
            variableUnit='mm/h',
            method='this is method2',
            comment='this is comment2',
            maximumValue=1001, minimumValue=1,
            noDataValue=-9998
            )
        band_info = logical_file.metadata.bandInformations.first()
        self.assertEqual(band_info.name, 'bandinfo')
        self.assertEqual(band_info.variableName, 'precipitation')
        self.assertEqual(band_info.variableUnit, 'mm/h')
        self.assertEqual(band_info.method, 'this is method2')
        self.assertEqual(band_info.comment, 'this is comment2')
        self.assertEqual(band_info.maximumValue, '1001')
        self.assertEqual(band_info.minimumValue, '1')
        self.assertEqual(band_info.noDataValue, '-9998')

        # test extra_metadata for the logical file
        # there should be no key/value metadata at this point
        self.assertEqual(logical_file.metadata.extra_metadata, {})

        # create key/value metadata
        logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2'}
        logical_file.metadata.save()
        self.assertEqual(logical_file.metadata.extra_metadata,
                         {'key1': 'value 1', 'key2': 'value 2'})

        # update key/value metadata
        logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2',
                                                'key 3': 'value3'}
        logical_file.metadata.save()
        self.assertEqual(logical_file.metadata.extra_metadata,
                         {'key1': 'value 1', 'key2': 'value 2', 'key 3': 'value3'})

        # delete key/value metadata
        logical_file.metadata.extra_metadata = {}
        logical_file.metadata.save()
        self.assertEqual(logical_file.metadata.extra_metadata, {})
        self.composite_resource.delete()

    def test_file_metadata_on_file_delete(self):
        # test that when any file in GeoRasterLogicalFile type is deleted
        # all metadata associated with GeoRasterFileMetaData is deleted
        # test for both .tif and .vrt delete

        # test with deleting of 'tif' file
        self._test_file_metadata_on_file_delete(ext='.tif')
        self.composite_resource.delete()

        # test with deleting of 'vrt' file
        self._test_file_metadata_on_file_delete(ext='.vrt')
        self.composite_resource.delete()

    def test_file_metadata_on_logical_file_delete(self):
        # test that when the GeoRasterFileType is deleted
        # all metadata associated with GeoRasterFileType is deleted
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()

        # create raster aggregation using the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test that we have one logical file of type GeoRasterLogicalFileType as a
        # result of metadata extraction
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file

        # test that we have the metadata elements
        # there should be 2 Coverage objects - one at the resource level and
        # the other one at the file type level
        self.assertEqual(Coverage.objects.count(), 2)
        self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 1)
        self.assertEqual(logical_file.metadata.coverages.all().count(), 1)
        self.assertEqual(OriginalCoverage.objects.count(), 1)
        self.assertEqual(CellInformation.objects.count(), 1)
        self.assertEqual(BandInformation.objects.count(), 1)

        # delete the logical file
        logical_file.logical_delete(self.user)
        # test that we have no logical file of type GeoRasterFileType
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
        # test that all metadata deleted - with resource coverage should still exist
        self.assertEqual(Coverage.objects.count(), 1)
        self.assertEqual(OriginalCoverage.objects.count(), 0)
        self.assertEqual(CellInformation.objects.count(), 0)
        self.assertEqual(BandInformation.objects.count(), 0)
        self.composite_resource.delete()

    def test_file_metadata_on_resource_delete(self):
        # test that when the composite resource is deleted
        # all metadata associated with GeoRasterFileType is deleted
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()

        # create raster aggregation using the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test that we have one logical file of type GeoRasterFileType as a result
        # of metadata extraction
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)

        # test that we have the metadata elements
        # there should be 2 Coverage objects - one at the resource level and
        # the other one at the file type level
        self.assertEqual(Coverage.objects.count(), 2)
        self.assertEqual(OriginalCoverage.objects.count(), 1)
        self.assertEqual(CellInformation.objects.count(), 1)
        self.assertEqual(BandInformation.objects.count(), 1)

        # delete resource
        hydroshare.delete_resource(self.composite_resource.short_id)
        # test that we have no logical file of type GeoRasterFileType
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
        # test that all metadata deleted
        self.assertEqual(Coverage.objects.count(), 0)
        self.assertEqual(OriginalCoverage.objects.count(), 0)
        self.assertEqual(CellInformation.objects.count(), 0)
        self.assertEqual(BandInformation.objects.count(), 0)

    def test_logical_file_delete(self):
        # test that when an instance GeoRasterFileType is deleted
        # all files associated with GeoRasterFileType is deleted
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()

        # extract metadata from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test that we have one logical file of type GeoRasterFileType as a result
        # of metadata extraction
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        logical_file = GeoRasterLogicalFile.objects.first()
        self.assertEqual(logical_file.files.all().count(), 2)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        self.assertEqual(set(self.composite_resource.files.all()),
                         set(logical_file.files.all()))

        # delete the logical file using the custom delete function - logical_delete()
        logical_file.logical_delete(self.user)
        self.assertEqual(self.composite_resource.files.all().count(), 0)
        self.composite_resource.delete()

    def test_remove_aggregation(self):
        # test that when an instance GeoRasterLogicalFile (aggregation) is deleted
        # all files associated with that aggregation is not deleted but the
        # associated metadata is deleted
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()

        # set the tif file to GeoRasterLogicalFile (aggregation)
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test that we have one logical file of type GeoRasterFileType
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
        logical_file = GeoRasterLogicalFile.objects.first()
        self.assertEqual(logical_file.files.all().count(), 2)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        self.assertEqual(set(self.composite_resource.files.all()),
                         set(logical_file.files.all()))

        # delete the aggregation (logical file) object using the remove_aggregation
        # function
        logical_file.remove_aggregation()
        # test there is no GeoRasterLogicalFile object
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        # test there is no GeoRasterFileMetaData object
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
        # check the files associated with the aggregation not deleted
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        self.composite_resource.delete()

    def test_content_file_delete(self):
        # test that when any file that is part of an instance GeoRasterFileType is
        # deleted all files associated with GeoRasterFileType is deleted

        # test deleting of tif file
        self._content_file_delete('.tif')

        # test deleting of vrt file
        self._content_file_delete('.vrt')
        self.composite_resource.delete()

    def test_aggregation_file_rename(self):
        # test that a file can't renamed for any resource file
        # that's part of the GeoRaster logical file
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()
        # create aggregation from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test renaming of files that are associated with aggregation raises exception
        self.assertEqual(self.composite_resource.files.count(), 2)

        for res_file in self.composite_resource.files.all():
            base_file_name, ext = os.path.splitext(res_file.file_name)
            src_path = 'data/contents/{}'.format(res_file.file_name)
            new_file_name = 'some_raster{}'.format(ext)
            self.assertNotEqual(res_file.file_name, new_file_name)
            tgt_path = 'data/contents/{}'.format(new_file_name)
            with self.assertRaises(DRF_ValidationError):
                move_or_rename_file_or_folder(self.user,
                                              self.composite_resource.short_id,
                                              src_path, tgt_path)

        self.composite_resource.delete()

    def test_aggregation_file_move(self):
        # test any resource file that's part of the GeoRaster logical file can't be
        # moved
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()
        # create the aggregation using the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test renaming of files that are associated with raster LFO - which should
        # raise exception
        self.assertEqual(self.composite_resource.files.count(), 2)
        new_folder = 'georaster_aggr'
        ResourceFile.create_folder(self.composite_resource, new_folder)

        # moving any of the resource files to this new folder should raise exception
        tgt_path = 'data/contents/{}'.format(new_folder)
        for res_file in self.composite_resource.files.all():
            with self.assertRaises(DRF_ValidationError):
                src_path = os.path.join('data', 'contents', res_file.short_path)
                move_or_rename_file_or_folder(self.user,
                                              self.composite_resource.short_id,
                                              src_path, tgt_path)
        self.composite_resource.delete()

    def test_aggregation_folder_rename(self):
        # test changes to aggregation name, aggregation metadata xml file path, and
        # aggregation resource map xml file path on folder name change
        self.create_composite_resource()
        folder_for_raster = 'raster_folder'
        ResourceFile.create_folder(self.composite_resource, folder_for_raster)
        self.add_file_to_resource(file_to_add=self.raster_file,
                                  upload_folder=folder_for_raster)
        res_file = self.composite_resource.files.first()
        base_file_name, ext = os.path.splitext(res_file.file_name)
        # create aggregation from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        self.assertEqual(self.composite_resource.files.count(), 2)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, folder_for_raster)

        # test aggregation name
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        aggregation_name = logical_file.aggregation_name

        # test aggregation xml file paths
        vrt_file_path = get_path_with_no_file_extension(aggregation_name)
        expected_meta_file_path = '{0}{1}'.format(vrt_file_path,
                                                  METADATA_FILE_ENDSWITH)
        self.assertEqual(logical_file.metadata_short_file_path,
                         expected_meta_file_path)
        expected_map_file_path = '{0}{1}'.format(vrt_file_path, RESMAP_FILE_ENDSWITH)
        self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)

        # test renaming folder
        src_path = 'data/contents/{}'.format(folder_for_raster)
        tgt_path = 'data/contents/{}_1'.format(folder_for_raster)
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      src_path, tgt_path)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, '{}_1'.format(folder_for_raster))

        # test aggregation name update
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertNotEqual(logical_file.aggregation_name, aggregation_name)
        aggregation_name = logical_file.aggregation_name

        # test aggregation xml file paths
        vrt_file_path = get_path_with_no_file_extension(aggregation_name)
        expected_meta_file_path = '{0}{1}'.format(vrt_file_path,
                                                  METADATA_FILE_ENDSWITH)
        self.assertEqual(logical_file.metadata_short_file_path,
                         expected_meta_file_path)
        expected_map_file_path = '{0}{1}'.format(vrt_file_path, RESMAP_FILE_ENDSWITH)
        self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)

        self.composite_resource.delete()

    def test_aggregation_parent_folder_rename(self):
        # test changes to aggregation name, aggregation metadata xml file path, and
        # aggregation resource map xml file path on aggregation folder parent folder
        # name change
        self.create_composite_resource()
        folder_for_raster = 'raster_folder'
        ResourceFile.create_folder(self.composite_resource, folder_for_raster)
        self.add_file_to_resource(file_to_add=self.raster_file,
                                  upload_folder=folder_for_raster)
        res_file = self.composite_resource.files.first()
        base_file_name, ext = os.path.splitext(res_file.file_name)
        # create aggregation from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        # test renaming of files that are associated with aggregation raises exception
        self.assertEqual(self.composite_resource.files.count(), 2)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, folder_for_raster)

        # test aggregation name
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        aggregation_name = logical_file.aggregation_name

        # test aggregation xml file paths
        vrt_file_path = get_path_with_no_file_extension(aggregation_name)
        expected_meta_file_path = '{0}{1}'.format(vrt_file_path,
                                                  METADATA_FILE_ENDSWITH)
        self.assertEqual(logical_file.metadata_short_file_path,
                         expected_meta_file_path)
        expected_map_file_path = '{0}{1}'.format(vrt_file_path, RESMAP_FILE_ENDSWITH)
        self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)

        # create a folder to be the parent folder of the aggregation folder
        parent_folder = 'parent_folder'
        ResourceFile.create_folder(self.composite_resource, parent_folder)
        # move the aggregation folder to the parent folder
        src_path = 'data/contents/{}'.format(folder_for_raster)
        tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, folder_for_raster)
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      src_path, tgt_path)

        file_folder = '{}/{}'.format(parent_folder, folder_for_raster)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, file_folder)

        # renaming parent folder
        parent_folder_rename = 'parent_folder_1'
        src_path = 'data/contents/{}'.format(parent_folder)
        tgt_path = 'data/contents/{}'.format(parent_folder_rename)
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      src_path, tgt_path)

        file_folder = '{}/{}'.format(parent_folder_rename, folder_for_raster)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, file_folder)

        # test aggregation name after folder rename
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertNotEqual(logical_file.aggregation_name, aggregation_name)
        aggregation_name = logical_file.aggregation_name

        # test aggregation xml file paths after folder rename
        vrt_file_path = get_path_with_no_file_extension(aggregation_name)
        expected_meta_file_path = '{0}{1}'.format(vrt_file_path,
                                                  METADATA_FILE_ENDSWITH)
        self.assertEqual(logical_file.metadata_short_file_path,
                         expected_meta_file_path)
        expected_map_file_path = '{0}{1}'.format(vrt_file_path, RESMAP_FILE_ENDSWITH)
        self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)

        self.composite_resource.delete()

    def test_aggregation_folder_move(self):
        # test changes to aggregation name, aggregation metadata xml file path, and
        # aggregation resource map xml file path on aggregation folder move
        self.create_composite_resource()
        folder_for_raster = 'raster_folder'
        ResourceFile.create_folder(self.composite_resource, folder_for_raster)
        self.add_file_to_resource(file_to_add=self.raster_file,
                                  upload_folder=folder_for_raster)
        res_file = self.composite_resource.files.first()
        # create aggregation from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        self.assertEqual(self.composite_resource.files.count(), 2)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, folder_for_raster)

        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        aggregation_name = logical_file.aggregation_name
        metadata_short_file_path = logical_file.metadata_short_file_path
        map_short_file_path = logical_file.map_short_file_path

        # create a folder to move the aggregation folder there
        parent_folder = 'parent_folder'
        ResourceFile.create_folder(self.composite_resource, parent_folder)
        # move the aggregation folder to the parent folder
        src_path = 'data/contents/{}'.format(folder_for_raster)
        tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, folder_for_raster)
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      src_path, tgt_path)

        file_folder = '{0}/{1}'.format(parent_folder, folder_for_raster)
        for res_file in self.composite_resource.files.all():
            self.assertEqual(res_file.file_folder, file_folder)

        # test aggregation name update
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertNotEqual(logical_file.aggregation_name, aggregation_name)

        # test aggregation xml file paths
        self.assertNotEqual(logical_file.metadata_short_file_path,
                            metadata_short_file_path)
        self.assertNotEqual(logical_file.map_short_file_path, map_short_file_path)

        self.composite_resource.delete()

    def _test_file_metadata_on_file_delete(self, ext):
        # helper: create a raster aggregation, then delete the content file with the
        # given extension and verify the aggregation-level metadata is gone
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()
        self.assertEqual(Coverage.objects.count(), 0)
        # extract metadata from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        self.assertEqual(Coverage.objects.count(), 2)

        # test that we have one logical file of type GeoRasterLogicalFile Type
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)

        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        # there should be 1 coverage element of type spatial for the raster
        # aggregation
        self.assertEqual(logical_file.metadata.coverages.all().count(), 1)
        self.assertEqual(logical_file.metadata.temporal_coverage, None)
        self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
        self.assertNotEqual(logical_file.metadata.originalCoverage, None)
        self.assertNotEqual(logical_file.metadata.cellInformation, None)
        self.assertNotEqual(logical_file.metadata.bandInformations, None)

        # there should be 1 coverage for the resource
        self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 1)
        # there should be 2 coverage objects - one at the resource level
        # and the other one at the file type level
        self.assertEqual(Coverage.objects.count(), 2)
        self.assertEqual(OriginalCoverage.objects.count(), 1)
        self.assertEqual(CellInformation.objects.count(), 1)
        self.assertEqual(BandInformation.objects.count(), 1)

        # delete content file specified by extension (ext parameter)
        res_file_tif = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, ext)[0]
        hydroshare.delete_resource_file(self.composite_resource.short_id,
                                        res_file_tif.id,
                                        self.user)
        # test that we don't have logical file of type GeoRasterFileType
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
        self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)

        # test that all metadata deleted - with one coverage element at the resource
        # level should still exist
        self.assertEqual(Coverage.objects.count(), 1)
        self.assertEqual(OriginalCoverage.objects.count(), 0)
        self.assertEqual(CellInformation.objects.count(), 0)
        self.assertEqual(BandInformation.objects.count(), 0)

    def _content_file_delete(self, ext):
        # test that when any file that is part of an instance GeoRasterFileType is
        # deleted all files associated with GeoRasterFileType is deleted
        self.create_composite_resource()
        self.add_file_to_resource(file_to_add=self.raster_file)
        res_file = self.composite_resource.files.first()

        # extract metadata from the tif file
        GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                           res_file.id)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)

        # delete the content file specified by the ext (file extension param)
        res_file_tif = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, ext)[0]
        hydroshare.delete_resource_file(self.composite_resource.short_id,
                                        res_file_tif.id,
                                        self.user)

        self.assertEqual(self.composite_resource.files.all().count(), 0)
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)

    def _test_invalid_file(self):
        # helper: the single uploaded file must fail GeoRaster aggregation creation
        # and remain untouched in the resource
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # check that the resource file is not associated with any logical file
        self.assertEqual(res_file.has_logical_file, False)

        # trying to set this invalid tif file to geo raster file type should raise
        # ValidationError
        with self.assertRaises(ValidationError):
            GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user,
                                               res_file.id)

        # test that the invalid file did not get deleted
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        # check that the resource file is not associated with generic logical file
        self.assertEqual(res_file.has_logical_file, False)

    def _test_aggregation_from_zip_file(self, aggr_folder_path):
        # helper: verify the aggregation created from an unzipped raster package
        # test the resource now has 4 files (one vrt file and 2 tif files) and the
        # original zip file
        self.assertEqual(self.composite_resource.files.all().count(), 4)
        tif_files = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, '.tif')
        self.assertEqual(len(tif_files), 2)
        vrt_files = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, '.vrt')
        self.assertEqual(len(vrt_files), 1)

        # check that the logicalfile is associated with 3 files
        self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
        logical_file = GeoRasterLogicalFile.objects.first()
        res_file = self.composite_resource.files.first()
        expected_dataset_name, _ = os.path.splitext(res_file.file_name)
        self.assertEqual(logical_file.dataset_name, expected_dataset_name)
        self.assertEqual(logical_file.has_metadata, True)
        self.assertEqual(logical_file.files.all().count(), 3)
        self.assertEqual((self.composite_resource.files.count() -
                          logical_file.files.count()), 1)

        # check that files in a folder
        if aggr_folder_path:
            for res_file in self.composite_resource.files.all():
                self.assertEqual(res_file.file_folder, aggr_folder_path)

        # check that there is no GenericLogicalFile object
        self.assertEqual(GenericLogicalFile.objects.count(), 0)

        # test that size property of the logical file is equal to sum of size of all
        # files that are part of the logical file
        self.assertEqual(logical_file.size,
                         sum([f.size for f in logical_file.files.all()]))

        # test extracted metadata for the file type
        # geo raster file type should have all the metadata elements
        self.assertEqual(logical_file.metadata.has_all_required_elements(), True)

        # there should be 1 coverage element - box type
        self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
        self.assertEqual(logical_file.metadata.spatial_coverage.type, 'box')

        box_coverage = logical_file.metadata.spatial_coverage
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 42.050028785767275)
        self.assertEqual(box_coverage.value['eastlimit'], -111.5773750264389)
        self.assertEqual(box_coverage.value['southlimit'], 41.98745777902698)
        self.assertEqual(box_coverage.value['westlimit'], -111.65768822411239)

        # testing extended metadata element: original coverage
        ori_coverage = logical_file.metadata.originalCoverage
        self.assertNotEqual(ori_coverage, None)
        self.assertEqual(ori_coverage.value['northlimit'], 4655492.446916306)
        self.assertEqual(ori_coverage.value['eastlimit'], 452174.01909127034)
        self.assertEqual(ori_coverage.value['southlimit'], 4648592.446916306)
        self.assertEqual(ori_coverage.value['westlimit'], 445574.01909127034)
        self.assertEqual(ori_coverage.value['units'], 'meter')
        self.assertEqual(ori_coverage.value['projection'], 'NAD83 / UTM zone 12N')

        # testing extended metadata element: cell information
        cell_info = logical_file.metadata.cellInformation
        self.assertEqual(cell_info.rows, 230)
        self.assertEqual(cell_info.columns, 220)
        self.assertEqual(cell_info.cellSizeXValue, 30.0)
        self.assertEqual(cell_info.cellSizeYValue, 30.0)
        self.assertEqual(cell_info.cellDataType, 'Float32')

        # testing extended metadata element: band information
        self.assertEqual(logical_file.metadata.bandInformations.count(), 1)
        band_info = logical_file.metadata.bandInformations.first()
        self.assertEqual(band_info.noDataValue,
'-3.4028234663852886e+38') self.assertEqual(band_info.maximumValue, '2880.007080078125') self.assertEqual(band_info.minimumValue, '2274.958984375') def test_main_file(self): self.create_composite_resource() self.add_file_to_resource(file_to_add=self.raster_file) self.assertEqual(self.composite_resource.files.all().count(), 1) res_file = self.composite_resource.files.first() GeoRasterLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id) self.assertEqual(1, GeoRasterLogicalFile.objects.count()) self.assertEqual(".vrt", GeoRasterLogicalFile.objects.first().get_main_file_type()) self.assertEqual("small_logan.vrt", GeoRasterLogicalFile.objects.first().get_main_file.file_name)
bsd-3-clause
arowla/scrapelib
scrapelib/__main__.py
6
1789
from . import Scraper, _user_agent import argparse def scrapeshell(): # pragma: no cover # clear argv for IPython import sys orig_argv = sys.argv[1:] sys.argv = sys.argv[:1] try: from IPython import embed except ImportError: print('scrapeshell requires ipython >= 0.11') return try: import lxml.html USE_LXML = True except ImportError: USE_LXML = False parser = argparse.ArgumentParser(prog='scrapeshell', description='interactive python shell for' ' scraping') parser.add_argument('url', help="url to scrape") parser.add_argument('--ua', dest='user_agent', default=_user_agent, help='user agent to make requests with') parser.add_argument('--robots', dest='robots', action='store_true', default=False, help='obey robots.txt') parser.add_argument('-p', '--postdata', dest='postdata', default=None, help="POST data (will make a POST instead of GET)") args = parser.parse_args(orig_argv) scraper = Scraper(follow_robots=args.robots) scraper.user_agent = args.user_agent url = args.url if args.postdata: html = scraper.urlopen(args.url, 'POST', args.postdata) else: html = scraper.urlopen(args.url) if USE_LXML: doc = lxml.html.fromstring(html.bytes) # noqa print('local variables') print('---------------') print('url: %s' % url) print('html: `scrapelib.ResultStr` instance') if USE_LXML: print('doc: `lxml HTML element`') else: print('doc not available: lxml not installed') embed() scrapeshell()
bsd-3-clause
fjorba/invenio
modules/websubmit/lib/functions/Print_Success_Approval_Request.py
39
1559
## This file is part of Invenio. ## Copyright (C) 2008, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """This is Print_Success_Approval_Request. It creates a "success message" that is shown to the user to indicate that their approval request has successfully been registered. """ __revision__ = "$Id$" def Print_Success_Approval_Request(parameters, curdir, form, user_info=None): """ This function creates a "success message" that is to be shown to the user to indicate that their approval request has successfully been registered. @parameters: None. @return: (string) - the "success" message for the user. """ text = """<br /> <div> The approval request for your document has successfully been registered and the referee has been informed.<br /> You will be notified by email when a decision has been made. </div> <br />""" return text
gpl-2.0
liu602348184/django
django/core/validators.py
99
11950
from __future__ import unicode_literals import re from django.core.exceptions import ValidationError from django.utils import six from django.utils.deconstruct import deconstructible from django.utils.encoding import force_text from django.utils.ipv6 import is_valid_ipv6_address from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit from django.utils.translation import ugettext_lazy as _, ungettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) @deconstructible class RegexValidator(object): regex = '' message = _('Enter a valid value.') code = 'invalid' inverse_match = False flags = 0 def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, six.string_types): raise TypeError("If the flags are set, regex must be a regular expression string.") # Compile the regex if it was not passed pre-compiled. if isinstance(self.regex, six.string_types): self.regex = re.compile(self.regex, self.flags) def __call__(self, value): """ Validates that the input matches the regular expression if inverse_match is False, otherwise raises ValidationError. 
""" if not (self.inverse_match is not bool(self.regex.search( force_text(value)))): raise ValidationError(self.message, code=self.code) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) def __ne__(self, other): return not (self == other) @deconstructible class URLValidator(RegexValidator): ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string) # IP patterns ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later) # Host patterns hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]*[a-z' + ul + r'0-9])?' domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]*(?<!-))*' tld_re = r'\.(?:[a-z' + ul + r']{2,}|xn--[a-z0-9]+)\.?' host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)' regex = re.compile( r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately r'(?:\S+(?::\S*)?@)?' # user:pass authentication r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')' r'(?::\d{2,5})?' # port r'(?:[/?#][^\s]*)?' # resource path r'\Z', re.IGNORECASE) message = _('Enter a valid URL.') schemes = ['http', 'https', 'ftp', 'ftps'] def __init__(self, schemes=None, **kwargs): super(URLValidator, self).__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): value = force_text(value) # Check first if the scheme is valid scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code) # Then check full URL try: super(URLValidator, self).__call__(value) except ValidationError as e: # Trivial case failed. 
Try for possible IDN domain if value: scheme, netloc, path, query, fragment = urlsplit(value) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super(URLValidator, self).__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match.groups()[0] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code) url = value integer_validator = RegexValidator( re.compile('^-?\d+\Z'), message=_('Enter a valid integer.'), code='invalid', ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator(object): message = _('Enter a valid email address.') code = 'invalid' user_regex = re.compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string re.IGNORECASE) domain_regex = re.compile( # max length for domain name labels is 63 characters per RFC 1034 r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z', re.IGNORECASE) literal_regex = re.compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) r'\[([A-f0-9:\.]+)\]\Z', re.IGNORECASE) domain_whitelist = ['localhost'] def __init__(self, message=None, code=None, whitelist=None): if message is not None: self.message = message if code is not None: self.code = code if whitelist is not None: self.domain_whitelist = whitelist def __call__(self, value): value = force_text(value) if not value or '@' not in value: raise ValidationError(self.message, code=self.code) user_part, domain_part = value.rsplit('@', 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code) if (domain_part not in self.domain_whitelist and not 
self.validate_domain_part(domain_part)): # Try for possible IDN domain-part try: domain_part = domain_part.encode('idna').decode('ascii') if self.validate_domain_part(domain_part): return except UnicodeError: pass raise ValidationError(self.message, code=self.code) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match.group(1) try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( isinstance(other, EmailValidator) and (self.domain_whitelist == other.domain_whitelist) and (self.message == other.message) and (self.code == other.code) ) validate_email = EmailValidator() slug_re = re.compile(r'^[-a-zA-Z0-9_]+\Z') validate_slug = RegexValidator( slug_re, _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid' ) slug_unicode_re = re.compile(r'^[-\w]+\Z', re.U) validate_unicode_slug = RegexValidator( slug_unicode_re, _("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."), 'invalid' ) ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z') validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid') def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid') def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid') ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def 
ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters returns the appropriate validators for the GenericIPAddressField. This code is here, because it is exactly the same for the model and the form field. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. Supported: %s" % (protocol, list(ip_address_validator_map))) def int_list_validator(sep=',', message=None, code='invalid'): regexp = re.compile('^\d+(?:%s\d+)*\Z' % re.escape(sep)) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_('Enter only digits separated by commas.'), ) @deconstructible class BaseValidator(object): compare = lambda self, a, b: a is not b clean = lambda self, x: x message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value} if self.compare(cleaned, self.limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): return ( isinstance(other, self.__class__) and (self.limit_value == other.limit_value) and (self.message == other.message) and (self.code == other.code) ) @deconstructible class MaxValueValidator(BaseValidator): compare = lambda self, a, b: a > b message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' @deconstructible class MinValueValidator(BaseValidator): compare = lambda self, a, b: a < b message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' @deconstructible class 
MinLengthValidator(BaseValidator): compare = lambda self, a, b: a < b clean = lambda self, x: len(x) message = ungettext_lazy( 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'min_length' @deconstructible class MaxLengthValidator(BaseValidator): compare = lambda self, a, b: a > b clean = lambda self, x: len(x) message = ungettext_lazy( 'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'max_length'
bsd-3-clause
NSLS-II-SRX/ipython_ophyd
profile_xf05id1-noX11/startup/85-bs_callbacks.py
1
3670
# -*- coding: utf-8 -*- """ Created on Wed Feb 24 12:30:06 2016 @author: xf05id1 """ from bluesky.callbacks import CallbackBase,LivePlot #import os #import time as ttime #from databroker import DataBroker as db, get_events #from databroker.databroker import fill_event import filestore.api as fsapi #from metadatastore.commands import run_start_given_uid, descriptors_by_start #import matplotlib.pyplot as plt from xray_vision.backend.mpl.cross_section_2d import CrossSection #from .callbacks import CallbackBase #import numpy as np #import doct #from databroker import DataBroker as db i0_baseline = 7.24e-10 class NormalizeLivePlot(LivePlot): def __init__(self, *args, norm_key=None, **kwargs): super().__init__(*args, **kwargs) if norm_key is None: raise RuntimeError("norm key is required kwarg") self._norm = norm_key def event(self, doc): "Update line with data from this Event." try: if self.x is not None: # this try/except block is needed because multiple event streams # will be emitted by the RunEngine and not all event streams will # have the keys we want new_x = doc['data'][self.x] else: new_x = doc['seq_num'] new_y = doc['data'][self.y] new_norm = doc['data'][self._norm] except KeyError: # wrong event stream, skip it return self.y_data.append(new_y / abs(new_norm-i0_baseline)) self.x_data.append(new_x) self.current_line.set_data(self.x_data, self.y_data) # Rescale and redraw. self.ax.relim(visible_only=True) self.ax.autoscale_view(tight=True) self.ax.figure.canvas.draw_idle() #class LiveImagePiXi(CallbackBase): """ Stream 2D images in a cross-section viewer. Parameters ---------- field : string name of data field in an Event Note ---- Requires a matplotlib fix that is not released as of this writing. The relevant commit is a951b7. 
""" # def __init__(self, field): # super().__init__() # self.field = field # fig = plt.figure() # self.cs = CrossSection(fig) # self.cs._fig.show() # def event(self, doc): # #uid = doc['data'][self.field] # #data = fsapi.retrieve(uid) # data = doc['data']['pixi_image'] # self.cs.update_image(data) # self.cs._fig.canvas.draw() # self.cs._fig.canvas.flush_events() # def make_live_image(image_axes, key): """ Example p-------- fig, ax = plt.subplots() image_axes = ax.imshow(np.zeros((476, 512)), vmin=0, vmax=2) cb = make_live_image(image_axes, 'pixi_image_array_data') RE(Count([pixi]), subs={'event': [cb]}) """ def live_image(name, doc): if name != 'event': return image_axes.set_data(doc['data'][key].reshape(476, 512)) return live_image class SRXLiveImage(CallbackBase): """ Stream 2D images in a cross-section viewer. Parameters ---------- field : string name of data field in an Event Note ---- Requires a matplotlib fix that is not released as of this writing. The relevant commit is a951b7. """ def __init__(self, field): super().__init__() self.field = field fig = plt.figure() self.cs = CrossSection(fig) self.cs._fig.show() def event(self, doc): uid = doc['data'][self.field] data = fsapi.retrieve(uid) self.cs.update_image(data) self.cs._fig.canvas.draw_idle()
bsd-2-clause
sivaprakashniet/push_pull
p2p/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py
357
38875
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import absolute_import import os import re import sys if sys.version_info[0] < 3: from StringIO import StringIO string_types = basestring, text_type = unicode from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) def quote(s): if isinstance(s, unicode): s = s.encode('utf-8') return _quote(s) import urllib2 from urllib2 import (Request, urlopen, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPSHandler, HTTPHandler, HTTPRedirectHandler, build_opener) import httplib import xmlrpclib import Queue as queue from HTMLParser import HTMLParser import htmlentitydefs raw_input = raw_input from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse _userprog = None def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: return match.group(1, 2) return None, host else: from io import StringIO string_types = str, text_type = str from io import TextIOWrapper as file_type import builtins import configparser import shutil from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPSHandler, HTTPHandler, HTTPRedirectHandler, build_opener) from urllib.error import HTTPError, URLError, ContentTooShortError import http.client 
as httplib import urllib.request as urllib2 import xmlrpc.client as xmlrpclib import queue from html.parser import HTMLParser import html.entities as htmlentitydefs raw_input = input from itertools import filterfalse filter = filter try: from ssl import match_hostname, CertificateError except ImportError: class CertificateError(ValueError): pass def _dnsname_to_pat(dn): pats = [] for frag in dn.split(r'.'): if frag == '*': # When '*' is a fragment by itself, it matches a non-empty # dotless fragment. pats.append('[^.]+') else: # Otherwise, '*' matches any dotless fragment. frag = re.escape(frag) pats.append(frag.replace(r'\*', '[^.]*')) return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules are mostly followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_to_pat(value).match(hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. 
if key == 'commonName': if _dnsname_to_pat(value).match(hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") try: from types import SimpleNamespace as Container except ImportError: class Container(object): """ A generic container for when multiple values need to be returned """ def __init__(self, **kwargs): self.__dict__.update(kwargs) try: from shutil import which except ImportError: # Implementation from Python 3.3 def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. 
pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if not normdir in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None # ZipFile is a context manager in 2.7, but not in 2.6 from zipfile import ZipFile as BaseZipFile if hasattr(BaseZipFile, '__enter__'): ZipFile = BaseZipFile else: from zipfile import ZipExtFile as BaseZipExtFile class ZipExtFile(BaseZipExtFile): def __init__(self, base): self.__dict__.update(base.__dict__) def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate class ZipFile(BaseZipFile): def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate def open(self, *args, **kwargs): base = BaseZipFile.open(self, *args, **kwargs) return ZipExtFile(base) try: from platform import python_implementation except ImportError: # pragma: no cover def python_implementation(): """Return a string identifying the Python implementation.""" if 'PyPy' in sys.version: return 'PyPy' if os.name == 'java': return 'Jython' if sys.version.startswith('IronPython'): return 'IronPython' return 'CPython' try: import sysconfig except ImportError: # pragma: no cover from ._backport import sysconfig try: callable = callable except NameError: # pragma: no cover from collections import Callable def 
callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode fsdecode = os.fsdecode except AttributeError: # pragma: no cover _fsencoding = sys.getfilesystemencoding() if _fsencoding == 'mbcs': _fserrors = 'strict' else: _fserrors = 'surrogateescape' def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, text_type): return filename.encode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): if isinstance(filename, text_type): return filename elif isinstance(filename, bytes): return filename.decode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) try: from tokenize import detect_encoding except ImportError: # pragma: no cover from codecs import BOM_UTF8, lookup import re cookie_re = re.compile("coding[:=]\s*([-\w.]+)") def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argment, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. 
If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] # For converting & <-> &amp; etc. 
try: from html import escape except ImportError: from cgi import escape if sys.version_info[:2] < (3, 4): unescape = HTMLParser().unescape else: from html import unescape try: from collections import ChainMap except ImportError: # pragma: no cover from collections import MutableMapping try: from reprlib import recursive_repr as _recursive_repr except ImportError: def _recursive_repr(fillvalue='...'): ''' Decorator to make a repr function return fillvalue for a recursive call ''' def decorating_function(user_function): repr_running = set() def wrapper(self): key = id(self), get_ident() if key in repr_running: return fillvalue repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result # Can't use functools.wraps() here because of bootstrap issues wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) return wrapper return decorating_function class ChainMap(MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. 
''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): return iter(set().union(*self.maps)) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return '{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self): # like Django's Context.push() 'New ChainMap with a new dict followed by all previous maps.' return self.__class__({}, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' 
try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' self.maps[0].clear() try: from imp import cache_from_source except ImportError: # pragma: no cover def cache_from_source(path, debug_override=None): assert path.endswith('.py') if debug_override is None: debug_override = __debug__ if debug_override: suffix = 'c' else: suffix = 'o' return path + suffix try: from collections import OrderedDict except ImportError: # pragma: no cover ## {{{ http://code.activestate.com/recipes/576693/ (r9) # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. 
''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running=None): 'od.__repr__() <==> repr(od)' if not _repr_running: _repr_running = {} call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) try: from logging.config import BaseConfigurator, valid_ident except ImportError: # pragma: no cover IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. 
The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = 
tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. """ CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = staticmethod(__import__) def __init__(self, config): self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' 
+ frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) #rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
""" if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) result = c(**kwargs) if props: for name, value in props.items(): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value
bsd-3-clause
ttglennhall/DjangoGirlsTutorial
myvenv/lib/python3.4/site-packages/pip/commands/install.py
342
12694
import os import sys import tempfile import shutil from pip.req import InstallRequirement, RequirementSet, parse_requirements from pip.log import logger from pip.locations import (src_prefix, virtualenv_no_global, distutils_scheme, build_prefix) from pip.basecommand import Command from pip.index import PackageFinder from pip.exceptions import InstallationError, CommandError, PreviousBuildDirError from pip import cmdoptions class InstallCommand(Command): """ Install packages from: - PyPI (and other indexes) using requirement specifiers. - VCS project urls. - Local project directories. - Local or remote source archives. pip also supports installing from "requirements files", which provide an easy way to specify a whole environment to be installed. """ name = 'install' usage = """ %prog [options] <requirement specifier> ... %prog [options] -r <requirements file> ... %prog [options] [-e] <vcs project url> ... %prog [options] [-e] <local project path> ... %prog [options] <archive url/path> ...""" summary = 'Install packages.' bundle = False def __init__(self, *args, **kw): super(InstallCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option( '-e', '--editable', dest='editables', action='append', default=[], metavar='path/url', help='Install a project in editable mode (i.e. 
setuptools "develop mode") from a local project path or a VCS url.') cmd_opts.add_option(cmdoptions.requirements.make()) cmd_opts.add_option(cmdoptions.build_dir.make()) cmd_opts.add_option( '-t', '--target', dest='target_dir', metavar='dir', default=None, help='Install packages into <dir>.') cmd_opts.add_option( '-d', '--download', '--download-dir', '--download-directory', dest='download_dir', metavar='dir', default=None, help="Download packages into <dir> instead of installing them, regardless of what's already installed.") cmd_opts.add_option(cmdoptions.download_cache.make()) cmd_opts.add_option( '--src', '--source', '--source-dir', '--source-directory', dest='src_dir', metavar='dir', default=src_prefix, help='Directory to check out editable projects into. ' 'The default in a virtualenv is "<venv path>/src". ' 'The default for global installs is "<current dir>/src".') cmd_opts.add_option( '-U', '--upgrade', dest='upgrade', action='store_true', help='Upgrade all packages to the newest available version. ' 'This process is recursive regardless of whether a dependency is already satisfied.') cmd_opts.add_option( '--force-reinstall', dest='force_reinstall', action='store_true', help='When upgrading, reinstall all packages even if they are ' 'already up-to-date.') cmd_opts.add_option( '-I', '--ignore-installed', dest='ignore_installed', action='store_true', help='Ignore the installed packages (reinstalling instead).') cmd_opts.add_option(cmdoptions.no_deps.make()) cmd_opts.add_option( '--no-install', dest='no_install', action='store_true', help="DEPRECATED. Download and unpack all packages, but don't actually install them.") cmd_opts.add_option( '--no-download', dest='no_download', action="store_true", help="DEPRECATED. 
Don't download any packages, just install the ones already downloaded " "(completes an install run with --no-install).") cmd_opts.add_option(cmdoptions.install_options.make()) cmd_opts.add_option(cmdoptions.global_options.make()) cmd_opts.add_option( '--user', dest='use_user_site', action='store_true', help='Install using the user scheme.') cmd_opts.add_option( '--egg', dest='as_egg', action='store_true', help="Install packages as eggs, not 'flat', like pip normally does. This option is not about installing *from* eggs. (WARNING: Because this option overrides pip's normal install logic, requirements files may not behave as expected.)") cmd_opts.add_option( '--root', dest='root_path', metavar='dir', default=None, help="Install everything relative to this alternate root directory.") cmd_opts.add_option( "--compile", action="store_true", dest="compile", default=True, help="Compile py files to pyc", ) cmd_opts.add_option( "--no-compile", action="store_false", dest="compile", help="Do not compile py files to pyc", ) cmd_opts.add_option(cmdoptions.use_wheel.make()) cmd_opts.add_option(cmdoptions.no_use_wheel.make()) cmd_opts.add_option( '--pre', action='store_true', default=False, help="Include pre-release and development versions. By default, pip only finds stable versions.") cmd_opts.add_option(cmdoptions.no_clean.make()) index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) def _build_package_finder(self, options, index_urls, session): """ Create a package finder appropriate to this install command. This method is meant to be overridden by subclasses, not called directly. 
""" return PackageFinder(find_links=options.find_links, index_urls=index_urls, use_wheel=options.use_wheel, allow_external=options.allow_external, allow_unverified=options.allow_unverified, allow_all_external=options.allow_all_external, allow_all_prereleases=options.pre, process_dependency_links= options.process_dependency_links, session=session, ) def run(self, options, args): if ( options.no_install or options.no_download or (options.build_dir != build_prefix) or options.no_clean ): logger.deprecated('1.7', 'DEPRECATION: --no-install, --no-download, --build, ' 'and --no-clean are deprecated. See https://github.com/pypa/pip/issues/906.') if options.download_dir: options.no_install = True options.ignore_installed = True options.build_dir = os.path.abspath(options.build_dir) options.src_dir = os.path.abspath(options.src_dir) install_options = options.install_options or [] if options.use_user_site: if virtualenv_no_global(): raise InstallationError("Can not perform a '--user' install. User site-packages are not visible in this virtualenv.") install_options.append('--user') temp_target_dir = None if options.target_dir: options.ignore_installed = True temp_target_dir = tempfile.mkdtemp() options.target_dir = os.path.abspath(options.target_dir) if os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir): raise CommandError("Target path exists but is not a directory, will not continue.") install_options.append('--home=' + temp_target_dir) global_options = options.global_options or [] index_urls = [options.index_url] + options.extra_index_urls if options.no_index: logger.notify('Ignoring indexes: %s' % ','.join(index_urls)) index_urls = [] if options.use_mirrors: logger.deprecated("1.7", "--use-mirrors has been deprecated and will be removed" " in the future. Explicit uses of --index-url and/or " "--extra-index-url is suggested.") if options.mirrors: logger.deprecated("1.7", "--mirrors has been deprecated and will be removed in " " the future. 
Explicit uses of --index-url and/or " "--extra-index-url is suggested.") index_urls += options.mirrors session = self._build_session(options) finder = self._build_package_finder(options, index_urls, session) requirement_set = RequirementSet( build_dir=options.build_dir, src_dir=options.src_dir, download_dir=options.download_dir, download_cache=options.download_cache, upgrade=options.upgrade, as_egg=options.as_egg, ignore_installed=options.ignore_installed, ignore_dependencies=options.ignore_dependencies, force_reinstall=options.force_reinstall, use_user_site=options.use_user_site, target_dir=temp_target_dir, session=session, pycompile=options.compile, ) for name in args: requirement_set.add_requirement( InstallRequirement.from_line(name, None)) for name in options.editables: requirement_set.add_requirement( InstallRequirement.from_editable(name, default_vcs=options.default_vcs)) for filename in options.requirements: for req in parse_requirements(filename, finder=finder, options=options, session=session): requirement_set.add_requirement(req) if not requirement_set.has_requirements: opts = {'name': self.name} if options.find_links: msg = ('You must give at least one requirement to %(name)s ' '(maybe you meant "pip %(name)s %(links)s"?)' % dict(opts, links=' '.join(options.find_links))) else: msg = ('You must give at least one requirement ' 'to %(name)s (see "pip help %(name)s")' % opts) logger.warn(msg) return try: if not options.no_download: requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle) else: requirement_set.locate_files() if not options.no_install and not self.bundle: requirement_set.install(install_options, global_options, root=options.root_path) installed = ' '.join([req.name for req in requirement_set.successfully_installed]) if installed: logger.notify('Successfully installed %s' % installed) elif not self.bundle: downloaded = ' '.join([req.name for req in requirement_set.successfully_downloaded]) if downloaded: 
logger.notify('Successfully downloaded %s' % downloaded) elif self.bundle: requirement_set.create_bundle(self.bundle_filename) logger.notify('Created bundle in %s' % self.bundle_filename) except PreviousBuildDirError: options.no_clean = True raise finally: # Clean up if (not options.no_clean) and ((not options.no_install) or options.download_dir): requirement_set.cleanup_files(bundle=self.bundle) if options.target_dir: if not os.path.exists(options.target_dir): os.makedirs(options.target_dir) lib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] for item in os.listdir(lib_dir): shutil.move( os.path.join(lib_dir, item), os.path.join(options.target_dir, item) ) shutil.rmtree(temp_target_dir) return requirement_set
mit
Slater-Victoroff/scrapy
scrapy/core/downloader/handlers/ftp.py
130
4134
""" An asynchronous FTP file download handler for scrapy which somehow emulates an http response. FTP connection parameters are passed using the request meta field: - ftp_user (required) - ftp_password (required) - ftp_passive (by default, enabled) sets FTP connection passive mode - ftp_local_filename - If not given, file data will come in the response.body, as a normal scrapy Response, which will imply that the entire file will be on memory. - if given, file data will be saved in a local file with the given name This helps when downloading very big files to avoid memory issues. In addition, for convenience the local file name will also be given in the response body. The status of the built html response will be, by default - 200 in case of success - 404 in case specified file was not found in the server (ftp code 550) or raise corresponding ftp exception otherwise The matching from server ftp command return codes to html response codes is defined in the CODE_MAPPING attribute of the handler class. The key 'default' is used for any code that is not explicitly present among the map keys. You may need to overwrite this mapping if want a different behaviour than default. 
In case of status 200 request, response.headers will come with two keys: 'Local Filename' - with the value of the local filename if given 'Size' - with size of the downloaded data """ import re from io import BytesIO from six.moves.urllib.parse import urlparse, unquote from twisted.internet import reactor from twisted.protocols.ftp import FTPClient, CommandFailed from twisted.internet.protocol import Protocol, ClientCreator from scrapy.http import Response from scrapy.responsetypes import responsetypes class ReceivedDataProtocol(Protocol): def __init__(self, filename=None): self.__filename = filename self.body = open(filename, "w") if filename else BytesIO() self.size = 0 def dataReceived(self, data): self.body.write(data) self.size += len(data) @property def filename(self): return self.__filename def close(self): self.body.close() if self.filename else self.body.seek(0) _CODE_RE = re.compile("\d+") class FTPDownloadHandler(object): CODE_MAPPING = { "550": 404, "default": 503, } def __init__(self, setting): pass def download_request(self, request, spider): parsed_url = urlparse(request.url) creator = ClientCreator(reactor, FTPClient, request.meta["ftp_user"], request.meta["ftp_password"], passive=request.meta.get("ftp_passive", 1)) return creator.connectTCP(parsed_url.hostname, parsed_url.port or 21).addCallback(self.gotClient, request, unquote(parsed_url.path)) def gotClient(self, client, request, filepath): self.client = client protocol = ReceivedDataProtocol(request.meta.get("ftp_local_filename")) return client.retrieveFile(filepath, protocol)\ .addCallbacks(callback=self._build_response, callbackArgs=(request, protocol), errback=self._failed, errbackArgs=(request,)) def _build_response(self, result, request, protocol): self.result = result respcls = responsetypes.from_args(url=request.url) protocol.close() body = protocol.filename or protocol.body.read() headers = {"local filename": protocol.filename or '', "size": protocol.size} return respcls(url=request.url, 
status=200, body=body, headers=headers) def _failed(self, result, request): message = result.getErrorMessage() if result.type == CommandFailed: m = _CODE_RE.search(message) if m: ftpcode = m.group() httpcode = self.CODE_MAPPING.get(ftpcode, self.CODE_MAPPING["default"]) return Response(url=request.url, status=httpcode, body=message) raise result.type(result.value)
bsd-3-clause
VUIIS/seam
seam/freesurfer/v1/__init__.py
1
1331
#!/usr/bin/env python # -*- coding: utf-8 -*- """ V1 defines the following recipes: * :func:`seam.freesurfer.v1.build_recipe` for building a complete script for executing the recon-all pipeline. V1 defines the following functions: * ``recon-all -all`` exposed through :func:`seam.freesurfer.v1.recon_all` * ``recon-all -i`` exposed through :func:`seam.freesurfer.v1.recon_input` * :func:`seam.freesurfer.v1.tkmedit_screenshot_tcl` for generating tcl to take screenshots of a volume loaded in ``tkmedit``. * :func:`seam.freesurfer.v1.tkmedit_screenshot_cmd` for supplying a command to execute ``tkmedit`` with a tcl script. * :func:`seam.freesurfer.v1.tksurfer_screenshot_tcl` for generating a tcl script to take screenshots of a hemisphere using ``tksurfer`` * :func:`seam.freesurfer.v1.tksurfer_screenshot_cmd` for supplying a command to run ``tksurfer`` and generate screenshots. * :func:`seam.freesufer.v1.annot2label_cmd` for building a ``mri_annotation2label`` command. """ __author__ = 'Scott Burns <scott.s.burns@vanderbilt.edu>' __copyright__ = 'Copyright 2013 Vanderbilt University. All Rights Reserved' from .core import recon_all, recon_input, tkmedit_screenshot_tcl, \ tkmedit_screenshot_cmd, tksurfer_screenshot_tcl, tksurfer_screenshot_cmd, \ annot2label_cmd from .recipe import build_recipe
mit
erikruiter2/ipbucket
database.py
1
2871
from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base from ipaddr_func import long2ip from sqlalchemy.exc import IntegrityError engine = create_engine('sqlite:////tmp/ipbucket.db', convert_unicode=True) db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine)) Base = declarative_base() Base.query = db_session.query_property() def init_db(): # import all modules here that might define models so that # they will be registered properly on the metadata. Otherwise # you will have to import them first before calling init_db() import models Base.metadata.create_all(bind=engine) def db_add_entry(tablename, **kwargs): """ Adds an entry to a table, returns an errormessage on failure, on success it returns the id of the created object""" try: n = tablename(**kwargs) db_session.add(n) db_session.commit() except ValueError as e: return "Error while adding entry: " + str(e) except TypeError as e: return "Error while adding entry: " + str(e) except IntegrityError as e: db_session.rollback() return "Error while adding entry: " + str(e.orig.message) + str(e.params) return n.id def db_delete_entry(tablename, **kwargs): """ Deletes an entry from a table, returns an errormessage on failure, on success it returns True""" try: entry = tablename.query.filter_by(**kwargs).first() if entry: db_session.delete(entry) db_session.commit() else: return "Entry not found" except ValueError as e: return "Error while deleting entry: " + str(e) except TypeError as e: return "Error while deleting entry: " + str(e) except IntegrityError as e: db_session.rollback() return "Error while deleting entry: " + str(e.orig.message) + str(e.params) return True def db_query_all(tablename): print tablename.__tablename__ entries = list() entrylist = dict() for entry in tablename.query.all(): for value in vars(entry): if value != '_sa_instance_state': entrylist[value] = vars(entry)[value] 
if value == 'ip' and tablename.__tablename__ == 'ipv4network': entrylist[value] = long2ip(vars(entry)[value]) + "/" + str(vars(entry)['mask']) entries.append(entrylist) entrylist = dict() return entries def db_query_by_id(tablename, _id): entrylist = dict() entry = tablename.query.filter_by(id=_id).first() for value in vars(entry): if value != '_sa_instance_state': entrylist[value] = vars(entry)[value] return entrylist
gpl-2.0
karlito40/servo
tests/wpt/web-platform-tests/webdriver/javascript/execute_script_test.py
65
5664
import os import sys import unittest sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../.."))) import base_test class ExecuteScriptTest(base_test.WebDriverBaseTest): def test_ecmascript_translates_null_return_to_none(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return null;") self.assertIsNone(result) def test_ecmascript_translates_undefined_return_to_none(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("var undef; return undef;") self.assertIsNone(result) def test_can_return_numbers_from_scripts(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) self.assertEquals(1, self.driver.execute_script("return 1;")) self.assertEquals(3.14, self.driver.execute_script("return 3.14;")) def test_can_return_strings_from_scripts(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) self.assertEquals("hello, world!", self.driver.execute_script("return 'hello, world!'")) def test_can_return_booleans_from_scripts(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) self.assertTrue(self.driver.execute_script("return true;")) self.assertFalse(self.driver.execute_script("return false;")) def test_can_return_an_array_of_primitives(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return [1, false, null, 3.14]") self.assertListEqual([1, False, None, 3.14], result) def test_can_return_nested_arrays(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return [[1, 2, [3]]]") self.assertIsInstance(result, list) self.assertEquals(1, len(result)) result = result[0] self.assertListEqual([1, 2], result[:2]) self.assertListEqual([3], result[2]) def 
test_can_return_object_literals(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return {}") self.assertDictEqual({}, result) result = self.driver.execute_script("return {a: 1, b: false, c: null}") self.assertDictEqual({ "a": 1, "b": False, "c": None }, result) def test_can_return_complex_object_literals(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return {a:{b: 'hello'}}") self.assertIsInstance(result, dict) self.assertIsInstance(result['a'], dict) self.assertDictEqual({"b": "hello"}, result["a"]) def test_dom_element_return_value_is_translated_to_a_web_element(self): self.driver.get(self.webserver.where_is( "javascript/res/return_document_body.html")) result = self.driver.execute_script("return document.body") self.assertEquals(result.text, "Hello, world!") def test_return_an_array_of_dom_elements(self): self.driver.get(self.webserver.where_is( "javascript/res/return_array_of_dom_elements.html")) result = self.driver.execute_script( "var nodes = document.getElementsByTagName('div');" "return [nodes[0], nodes[1]]") self.assertIsInstance(result, list) self.assertEquals(2, len(result)) self.assertEquals("a", result[0].text) self.assertEquals("b", result[1].text) def test_node_list_return_value_is_translated_to_list_of_web_elements(self): self.driver.get(self.webserver.where_is( "javascript/res/return_array_of_dom_elements.html")) result = self.driver.execute_script( "return document.getElementsByTagName('div');") self.assertIsInstance(result, list) self.assertEquals(2, len(result)) self.assertEquals("a", result[0].text) self.assertEquals("b", result[1].text) def test_return_object_literal_with_dom_element_property(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return {a: document.body}") self.assertIsInstance(result, dict) 
self.assertEquals("body", result["a"].tag_name) def test_scripts_execute_in_anonymous_function_and_do_not_pollute_global_scope(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) self.driver.execute_script("var x = 1;") self.assertEquals("undefined", self.driver.execute_script("return typeof x;")); def test_scripts_can_modify_context_window_object(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) self.driver.execute_script("window.x = 1;") self.assertEquals("number", self.driver.execute_script("return typeof x;")); self.assertEquals(1, self.driver.execute_script("return x;")); def test_that_ecmascript_returns_document_title(self): self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html")) result = self.driver.execute_script("return document.title;") self.assertEquals("executeScript test", result) if __name__ == "__main__": unittest.main()
mpl-2.0
pxsdirac/vnpy
vn.trader/gateway.py
9
14192
# encoding: UTF-8 from eventEngine import * # 默认空值 EMPTY_STRING = '' EMPTY_UNICODE = u'' EMPTY_INT = 0 EMPTY_FLOAT = 0.0 # 方向常量 DIRECTION_NONE = u'无方向' DIRECTION_LONG = u'多' DIRECTION_SHORT = u'空' DIRECTION_UNKNOWN = u'未知' DIRECTION_NET = u'净' # 开平常量 OFFSET_NONE = u'无开平' OFFSET_OPEN = u'开仓' OFFSET_CLOSE = u'平仓' OFFSET_UNKNOWN = u'未知' # 状态常量 STATUS_NOTTRADED = u'未成交' STATUS_PARTTRADED = u'部分成交' STATUS_ALLTRADED = u'全部成交' STATUS_CANCELLED = u'已撤销' STATUS_UNKNOWN = u'未知' # 合约类型常量 PRODUCT_EQUITY = u'股票' PRODUCT_FUTURES = u'期货' PRODUCT_OPTION = u'期权' PRODUCT_INDEX = u'指数' PRODUCT_COMBINATION = u'组合' # 期权类型 OPTION_CALL = u'看涨期权' OPTION_PUT = u'看跌期权' ######################################################################## class VtGateway(object): """交易接口""" #---------------------------------------------------------------------- def __init__(self, eventEngine): """Constructor""" self.eventEngine = eventEngine #---------------------------------------------------------------------- def onTick(self, tick): """市场行情推送""" # 通用事件 event1 = Event(type_=EVENT_TICK) event1.dict_['data'] = tick self.eventEngine.put(event1) # 特定合约代码的事件 event2 = Event(type_=EVENT_TICK+tick.vtSymbol) event2.dict_['data'] = tick self.eventEngine.put(event2) #---------------------------------------------------------------------- def onTrade(self, trade): """成交信息推送""" # 因为成交通常都是事后才会知道成交编号,因此只需要推送通用事件 event1 = Event(type_=EVENT_TRADE) event1.dict_['data'] = trade self.eventEngine.put(event1) #---------------------------------------------------------------------- def onOrder(self, order): """订单变化推送""" # 通用事件 event1 = Event(type_=EVENT_ORDER) event1.dict_['data'] = order self.eventEngine.put(event1) # 特定订单编号的事件 event2 = Event(type_=EVENT_ORDER+order.vtOrderID) event2.dict_['data'] = order self.eventEngine.put(event2) #---------------------------------------------------------------------- def onPosition(self, position): """持仓信息推送""" # 通用事件 event1 = Event(type_=EVENT_POSITION) event1.dict_['data'] = position 
self.eventEngine.put(event1) # 特定合约代码的事件 event2 = Event(type_=EVENT_POSITION+position.vtPositionName) event2.dict_['data'] = position self.eventEngine.put(event2) #---------------------------------------------------------------------- def onAccount(self, account): """账户信息推送""" # 通用事件 event1 = Event(type_=EVENT_ACCOUNT) event1.dict_['data'] = account self.eventEngine.put(event1) # 特定合约代码的事件 event2 = Event(type_=EVENT_ACCOUNT+account.vtAccountID) event2.dict_['data'] = account self.eventEngine.put(event2) #---------------------------------------------------------------------- def onError(self, error): """错误信息推送""" # 通用事件 event1 = Event(type_=EVENT_ERROR) event1.dict_['data'] = error self.eventEngine.put(event1) #---------------------------------------------------------------------- def onLog(self, log): """日志推送""" # 通用事件 event1 = Event(type_=EVENT_LOG) event1.dict_['data'] = log self.eventEngine.put(event1) #---------------------------------------------------------------------- def onContract(self, contract): """合约基础信息推送""" # 通用事件 event1 = Event(type_=EVENT_CONTRACT) event1.dict_['data'] = contract self.eventEngine.put(event1) #---------------------------------------------------------------------- def connect(self): """连接""" pass #---------------------------------------------------------------------- def subscribe(self): """订阅行情""" pass #---------------------------------------------------------------------- def sendOrder(self): """发单""" pass #---------------------------------------------------------------------- def cancelOrder(self): """撤单""" pass #---------------------------------------------------------------------- def close(self): """关闭""" pass ######################################################################## class VtBaseData(object): """回调函数推送数据的基础类,其他数据类继承于此""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.gatewayName = EMPTY_STRING # Gateway名称 self.rawData = None # 原始数据 
######################################################################## class VtTickData(VtBaseData): """Tick行情数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtTickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING # 合约代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.volume = EMPTY_INT # 最新成交量 self.openInterest = EMPTY_INT # 持仓量 self.tickTime = EMPTY_STRING # 更新时间 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class VtTradeData(VtBaseData): """成交数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtTradeData, self).__init__() # 代码编号相关 self.symbol = EMPTY_STRING # 合约代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码 self.tradeID = EMPTY_STRING # 成交编号 self.vtTradeID = EMPTY_STRING # 成交在vt系统中的唯一编号,通常是 Gateway名.成交编号 self.orderID = EMPTY_STRING # 订单编号 self.vtOrderID = EMPTY_STRING # 订单在vt系统中的唯一编号,通常是 Gateway名.订单编号 # 成交相关 self.direction = EMPTY_UNICODE # 成交方向 self.offset = EMPTY_UNICODE # 成交开平仓 self.price = EMPTY_FLOAT # 成交价格 self.volume = EMPTY_INT # 成交数量 self.tradeTime = EMPTY_STRING # 成交时间 ######################################################################## class VtOrderData(VtBaseData): """订单数据类""" 
#---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtOrderData, self).__init__() # 代码编号相关 self.symbol = EMPTY_STRING # 合约代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码 self.orderID = EMPTY_STRING # 订单编号 self.vtOrderID = EMPTY_STRING # 订单在vt系统中的唯一编号,通常是 Gateway名.订单编号 # 报单相关 self.direction = EMPTY_UNICODE # 报单方向 self.offset = EMPTY_UNICODE # 报单开平仓 self.price = EMPTY_FLOAT # 报单价格 self.totalVolume = EMPTY_INT # 报单总数量 self.tradedVolume = EMPTY_INT # 报单成交数量 self.status = EMPTY_UNICODE # 报单状态 self.orderTime = EMPTY_STRING # 发单时间 self.cancelTime = EMPTY_STRING # 撤单时间 # CTP/LTS相关 self.frontID = EMPTY_INT # 前置机编号 self.sessionID = EMPTY_INT # 连接编号 ######################################################################## class VtPositionData(VtBaseData): """持仓数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtPositionData, self).__init__() # 代码编号相关 self.symbol = EMPTY_STRING # 合约代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 Gateway名.合约代码 # 持仓相关 self.direction = EMPTY_STRING # 持仓方向 self.position = EMPTY_INT # 持仓量 self.frozen = EMPTY_INT # 冻结数量 self.price = EMPTY_FLOAT # 持仓均价 self.vtPositionName = EMPTY_STRING # 持仓在vt系统中的唯一代码,通常是vtSymbol.方向 ######################################################################## class VtAccountData(VtBaseData): """账户数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtAccountData, self).__init__() # 账号代码相关 self.accountID = EMPTY_STRING # 账户代码 self.vtAccountID = EMPTY_STRING # 账户在vt中的唯一代码,通常是 Gateway名.账户代码 # 数值相关 self.preBalance = EMPTY_FLOAT # 昨日账户结算净值 self.balance = EMPTY_FLOAT # 账户净值 self.available = EMPTY_FLOAT # 可用资金 self.commission = EMPTY_FLOAT # 今日手续费 self.margin = EMPTY_FLOAT # 保证金占用 self.closeProfit = EMPTY_FLOAT # 平仓盈亏 self.positionProfit = EMPTY_FLOAT # 持仓盈亏 
######################################################################## class VtErrorData(VtBaseData): """错误数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtErrorData, self).__init__() self.errorID = EMPTY_STRING # 错误代码 self.errorMsg = EMPTY_UNICODE # 错误信息 ######################################################################## class VtLogData(VtBaseData): """日志数据类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtLogData, self).__init__() self.logContent = EMPTY_UNICODE # 日志信息 ######################################################################## class VtContractData(VtBaseData): """合约详细信息类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(VtBaseData, self).__init__() self.symbol = EMPTY_STRING self.vtSymbol = EMPTY_STRING self.productClass = EMPTY_STRING self.size = EMPTY_INT self.priceTick = EMPTY_FLOAT # 期权相关 self.strikePrice = EMPTY_FLOAT self.underlyingSymbol = EMPTY_STRING self.optionType = EMPTY_UNICODE ######################################################################## class VtSubscribeReq: """订阅行情时传入的对象类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.symbol = EMPTY_STRING self.exchange = EMPTY_STRING ######################################################################## class VtOrderReq: """发单时传入的对象类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.symbol = EMPTY_STRING ######################################################################## class VtCancelOrderReq: """撤单时传入的对象类""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.symbol = EMPTY_STRING self.exchange = EMPTY_STRING self.
mit
fran-penedo/dreal
benchmarks/network/thermostat/thermostat-double-ind.py
11
3535
from gen import * ########## # shared # ########## flow_var[0] = """ (declare-fun tau () Real) (declare-fun x1 () Real) (declare-fun x2 () Real) """ flow_dec[0] = """ (define-ode flow_1 ((= d/dt[x1] (* 0.015 (- 100 (+ (* (- 1 0.01) x1) (* 0.01 x2))))) (= d/dt[x2] (* 0.045 (- 200 (+ (* (- 1 0.01) x2) (* 0.01 x1))))) (= d/dt[tau] 1))) (define-ode flow_2 ((= d/dt[x1] (* 0.015 (- 100 (+ (* (- 1 0.01) x1) (* 0.01 x2))))) (= d/dt[x2] (* -0.045 (+ (* (- 1 0.01) x2) (* 0.01 x1)))) (= d/dt[tau] 1))) (define-ode flow_3 ((= d/dt[x1] (* -0.015 (+ (* (- 1 0.01) x1) (* 0.01 x2)))) (= d/dt[x2] (* 0.045 (- 200 (+ (* (- 1 0.01) x2) (* 0.01 x1))))) (= d/dt[tau] 1))) (define-ode flow_4 ((= d/dt[x1] (* -0.015 (+ (* (- 1 0.01) x1) (* 0.01 x2)))) (= d/dt[x2] (* -0.045 (+ (* (- 1 0.01) x2) (* 0.01 x1)))) (= d/dt[tau] 1))) """ state_dec[0] = """ (declare-fun time_{0} () Real) (declare-fun tau_{0}_0 () Real) (declare-fun tau_{0}_t () Real) (declare-fun mode1_{0} () Bool) (declare-fun x1_{0}_0 () Real) (declare-fun x1_{0}_t () Real) (declare-fun mode2_{0} () Bool) (declare-fun x2_{0}_0 () Real) (declare-fun x2_{0}_t () Real) """ state_val[0] = """ (assert (<= 0 time_{0})) (assert (<= time_{0} 1)) (assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1)) (assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1)) (assert (<= -20 x1_{0}_0)) (assert (<= x1_{0}_0 100)) (assert (<= -20 x1_{0}_t)) (assert (<= x1_{0}_t 100)) (assert (<= -20 x2_{0}_0)) (assert (<= x2_{0}_0 100)) (assert (<= -20 x2_{0}_t)) (assert (<= x2_{0}_t 100)) """ cont_cond[0] = [""" (assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1) (>= tau_{0}_t 0) (<= tau_{0}_t 1) (forall_t 1 [0 time_{0}] (>= tau_{0}_t 0)) (forall_t 2 [0 time_{0}] (<= tau_{0}_t 1)))) (assert (or (and (= mode1_{0} true) (= mode2_{0} true) (= [x1_{0}_t x2_{0}_t tau_{0}_t] (integral 0. time_{0} [x1_{0}_0 x2_{0}_0 tau_{0}_0] flow_1))) (and (= mode1_{0} true) (= mode2_{0} false) (= [x1_{0}_t x2_{0}_t tau_{0}_t] (integral 0. 
time_{0} [x1_{0}_0 x2_{0}_0 tau_{0}_0] flow_2))) (and (= mode1_{0} false) (= mode2_{0} true) (= [x1_{0}_t x2_{0}_t tau_{0}_t] (integral 0. time_{0} [x1_{0}_0 x2_{0}_0 tau_{0}_0] flow_3))) (and (= mode1_{0} false) (= mode2_{0} false) (= [x1_{0}_t x2_{0}_t tau_{0}_t] (integral 0. time_{0} [x1_{0}_0 x2_{0}_0 tau_{0}_0] flow_4)))))"""] jump_cond[0] = [""" (assert (and (= tau_{0}_t 1) (= tau_{1}_0 0))) (assert (and (= x1_{1}_0 x1_{0}_t))) (assert (or (and (<= x1_{0}_t 20) (= mode1_{1} true)) (and (> x1_{0}_t 20) (= mode1_{1} false)))) (assert (and (= x2_{1}_0 x2_{0}_t))) (assert (or (and (<= x2_{0}_t 20) (= mode2_{1} true)) (and (> x2_{0}_t 20) (= mode2_{1} false))))"""] ############# # Init/Goal # ############# init_cond = """ (assert (< 0.99 tau_{0}_0)) (assert (and (> x1_{0}_0 (- 20 8)) (< x1_{0}_0 (+ 20 8)) (> x2_{0}_0 (- 20 8)) (< x2_{0}_0 (+ 20 8)))) """ goal_cond = """ (assert (< 0.99 tau_{0}_t)) (assert (not (and (> x1_{0}_t (- 20 8)) (< x1_{0}_t (+ 20 8)) (> x2_{0}_t (- 20 8)) (< x2_{0}_t (+ 20 8))))) """ import sys try: bound = int(sys.argv[1]) except: print("Usage:", sys.argv[0], "<Bound>") else: generate(bound, 1, [0], 0, init_cond, goal_cond)
gpl-3.0
denny820909/builder
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/web/test/test_httpauth.py
41
22445
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.web._auth}. """ from zope.interface import implements from zope.interface.verify import verifyObject from twisted.trial import unittest from twisted.python.failure import Failure from twisted.internet.error import ConnectionDone from twisted.internet.address import IPv4Address from twisted.cred import error, portal from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess from twisted.cred.credentials import IUsernamePassword from twisted.web.iweb import ICredentialFactory from twisted.web.resource import IResource, Resource, getChildForRequest from twisted.web._auth import basic, digest from twisted.web._auth.wrapper import HTTPAuthSessionWrapper, UnauthorizedResource from twisted.web._auth.basic import BasicCredentialFactory from twisted.web.server import NOT_DONE_YET from twisted.web.static import Data from twisted.web.test.test_web import DummyRequest def b64encode(s): return s.encode('base64').strip() class BasicAuthTestsMixin: """ L{TestCase} mixin class which defines a number of tests for L{basic.BasicCredentialFactory}. Because this mixin defines C{setUp}, it must be inherited before L{TestCase}. """ def setUp(self): self.request = self.makeRequest() self.realm = 'foo' self.username = 'dreid' self.password = 'S3CuR1Ty' self.credentialFactory = basic.BasicCredentialFactory(self.realm) def makeRequest(self, method='GET', clientAddress=None): """ Create a request object to be passed to L{basic.BasicCredentialFactory.decode} along with a response value. Override this in a subclass. """ raise NotImplementedError("%r did not implement makeRequest" % ( self.__class__,)) def test_interface(self): """ L{BasicCredentialFactory} implements L{ICredentialFactory}. 
""" self.assertTrue( verifyObject(ICredentialFactory, self.credentialFactory)) def test_usernamePassword(self): """ L{basic.BasicCredentialFactory.decode} turns a base64-encoded response into a L{UsernamePassword} object with a password which reflects the one which was encoded in the response. """ response = b64encode('%s:%s' % (self.username, self.password)) creds = self.credentialFactory.decode(response, self.request) self.assertTrue(IUsernamePassword.providedBy(creds)) self.assertTrue(creds.checkPassword(self.password)) self.assertFalse(creds.checkPassword(self.password + 'wrong')) def test_incorrectPadding(self): """ L{basic.BasicCredentialFactory.decode} decodes a base64-encoded response with incorrect padding. """ response = b64encode('%s:%s' % (self.username, self.password)) response = response.strip('=') creds = self.credentialFactory.decode(response, self.request) self.assertTrue(verifyObject(IUsernamePassword, creds)) self.assertTrue(creds.checkPassword(self.password)) def test_invalidEncoding(self): """ L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} if passed a response which is not base64-encoded. """ response = 'x' # one byte cannot be valid base64 text self.assertRaises( error.LoginFailed, self.credentialFactory.decode, response, self.makeRequest()) def test_invalidCredentials(self): """ L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} when passed a response which is not valid base64-encoded text. """ response = b64encode('123abc+/') self.assertRaises( error.LoginFailed, self.credentialFactory.decode, response, self.makeRequest()) class RequestMixin: def makeRequest(self, method='GET', clientAddress=None): """ Create a L{DummyRequest} (change me to create a L{twisted.web.http.Request} instead). 
""" request = DummyRequest('/') request.method = method request.client = clientAddress return request class BasicAuthTestCase(RequestMixin, BasicAuthTestsMixin, unittest.TestCase): """ Basic authentication tests which use L{twisted.web.http.Request}. """ class DigestAuthTestCase(RequestMixin, unittest.TestCase): """ Digest authentication tests which use L{twisted.web.http.Request}. """ def setUp(self): """ Create a DigestCredentialFactory for testing """ self.realm = "test realm" self.algorithm = "md5" self.credentialFactory = digest.DigestCredentialFactory( self.algorithm, self.realm) self.request = self.makeRequest() def test_decode(self): """ L{digest.DigestCredentialFactory.decode} calls the C{decode} method on L{twisted.cred.digest.DigestCredentialFactory} with the HTTP method and host of the request. """ host = '169.254.0.1' method = 'GET' done = [False] response = object() def check(_response, _method, _host): self.assertEqual(response, _response) self.assertEqual(method, _method) self.assertEqual(host, _host) done[0] = True self.patch(self.credentialFactory.digest, 'decode', check) req = self.makeRequest(method, IPv4Address('TCP', host, 81)) self.credentialFactory.decode(response, req) self.assertTrue(done[0]) def test_interface(self): """ L{DigestCredentialFactory} implements L{ICredentialFactory}. """ self.assertTrue( verifyObject(ICredentialFactory, self.credentialFactory)) def test_getChallenge(self): """ The challenge issued by L{DigestCredentialFactory.getChallenge} must include C{'qop'}, C{'realm'}, C{'algorithm'}, C{'nonce'}, and C{'opaque'} keys. The values for the C{'realm'} and C{'algorithm'} keys must match the values supplied to the factory's initializer. None of the values may have newlines in them. 
""" challenge = self.credentialFactory.getChallenge(self.request) self.assertEqual(challenge['qop'], 'auth') self.assertEqual(challenge['realm'], 'test realm') self.assertEqual(challenge['algorithm'], 'md5') self.assertIn('nonce', challenge) self.assertIn('opaque', challenge) for v in challenge.values(): self.assertNotIn('\n', v) def test_getChallengeWithoutClientIP(self): """ L{DigestCredentialFactory.getChallenge} can issue a challenge even if the L{Request} it is passed returns C{None} from C{getClientIP}. """ request = self.makeRequest('GET', None) challenge = self.credentialFactory.getChallenge(request) self.assertEqual(challenge['qop'], 'auth') self.assertEqual(challenge['realm'], 'test realm') self.assertEqual(challenge['algorithm'], 'md5') self.assertIn('nonce', challenge) self.assertIn('opaque', challenge) class UnauthorizedResourceTests(unittest.TestCase): """ Tests for L{UnauthorizedResource}. """ def test_getChildWithDefault(self): """ An L{UnauthorizedResource} is every child of itself. """ resource = UnauthorizedResource([]) self.assertIdentical( resource.getChildWithDefault("foo", None), resource) self.assertIdentical( resource.getChildWithDefault("bar", None), resource) def _unauthorizedRenderTest(self, request): """ Render L{UnauthorizedResource} for the given request object and verify that the response code is I{Unauthorized} and that a I{WWW-Authenticate} header is set in the response containing a challenge. """ resource = UnauthorizedResource([ BasicCredentialFactory('example.com')]) request.render(resource) self.assertEqual(request.responseCode, 401) self.assertEqual( request.responseHeaders.getRawHeaders('www-authenticate'), ['basic realm="example.com"']) def test_render(self): """ L{UnauthorizedResource} renders with a 401 response code and a I{WWW-Authenticate} header and puts a simple unauthorized message into the response body. 
""" request = DummyRequest(['']) self._unauthorizedRenderTest(request) self.assertEqual('Unauthorized', ''.join(request.written)) def test_renderHEAD(self): """ The rendering behavior of L{UnauthorizedResource} for a I{HEAD} request is like its handling of a I{GET} request, but no response body is written. """ request = DummyRequest(['']) request.method = 'HEAD' self._unauthorizedRenderTest(request) self.assertEqual('', ''.join(request.written)) def test_renderQuotesRealm(self): """ The realm value included in the I{WWW-Authenticate} header set in the response when L{UnauthorizedResounrce} is rendered has quotes and backslashes escaped. """ resource = UnauthorizedResource([ BasicCredentialFactory('example\\"foo')]) request = DummyRequest(['']) request.render(resource) self.assertEqual( request.responseHeaders.getRawHeaders('www-authenticate'), ['basic realm="example\\\\\\"foo"']) class Realm(object): """ A simple L{IRealm} implementation which gives out L{WebAvatar} for any avatarId. @type loggedIn: C{int} @ivar loggedIn: The number of times C{requestAvatar} has been invoked for L{IResource}. @type loggedOut: C{int} @ivar loggedOut: The number of times the logout callback has been invoked. """ implements(portal.IRealm) def __init__(self, avatarFactory): self.loggedOut = 0 self.loggedIn = 0 self.avatarFactory = avatarFactory def requestAvatar(self, avatarId, mind, *interfaces): if IResource in interfaces: self.loggedIn += 1 return IResource, self.avatarFactory(avatarId), self.logout raise NotImplementedError() def logout(self): self.loggedOut += 1 class HTTPAuthHeaderTests(unittest.TestCase): """ Tests for L{HTTPAuthSessionWrapper}. """ makeRequest = DummyRequest def setUp(self): """ Create a realm, portal, and L{HTTPAuthSessionWrapper} to use in the tests. 
""" self.username = 'foo bar' self.password = 'bar baz' self.avatarContent = "contents of the avatar resource itself" self.childName = "foo-child" self.childContent = "contents of the foo child of the avatar" self.checker = InMemoryUsernamePasswordDatabaseDontUse() self.checker.addUser(self.username, self.password) self.avatar = Data(self.avatarContent, 'text/plain') self.avatar.putChild( self.childName, Data(self.childContent, 'text/plain')) self.avatars = {self.username: self.avatar} self.realm = Realm(self.avatars.get) self.portal = portal.Portal(self.realm, [self.checker]) self.credentialFactories = [] self.wrapper = HTTPAuthSessionWrapper( self.portal, self.credentialFactories) def _authorizedBasicLogin(self, request): """ Add an I{basic authorization} header to the given request and then dispatch it, starting from C{self.wrapper} and returning the resulting L{IResource}. """ authorization = b64encode(self.username + ':' + self.password) request.headers['authorization'] = 'Basic ' + authorization return getChildForRequest(self.wrapper, request) def test_getChildWithDefault(self): """ Resource traversal which encounters an L{HTTPAuthSessionWrapper} results in an L{UnauthorizedResource} instance when the request does not have the required I{Authorization} headers. """ request = self.makeRequest([self.childName]) child = getChildForRequest(self.wrapper, request) d = request.notifyFinish() def cbFinished(result): self.assertEqual(request.responseCode, 401) d.addCallback(cbFinished) request.render(child) return d def _invalidAuthorizationTest(self, response): """ Create a request with the given value as the value of an I{Authorization} header and perform resource traversal with it, starting at C{self.wrapper}. Assert that the result is a 401 response code. Return a L{Deferred} which fires when this is all done. 
""" self.credentialFactories.append(BasicCredentialFactory('example.com')) request = self.makeRequest([self.childName]) request.headers['authorization'] = response child = getChildForRequest(self.wrapper, request) d = request.notifyFinish() def cbFinished(result): self.assertEqual(request.responseCode, 401) d.addCallback(cbFinished) request.render(child) return d def test_getChildWithDefaultUnauthorizedUser(self): """ Resource traversal which enouncters an L{HTTPAuthSessionWrapper} results in an L{UnauthorizedResource} when the request has an I{Authorization} header with a user which does not exist. """ return self._invalidAuthorizationTest('Basic ' + b64encode('foo:bar')) def test_getChildWithDefaultUnauthorizedPassword(self): """ Resource traversal which enouncters an L{HTTPAuthSessionWrapper} results in an L{UnauthorizedResource} when the request has an I{Authorization} header with a user which exists and the wrong password. """ return self._invalidAuthorizationTest( 'Basic ' + b64encode(self.username + ':bar')) def test_getChildWithDefaultUnrecognizedScheme(self): """ Resource traversal which enouncters an L{HTTPAuthSessionWrapper} results in an L{UnauthorizedResource} when the request has an I{Authorization} header with an unrecognized scheme. """ return self._invalidAuthorizationTest('Quux foo bar baz') def test_getChildWithDefaultAuthorized(self): """ Resource traversal which encounters an L{HTTPAuthSessionWrapper} results in an L{IResource} which renders the L{IResource} avatar retrieved from the portal when the request has a valid I{Authorization} header. 
""" self.credentialFactories.append(BasicCredentialFactory('example.com')) request = self.makeRequest([self.childName]) child = self._authorizedBasicLogin(request) d = request.notifyFinish() def cbFinished(ignored): self.assertEqual(request.written, [self.childContent]) d.addCallback(cbFinished) request.render(child) return d def test_renderAuthorized(self): """ Resource traversal which terminates at an L{HTTPAuthSessionWrapper} and includes correct authentication headers results in the L{IResource} avatar (not one of its children) retrieved from the portal being rendered. """ self.credentialFactories.append(BasicCredentialFactory('example.com')) # Request it exactly, not any of its children. request = self.makeRequest([]) child = self._authorizedBasicLogin(request) d = request.notifyFinish() def cbFinished(ignored): self.assertEqual(request.written, [self.avatarContent]) d.addCallback(cbFinished) request.render(child) return d def test_getChallengeCalledWithRequest(self): """ When L{HTTPAuthSessionWrapper} finds an L{ICredentialFactory} to issue a challenge, it calls the C{getChallenge} method with the request as an argument. """ class DumbCredentialFactory(object): implements(ICredentialFactory) scheme = 'dumb' def __init__(self): self.requests = [] def getChallenge(self, request): self.requests.append(request) return {} factory = DumbCredentialFactory() self.credentialFactories.append(factory) request = self.makeRequest([self.childName]) child = getChildForRequest(self.wrapper, request) d = request.notifyFinish() def cbFinished(ignored): self.assertEqual(factory.requests, [request]) d.addCallback(cbFinished) request.render(child) return d def _logoutTest(self): """ Issue a request for an authentication-protected resource using valid credentials and then return the C{DummyRequest} instance which was used. This is a helper for tests about the behavior of the logout callback. 
""" self.credentialFactories.append(BasicCredentialFactory('example.com')) class SlowerResource(Resource): def render(self, request): return NOT_DONE_YET self.avatar.putChild(self.childName, SlowerResource()) request = self.makeRequest([self.childName]) child = self._authorizedBasicLogin(request) request.render(child) self.assertEqual(self.realm.loggedOut, 0) return request def test_logout(self): """ The realm's logout callback is invoked after the resource is rendered. """ request = self._logoutTest() request.finish() self.assertEqual(self.realm.loggedOut, 1) def test_logoutOnError(self): """ The realm's logout callback is also invoked if there is an error generating the response (for example, if the client disconnects early). """ request = self._logoutTest() request.processingFailed( Failure(ConnectionDone("Simulated disconnect"))) self.assertEqual(self.realm.loggedOut, 1) def test_decodeRaises(self): """ Resource traversal which enouncters an L{HTTPAuthSessionWrapper} results in an L{UnauthorizedResource} when the request has a I{Basic Authorization} header which cannot be decoded using base64. """ self.credentialFactories.append(BasicCredentialFactory('example.com')) request = self.makeRequest([self.childName]) request.headers['authorization'] = 'Basic decode should fail' child = getChildForRequest(self.wrapper, request) self.assertIsInstance(child, UnauthorizedResource) def test_selectParseResponse(self): """ L{HTTPAuthSessionWrapper._selectParseHeader} returns a two-tuple giving the L{ICredentialFactory} to use to parse the header and a string containing the portion of the header which remains to be parsed. 
""" basicAuthorization = 'Basic abcdef123456' self.assertEqual( self.wrapper._selectParseHeader(basicAuthorization), (None, None)) factory = BasicCredentialFactory('example.com') self.credentialFactories.append(factory) self.assertEqual( self.wrapper._selectParseHeader(basicAuthorization), (factory, 'abcdef123456')) def test_unexpectedDecodeError(self): """ Any unexpected exception raised by the credential factory's C{decode} method results in a 500 response code and causes the exception to be logged. """ class UnexpectedException(Exception): pass class BadFactory(object): scheme = 'bad' def getChallenge(self, client): return {} def decode(self, response, request): raise UnexpectedException() self.credentialFactories.append(BadFactory()) request = self.makeRequest([self.childName]) request.headers['authorization'] = 'Bad abc' child = getChildForRequest(self.wrapper, request) request.render(child) self.assertEqual(request.responseCode, 500) self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1) def test_unexpectedLoginError(self): """ Any unexpected failure from L{Portal.login} results in a 500 response code and causes the failure to be logged. """ class UnexpectedException(Exception): pass class BrokenChecker(object): credentialInterfaces = (IUsernamePassword,) def requestAvatarId(self, credentials): raise UnexpectedException() self.portal.registerChecker(BrokenChecker()) self.credentialFactories.append(BasicCredentialFactory('example.com')) request = self.makeRequest([self.childName]) child = self._authorizedBasicLogin(request) request.render(child) self.assertEqual(request.responseCode, 500) self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1) def test_anonymousAccess(self): """ Anonymous requests are allowed if a L{Portal} has an anonymous checker registered. 
""" unprotectedContents = "contents of the unprotected child resource" self.avatars[ANONYMOUS] = Resource() self.avatars[ANONYMOUS].putChild( self.childName, Data(unprotectedContents, 'text/plain')) self.portal.registerChecker(AllowAnonymousAccess()) self.credentialFactories.append(BasicCredentialFactory('example.com')) request = self.makeRequest([self.childName]) child = getChildForRequest(self.wrapper, request) d = request.notifyFinish() def cbFinished(ignored): self.assertEqual(request.written, [unprotectedContents]) d.addCallback(cbFinished) request.render(child) return d
mit
pitch-sands/i-MPI
flask/Lib/site-packages/pip-1.5.6-py2.7.egg/pip/commands/__init__.py
476
2236
""" Package containing all pip commands """ from pip.commands.bundle import BundleCommand from pip.commands.completion import CompletionCommand from pip.commands.freeze import FreezeCommand from pip.commands.help import HelpCommand from pip.commands.list import ListCommand from pip.commands.search import SearchCommand from pip.commands.show import ShowCommand from pip.commands.install import InstallCommand from pip.commands.uninstall import UninstallCommand from pip.commands.unzip import UnzipCommand from pip.commands.zip import ZipCommand from pip.commands.wheel import WheelCommand commands = { BundleCommand.name: BundleCommand, CompletionCommand.name: CompletionCommand, FreezeCommand.name: FreezeCommand, HelpCommand.name: HelpCommand, SearchCommand.name: SearchCommand, ShowCommand.name: ShowCommand, InstallCommand.name: InstallCommand, UninstallCommand.name: UninstallCommand, UnzipCommand.name: UnzipCommand, ZipCommand.name: ZipCommand, ListCommand.name: ListCommand, WheelCommand.name: WheelCommand, } commands_order = [ InstallCommand, UninstallCommand, FreezeCommand, ListCommand, ShowCommand, SearchCommand, WheelCommand, ZipCommand, UnzipCommand, BundleCommand, HelpCommand, ] def get_summaries(ignore_hidden=True, ordered=True): """Yields sorted (command name, command summary) tuples.""" if ordered: cmditems = _sort_commands(commands, commands_order) else: cmditems = commands.items() for name, command_class in cmditems: if ignore_hidden and command_class.hidden: continue yield (name, command_class.summary) def get_similar_commands(name): """Command name auto-correct.""" from difflib import get_close_matches close_commands = get_close_matches(name, commands.keys()) if close_commands: guess = close_commands[0] else: guess = False return guess def _sort_commands(cmddict, order): def keyfn(key): try: return order.index(key[1]) except ValueError: # unordered items should come last return 0xff return sorted(cmddict.items(), key=keyfn)
bsd-3-clause
tumbl3w33d/ansible
lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py
14
9262
#!/usr/bin/python # # Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_containerregistry_info version_added: "2.9" short_description: Get Azure Container Registry facts description: - Get facts for Container Registry. options: resource_group: description: - The name of the resource group to which the container registry belongs. required: True name: description: - The name of the container registry. retrieve_credentials: description: - Retrieve credentials for container registry. type: bool default: no tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - Zim Kalinowski (@zikalino) ''' EXAMPLES = ''' - name: Get instance of Registry azure_rm_containerregistry_info: resource_group: myResourceGroup name: myRegistry - name: List instances of Registry azure_rm_containerregistry_info: resource_group: myResourceGroup ''' RETURN = ''' registries: description: - A list of dictionaries containing facts for registries. returned: always type: complex contains: id: description: - The resource ID. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr ies/myRegistry" name: description: - The name of the resource. returned: always type: str sample: myRegistry location: description: - The location of the resource. This cannot be changed after the resource is created. returned: always type: str sample: westus admin_user_enabled: description: - Is admin user enabled. 
returned: always type: bool sample: yes sku: description: - The SKU name of the container registry. returned: always type: str sample: Premium provisioning_state: description: - Provisioning state of the container registry. returned: always type: str sample: Succeeded login_server: description: - Login server for the registry. returned: always type: str sample: acrd08521b.azurecr.io credentials: description: - Credentials, fields will be empty if admin user is not enabled for ACR. returned: when C(retrieve_credentials) is set and C(admin_user_enabled) is set on ACR type: complex contains: username: description: - The user name for container registry. returned: when registry exists and C(admin_user_enabled) is set type: str sample: zim password: description: - password value. returned: when registry exists and C(admin_user_enabled) is set type: str sample: pass1value password2: description: - password2 value. returned: when registry exists and C(admin_user_enabled) is set type: str sample: pass2value tags: description: - Tags assigned to the resource. Dictionary of string:string pairs. 
type: dict sample: { "tag1": "abc" } ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from msrestazure.azure_operation import AzureOperationPoller from azure.mgmt.containerregistry import ContainerRegistryManagementClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMContainerRegistryInfo(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), name=dict( type='str' ), tags=dict( type='list' ), retrieve_credentials=dict( type='bool', default=False ) ) # store the results of the module operation self.results = dict( changed=False ) self.resource_group = None self.name = None self.retrieve_credentials = False super(AzureRMContainerRegistryInfo, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_containerregistry_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_containerregistry_facts' module has been renamed to 'azure_rm_containerregistry_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name: self.results['registries'] = self.get() elif self.resource_group: self.results['registries'] = self.list_by_resource_group() else: self.results['registries'] = self.list_all() return self.results def get(self): response = None results = [] try: response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group, registry_name=self.name) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could not get facts for Registries.') if response is not None: if self.has_tags(response.tags, self.tags): results.append(self.format_item(response)) return results def list_all(self): response = None results = [] try: response = 
self.containerregistry_client.registries.list() self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not get facts for Registries.') if response is not None: for item in response: if self.has_tags(item.tags, self.tags): results.append(self.format_item(item)) return results def list_by_resource_group(self): response = None results = [] try: response = self.containerregistry_client.registries.list_by_resource_group(resource_group_name=self.resource_group) self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not get facts for Registries.') if response is not None: for item in response: if self.has_tags(item.tags, self.tags): results.append(self.format_item(item)) return results def format_item(self, item): d = item.as_dict() resource_group = d['id'].split('resourceGroups/')[1].split('/')[0] name = d['name'] credentials = {} admin_user_enabled = d['admin_user_enabled'] if self.retrieve_credentials and admin_user_enabled: credentials = self.containerregistry_client.registries.list_credentials(resource_group, name).as_dict() for index in range(len(credentials['passwords'])): password = credentials['passwords'][index] if password['name'] == 'password': credentials['password'] = password['value'] elif password['name'] == 'password2': credentials['password2'] = password['value'] credentials.pop('passwords') d = { 'resource_group': resource_group, 'name': d['name'], 'location': d['location'], 'admin_user_enabled': admin_user_enabled, 'sku': d['sku']['tier'].lower(), 'provisioning_state': d['provisioning_state'], 'login_server': d['login_server'], 'id': d['id'], 'tags': d.get('tags', None), 'credentials': credentials } return d def main(): AzureRMContainerRegistryInfo() if __name__ == '__main__': main()
gpl-3.0
DARKPOP/external_chromium_org_third_party_skia
gm/rebaseline_server/compare_configs_test.py
20
1691
#!/usr/bin/python

"""
Copyright 2014 Google Inc.

Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.

Test compare_configs.py

TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate.  For now, you should:
1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn
directories within self._output_dir_expected, which wouldn't be good...
"""

# System-level imports
import os

# Must fix up PYTHONPATH before importing from within Skia
import rs_fixpypath  # pylint: disable=W0611

# Imports from within Skia
import base_unittest
import compare_configs
import gm_json
import results


def mock_get_timestamp():
  """Mock version of BaseComparisons.get_timestamp() for testing."""
  return 12345678


class CompareConfigsTest(base_unittest.TestCase):

  def test_gm(self):
    """Process results of a GM run with the ConfigComparisons object."""
    actuals_root = os.path.join(self.input_dir, 'gm-actuals')
    comparisons = compare_configs.ConfigComparisons(
        configs=('8888', 'gpu'),
        actuals_root=actuals_root,
        generated_images_root=self.temp_dir,
        diff_base_url='/static/generated-images')
    # Pin the timestamp so the generated JSON is reproducible across runs.
    comparisons.get_timestamp = mock_get_timestamp
    packaged = comparisons.get_packaged_results_of_type(
        results.KEY__HEADER__RESULTS_ALL)
    gm_json.WriteToFile(
        packaged, os.path.join(self.output_dir_actual, 'gm.json'))


def main():
  base_unittest.main(CompareConfigsTest)


if __name__ == '__main__':
  main()
bsd-3-clause
CSD-Public/stonix
src/MacBuild/ramdisk/lib/libHelperFunctions.py
1
11052
""" Helper functions, OS agnostic @author: Roy Nielsen """ #--- Native python libraries import re import os import sys import time import ctypes import traceback from subprocess import Popen, STDOUT, PIPE try: import termios except: pass #--- non-native python libraries in this source tree from . loggers import CyLogger from . loggers import LogPriority as lp from . run_commands import RunWith logger = CyLogger() run = RunWith(logger) def getOsFamily(): '''Get the os name from the "uname -s" command @author: Roy Nielsen ''' operatingsystemfamily = sys.platform return operatingsystemfamily ########################################################################### class FoundException(Exception) : '''Exeption to raise when the condition is met in a for/while Accompanying code (in collect_for_hostmaster.py) derived from example in "Rapid GUI Programming with Python and QT" pgs 66 - 71, by Mark Summerfeild For more examples on python user defined exceptions: http://docs.python.org/2/tutorial/errors.html ''' pass ############################################################################## def get_console_user(): '''Get the user that owns the console on the Mac. This user is the user that is logged in to the GUI. ''' user = False cmd = ["/usr/bin/stat", "-f", "'%Su'", "/dev/console"] try: retval = Popen(cmd, stdout=PIPE, stderr=STDOUT).communicate()[0] space_stripped = str(retval).strip() quote_stripped = str(space_stripped).strip("'") except Exception as err: logger.log(lp.VERBOSE, "Exception trying to get the console user...") logger.log(lp.VERBOSE, "Associated exception: " + str(err)) logger.log(lp.WARNING, traceback.format_exc()) logger.log(lp.WARNING, str(err)) raise err else: """ LANL's environment has chosen the regex below as a valid match for usernames on the network. 
""" if re.match("^[A-Za-z][A-Za-z1-9_]+$", quote_stripped): user = str(quote_stripped) logger.log(lp.VERBOSE, "user: " + str(user)) return user ########################################################################### def is_valid_pn(random_pn=0) : '''Validate that the property number is seven digits. @author: Roy Nielsen :param random_pn: (Default value = 0) ''' retval = True # Need to check for 7 decimal places if not re.match("^\d\d\d\d\d\d\d$", str(random_pn)): logger.log(lp.VERBOSE, "PN is not valid...") retval = False else: logger.log(lp.VERBOSE, "PN \"" + str(random_pn) + "\" is valid") return retval ########################################################################### def get_darwin_mac() : '''Get the mac address and place it in net_hw_addr Future METHOD: Use the "ifconfig" command - look for the "active" interface - collect "interface", "mac", "ipaddr" to return. PATH to ifconfig may be specific to the Mac. Description: Runs the networksetup -listallhardwareports, processing the output to get the network interface mac address. Specific to the Mac. 
@author: Roy Nielsen ''' found = 0 output = Popen(["/usr/sbin/networksetup", "-listallhardwareports"], stdout=PIPE, stderr=STDOUT).communicate()[0] try : for line in output.split("\n") : match_hw_addr = re.compile \ ("^Ethernet Address:\s+(\w+:\w+:\w+:\w+:\w+:\w+)\s*$") if re.match("^Device:\s+(\w+)\s*$", line) : found = 1 if re.match \ ("^Ethernet Address:\s+(\w+:\w+:\w+:\w+:\w+:\w+)\s*$", \ line) and found == 1 : raise FoundException except FoundException : hw_addr = match_hw_addr.search(line) net_hw_addr = hw_addr.group(1) # net_hw_addr except Exception as err: logger.log(lp.VERBOSE, "Error attempting to acquire MAC address...") logger.log(lp.VERBOSE, "Exception: " + str(err)) raise err else : net_hw_addr = "No MAC addr found" logger.log(lp.VERBOSE, "No MAC address found") return net_hw_addr ########################################################################### def is_laptop(): '''Determine if the machine this is currently running on is a laptop @author: Roy Nielsen ''' isThisALaptop = False cmd = ["/usr/sbin/system_profiler", "SPHardwareDataType"] retval, reterr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate() if not reterr : if retval : for line in retval.split("\n") : if re.match("^\s+Model Name:", line) : if re.search("[bB]ook", line) : isThisALaptop = True break else : logger.log(lp.VERBOSE, "Error processing system_profiler output...") else : logger.log(lp.VERBOSE, "Error processing system_profiler output: " + str(reterr)) return isThisALaptop ########################################################################### def touch(filename=""): '''Python implementation of the touch command.. 
:param filename: (Default value = "") ''' if re.match("^\s*$", filename) : logger.log(lp.INFO, "Cannot touch a file without a filename....") else : try: os.utime(filename, None) except: try : open(filename, 'a').close() except Exception as err : logger.log(lp.INFO, "Cannot open to touch: " + str(filename)) ########################################################################### def installFdeUser(myusername="", mypassword="") : '''Create an input plist for the fdesetup command to enable a user in the filevault login screen @author: Roy Nielsen :param myusername: (Default value = "") :param mypassword: (Default value = "") ''' success = False logger.log(lp.DEBUG, "Starting installFdeUser...") if re.match("^\s*$", myusername) : logger.log(lp.INFO, "Empty username: '" + str(myusername) + "'") elif re.match("^\s*$", mypassword) : logger.log(lp.INFO, "Empty password: '" + str(mypassword) + "'") if re.match("^\s*$", myusername) or re.match("^\s*$", mypassword) : logger.log(lp.INFO, "in buildInputPlist -- cannot build the plist with an empty username or password...") return success plist = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + \ "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n" + \ "<plist version=\"1.0\">\n" + \ "\t<dict>\n" + \ "\t\t<key>Username</key>\n" + \ "\t\t<string>" + str(myusername) + "</string>\n" + \ "\t\t<key>Password</key>\n" + \ "\t\t<string>" + str(mypassword) + "</string>\n" + \ "\t</dict>\n</plist>" ##### # Do the fdesetup command cmd = ["/usr/bin/fdesetup", "enable", "-outputplist", "-inputplist"] logger.log(lp.DEBUG, "Command: " + str(cmd)) proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) (retval, reterr) = proc.communicate(plist + "\n") logger.log(lp.DEBUG, "retval: " + str(retval)) logger.log(lp.DEBUG, "reterr: " + str(reterr)) if not reterr: success = True logger.log(lp.DEBUG, "Installed an Fde User...") return success 
########################################################################### def removeFdeUser(myusername=""): '''Remove a user from the FDE login screen @author: Roy Nielsen :param myusername: (Default value = "") ''' success = False if re.match("^\s+$", myusername) or not myusername: logger.log(lp.INFO, "Empty username: '" + str(myusername) + "'") return success cmd = ["/usr/bin/fdesetup", "remove", myusername] run.setCommand(cmd) run.communicate() if not run.getStderr(): success = True return success ############################################################################ def touch(filename=""): '''Python implementation of the touch command.. :param filename: (Default value = "") ''' if re.match("^\s*$", filename) : logger.log(lp.INFO, "Cannot touch a file without a filename....") else : try: os.utime(filename, None) except: try : open(filename, 'a').close() except Exception as err : logger.log(lp.INFO, "Cannot open to touch: " + str(filename)) ########################################################################### def getecho (fileDescriptor): '''This returns the terminal echo mode. This returns True if echo is on or False if echo is off. Child applications that are expecting you to enter a password often set ECHO False. See waitnoecho(). Borrowed from pexpect - acceptable to license :param fileDescriptor: ''' attr = termios.tcgetattr(fileDescriptor) if attr[3] & termios.ECHO: return True return False ############################################################################ def waitnoecho (fileDescriptor, timeout=3): '''This waits until the terminal ECHO flag is set False. This returns True if the echo mode is off. This returns False if the ECHO flag was not set False before the timeout. This can be used to detect when the child is waiting for a password. Usually a child application will turn off echo mode when it is waiting for the user to enter a password. 
For example, instead of expecting the "password:" prompt you can wait for the child to set ECHO off:: see below in runAsWithSudo If timeout is None or negative, then this method to block forever until ECHO flag is False. Borrowed from pexpect - acceptable to license :param fileDescriptor: :param timeout: (Default value = 3) ''' if timeout is not None and timeout > 0: end_time = time.time() + timeout while True: if not getecho(fileDescriptor): return True if timeout < 0 and timeout is not None: return False if timeout is not None: timeout = end_time - time.time() time.sleep(0.1) ########################################################################### def isSaneFilePath(filepath): '''Check for a good file path in the passed in string. @author: Roy Nielsen :param filepath: ''' sane = False if filepath and isinstance(filepath, str): if re.match("^[A-Za-z0-9_\-/\.]*", filepath): sane = True return sane ###########################################################################
gpl-2.0
JosephRedfern/dominator
dominate.py
1
2057
import cv2 import numpy as np class Dominator(object): def __init__(self, domino_filename): self.domino_image = cv2.imread(domino_filename) def detect_blobs(self, image=None): if image is None: image = self.domino_image params = cv2.SimpleBlobDetector_Params() params.filterByCircularity = True params.minCircularity = 0.85 detector = cv2.SimpleBlobDetector_create(params) keypoints = detector.detect(image) print "Keypoint Count: {0}".format(len(keypoints)) overlayed_blobs = cv2.drawKeypoints(image, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DEFAULT) return overlayed_blobs def get_mask(self, image=None): if image is None: image = self.domino_image self.show_image(image=image) gray_scene = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) _, thresh = cv2.threshold(gray_scene, 200, 255, cv2.THRESH_BINARY) self.show_image(image=thresh) kernel = np.ones((25,25),np.uint8) closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel) ret, labels, stats, centroids = cv2.connectedComponentsWithStats(closed, connectivity=4) for i in range(2, ret): im = labels[labels==i] print im.shape # self.show_image(image = labels[labels==i]) print ret print labels print stats print centroids return closed def show_image(self, title="Image Preview", image=None): if image is None: image = self.domino_image cv2.imshow(title, image)#title, cv2.resize(image, (0,0), fx=0.2, fy=0.2)) while cv2.waitKey(100) != 1048603: pass if __name__ == "__main__": dm = Dominator("data/IMG_4332.JPG") mask = dm.get_mask() dm.show_image(image=mask) masked = cv2.bitwise_and(dm.domino_image, dm.domino_image, mask=mask) dm.show_image(image=masked) blobby = dm.detect_blobs(image=masked) dm.show_image(image=blobby)
bsd-2-clause
glensc/node-gyp
gyp/pylib/gyp/MSVSToolFile.py
2736
1804
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Visual Studio project reader/writer."""

import gyp.common
import gyp.easy_xml as easy_xml


class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      cmd: Command line of the rule.
      description: Description of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file."""
    specification = ['VisualStudioToolFile',
                     {'Version': '8.00',
                      'Name': self.name
                     },
                     self.rules_section
                    ]
    easy_xml.WriteXmlIfChanged(specification, self.tool_file_path,
                               encoding="Windows-1252")
mit
mrm-xiefan/lunania-ai
catvsdog/01_job/bottleneck.py
1
4370
import os import utils import config import traceback import logging.config from luna import LunaExcepion logging.config.fileConfig("logging.conf") logger = logging.getLogger() if __name__ == '__main__': try: logger.info("------ start ------") utils.lock() from keras.applications.vgg16 import VGG16 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Activation, Dropout, Flatten, Dense from keras import optimizers from keras.utils.vis_utils import plot_model import numpy as np if not os.path.exists(config.result_dir): os.mkdir(config.result_dir) # VGG16モデルと学習済み重みをロード # Fully-connected層(FC)はいらないのでinclude_top=False) model = VGG16(include_top=False, weights='imagenet') # model.summary() # 訓練データを生成するジェネレータを作成 train_datagen = ImageDataGenerator(rescale=1.0 / 255) train_generator = train_datagen.flow_from_directory( config.train_dir, target_size=(150, 150), batch_size=32, class_mode=None, shuffle=False ) # ジェネレータから生成される画像を入力し、VGG16の出力をファイルに保存 bottleneck_features_train = model.predict_generator(train_generator, 2000/32) np.save(os.path.join(config.result_dir, 'bottleneck_features_train.npy'), bottleneck_features_train) # 検証データを生成するジェネレータを作成 test_datagen = ImageDataGenerator(rescale=1.0 / 255) validation_generator = test_datagen.flow_from_directory( config.validation_dir, target_size=(150, 150), batch_size=32, class_mode=None, shuffle=False ) # ジェネレータから生成される画像を入力し、VGG16の出力をファイルに保存 bottleneck_features_validation = model.predict_generator(validation_generator, 800/32) np.save(os.path.join(config.result_dir, 'bottleneck_features_validation.npy'), bottleneck_features_validation) # print(train_generator.class_indices) # 訓練データをロード # ジェネレータではshuffle=Falseなので最初の1000枚がcat、次の1000枚がdog train_data = np.load(os.path.join(config.result_dir, 'bottleneck_features_train.npy')) train_labels = np.array([0] * int(2000 / 2) + [1] * int(2000 / 2)) # バリデーションデータをロード validation_data = np.load(os.path.join(config.result_dir, 
'bottleneck_features_validation.npy')) validation_labels = np.array([0] * int(800 / 2) + [1] * int(800 / 2)) # FCネットワークを構築 model = Sequential() model.add(Flatten(input_shape=train_data.shape[1:])) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) model.compile( loss='binary_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'] ) #model.summary() plot_model(model, to_file='model.png') # 訓練 history = model.fit( train_data, train_labels, batch_size=32, epochs=50, validation_data=(validation_data, validation_labels) ) utils.plot_history(history) # 結果を保存 model.save(os.path.join(config.result_dir, 'bottleneck_model.h5')) model.save_weights(os.path.join(config.result_dir, 'bottleneck_weights.h5')) utils.save_history(history, os.path.join(config.result_dir, 'bottleneck_history.txt')) except (KeyboardInterrupt, SystemExit): utils.unlock() utils.error(config.syserr) except LunaExcepion as e: utils.error(e.value) if (e.value == config.locked): exit() logger.info("------ end ------") except Exception as e: logger.error(e) logger.error(traceback.format_exc()) utils.error(config.syserr) utils.unlock() logger.info("------ end ------")
mit
PaulWay/spacewalk
backend/common/rhn_pkg.py
2
3980
# # Copyright (c) 2008--2014 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os from spacewalk.common import checksum def get_package_header(filename=None, file_obj=None, fd=None): # pylint: disable=E1103 if filename is not None: stream = open(filename) need_close = True elif file_obj is not None: stream = file_obj else: stream = os.fdopen(os.dup(fd), "r") need_close = True if stream.name.endswith('.deb'): packaging = 'deb' elif stream.name.endswith('.rpm'): packaging = 'rpm' else: packaging = 'mpm' a_pkg = package_from_stream(stream, packaging) a_pkg.read_header() if need_close: stream.close() return a_pkg.header def package_from_stream(stream, packaging): if packaging == 'deb': import rhn_deb a_pkg = rhn_deb.DEB_Package(stream) elif packaging == 'rpm': import rhn_rpm a_pkg = rhn_rpm.RPM_Package(stream) elif packaging == 'mpm': import rhn_mpm a_pkg = rhn_mpm.MPM_Package(stream) else: a_pkg = None return a_pkg def package_from_filename(filename): if filename.endswith('.deb'): packaging = 'deb' elif filename.endswith('.rpm'): packaging = 'rpm' else: packaging = 'mpm' stream = open(filename) return package_from_stream(stream, packaging) BUFFER_SIZE = 16384 DEFAULT_CHECKSUM_TYPE = 'md5' class A_Package: """virtual class that implements shared methods for RPM/MPM/DEB package object""" # pylint: disable=R0902 def __init__(self, input_stream=None): self.header = None self.header_start = 0 self.header_end = 0 self.input_stream = 
input_stream self.checksum_type = DEFAULT_CHECKSUM_TYPE self.checksum = None self.payload_stream = None self.payload_size = None def read_header(self): """reads header from self.input_file""" pass def save_payload(self, output_stream): """saves payload to output_stream""" c_hash = checksum.getHashlibInstance(self.checksum_type, False) if output_stream: output_start = output_stream.tell() self._stream_copy(self.input_stream, output_stream, c_hash) self.checksum = c_hash.hexdigest() if output_stream: self.payload_stream = output_stream self.payload_size = output_stream.tell() - output_start def payload_checksum(self): # just read and compute checksum start = self.input_stream.tell() self.save_payload(None) self.payload_size = self.input_stream.tell() - start + self.header_end self.payload_stream = self.input_stream @staticmethod def _stream_copy(source, dest, c_hash=None): """copies data from the source stream to the destination stream""" while True: buf = source.read(BUFFER_SIZE) if not buf: break if dest: dest.write(buf) if c_hash: c_hash.update(buf) @staticmethod def _read_bytes(stream, amt): ret = "" while amt: buf = stream.read(min(amt, BUFFER_SIZE)) if not buf: return ret ret = ret + buf amt = amt - len(buf) return ret class InvalidPackageError(Exception): pass
gpl-2.0
furious-luke/django-yabl
yabl/templatetags/yabl.py
1
1699
from django import template from ..renderer import Renderer from ..utils import update_get as u_update_get register = template.Library() renderer = Renderer() class RequestException(Exception): pass @register.simple_tag def render_label(field, **kwargs): return renderer.render_label(field, **kwargs) @register.simple_tag def render_input(field, **kwargs): return renderer.render_input(field, **kwargs) @register.simple_tag def render_multiinput(field, idx, **kwargs): return renderer.render_multiinput(field, idx, **kwargs) @register.simple_tag def render_help(field, **kwargs): return renderer.render_errors(field, **kwargs) @register.simple_tag def render_errors(field, **kwargs): return renderer.render_errors(field, **kwargs) @register.simple_tag def render_inner_field(field, **kwargs): return renderer.render_inner_field(field, **kwargs) @register.simple_tag def render_field(field, **kwargs): return renderer.render_field(field, **kwargs) @register.simple_tag def render_submit(**kwargs): return renderer.render_submit(**kwargs) @register.simple_tag def render_form(form, **kwargs): return renderer.render_form(form, **kwargs) @register.simple_tag def merge_css(*args): return utils.merge_css(*args) @register.simple_tag def merge_js(*args): return utils.merge_js(*args) @register.simple_tag def render_pagination(page_obj, request, **kwargs): return renderer.render_pagination(page_obj, request, **kwargs) @register.simple_tag def update_get(request, **kwargs): if not request: raise RequestException('Error: Please ensure the request context processor is enabled.') return u_update_get(request, **kwargs)
mit
aimas/TuniErp-8.0
addons/hr_contract/__openerp__.py
1
1837
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Employee Contracts', 'version': '1.0', 'category': 'Human Resources', 'description': """ Add all information on the employee form to manage contracts. ============================================================= * Contract * Place of Birth, * Medical Examination Date * Company Vehicle You can assign several contracts per employee. """, 'author': 'OpenERP SA', 'website': 'https://www.tunierp.com/page/employees', 'depends': ['base_action_rule', 'hr'], 'data': [ 'security/ir.model.access.csv', 'hr_contract_view.xml', 'hr_contract_data.xml', 'base_action_rule_view.xml', ], 'demo': [], 'test': ['test/test_hr_contract.yml'], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
MojieBuddhist/linux-1
scripts/gdb/linux/dmesg.py
367
2005
# # gdb helper commands and functions for Linux kernel debugging # # kernel log buffer dump # # Copyright (c) Siemens AG, 2011, 2012 # # Authors: # Jan Kiszka <jan.kiszka@siemens.com> # # This work is licensed under the terms of the GNU GPL version 2. # import gdb import string from linux import utils class LxDmesg(gdb.Command): """Print Linux kernel log buffer.""" def __init__(self): super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) def invoke(self, arg, from_tty): log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) log_first_idx = int(gdb.parse_and_eval("log_first_idx")) log_next_idx = int(gdb.parse_and_eval("log_next_idx")) log_buf_len = int(gdb.parse_and_eval("log_buf_len")) inf = gdb.inferiors()[0] start = log_buf_addr + log_first_idx if log_first_idx < log_next_idx: log_buf_2nd_half = -1 length = log_next_idx - log_first_idx log_buf = inf.read_memory(start, length) else: log_buf_2nd_half = log_buf_len - log_first_idx log_buf = inf.read_memory(start, log_buf_2nd_half) + \ inf.read_memory(log_buf_addr, log_next_idx) pos = 0 while pos < log_buf.__len__(): length = utils.read_u16(log_buf[pos + 8:pos + 10]) if length == 0: if log_buf_2nd_half == -1: gdb.write("Corrupted log buffer!\n") break pos = log_buf_2nd_half continue text_len = utils.read_u16(log_buf[pos + 10:pos + 12]) text = log_buf[pos + 16:pos + 16 + text_len] time_stamp = utils.read_u64(log_buf[pos:pos + 8]) for line in memoryview(text).tobytes().splitlines(): gdb.write("[{time:12.6f}] {line}\n".format( time=time_stamp / 1000000000.0, line=line)) pos += length LxDmesg()
gpl-2.0
plq/spyne
spyne/protocol/_outbase.py
2
30422
# # spyne - Copyright (C) Spyne contributors. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # from __future__ import print_function, unicode_literals import logging logger = logging.getLogger(__name__) import re import uuid import errno from os.path import isabs, join, abspath from collections import deque from datetime import datetime from decimal import Decimal as D from mmap import mmap, ACCESS_READ from time import mktime, strftime try: from lxml import etree from lxml import html except ImportError: etree = None html = None from spyne.protocol._base import ProtocolMixin from spyne.model import ModelBase, XmlAttribute, SimpleModel, Null, \ ByteArray, File, ComplexModelBase, AnyXml, AnyHtml, Unicode, Decimal, \ Double, Integer, Time, DateTime, Uuid, Duration, Boolean, AnyDict, \ AnyUri, PushBase, Date from spyne.model.relational import FileData from spyne.const.http import HTTP_400, HTTP_401, HTTP_404, HTTP_405, HTTP_413, \ HTTP_500 from spyne.error import Fault, InternalError, ResourceNotFoundError, \ RequestTooLongError, RequestNotAllowed, InvalidCredentialsError from spyne.model.binary import binary_encoding_handlers, \ BINARY_ENCODING_USE_DEFAULT from spyne.util import six from spyne.util.cdict import cdict class OutProtocolBase(ProtocolMixin): """This is the abstract base class for all out 
protocol implementations. Child classes can implement only the required subset of the public methods. An output protocol must implement :func:`serialize` and :func:`create_out_string`. The OutProtocolBase class supports the following events: * ``before_serialize``: Called before after the serialization operation is attempted. * ``after_serialize``: Called after the serialization operation is finished. The arguments the constructor takes are as follows: :param app: The application this protocol belongs to. :param mime_type: The mime_type this protocol should set for transports that support this. This is a quick way to override the mime_type by default instead of subclassing the releavant protocol implementation. :param ignore_uncap: Silently ignore cases when the protocol is not capable of serializing return values instead of raising a TypeError. """ def __init__(self, app=None, mime_type=None, ignore_uncap=False, ignore_wrappers=False, binary_encoding=None, string_encoding=None): super(OutProtocolBase, self).__init__(app=app, mime_type=mime_type, ignore_wrappers=ignore_wrappers, binary_encoding=binary_encoding, string_encoding=string_encoding) self.ignore_uncap = ignore_uncap self.message = None if mime_type is not None: self.mime_type = mime_type self._to_bytes_handlers = cdict({ ModelBase: self.model_base_to_bytes, File: self.file_to_bytes, Time: self.time_to_bytes, Uuid: self.uuid_to_bytes, Null: self.null_to_bytes, Date: self.date_to_bytes, Double: self.double_to_bytes, AnyXml: self.any_xml_to_bytes, Unicode: self.unicode_to_bytes, Boolean: self.boolean_to_bytes, Decimal: self.decimal_to_bytes, Integer: self.integer_to_bytes, AnyHtml: self.any_html_to_bytes, DateTime: self.datetime_to_bytes, Duration: self.duration_to_bytes, ByteArray: self.byte_array_to_bytes, XmlAttribute: self.xmlattribute_to_bytes, ComplexModelBase: self.complex_model_base_to_bytes, }) self._to_unicode_handlers = cdict({ ModelBase: self.model_base_to_unicode, File: self.file_to_unicode, 
Time: self.time_to_unicode, Date: self.date_to_unicode, Uuid: self.uuid_to_unicode, Null: self.null_to_unicode, Double: self.double_to_unicode, AnyXml: self.any_xml_to_unicode, AnyUri: self.any_uri_to_unicode, AnyDict: self.any_dict_to_unicode, AnyHtml: self.any_html_to_unicode, Unicode: self.unicode_to_unicode, Boolean: self.boolean_to_unicode, Decimal: self.decimal_to_unicode, Integer: self.integer_to_unicode, # FIXME: Would we need a to_unicode for localized dates? DateTime: self.datetime_to_unicode, Duration: self.duration_to_unicode, ByteArray: self.byte_array_to_unicode, XmlAttribute: self.xmlattribute_to_unicode, ComplexModelBase: self.complex_model_base_to_unicode, }) self._to_bytes_iterable_handlers = cdict({ File: self.file_to_bytes_iterable, ByteArray: self.byte_array_to_bytes_iterable, ModelBase: self.model_base_to_bytes_iterable, SimpleModel: self.simple_model_to_bytes_iterable, ComplexModelBase: self.complex_model_to_bytes_iterable, }) def serialize(self, ctx, message): """Serializes ``ctx.out_object``. If ctx.out_stream is not None, ``ctx.out_document`` and ``ctx.out_string`` are skipped and the response is written directly to ``ctx.out_stream``. :param ctx: :class:`MethodContext` instance. :param message: One of ``(ProtocolBase.REQUEST, ProtocolBase.RESPONSE)``. """ def create_out_string(self, ctx, out_string_encoding=None): """Uses ctx.out_document to set ctx.out_string""" def fault_to_http_response_code(self, fault): """Special function to convert native Python exceptions to Http response codes. 
""" if isinstance(fault, RequestTooLongError): return HTTP_413 if isinstance(fault, ResourceNotFoundError): return HTTP_404 if isinstance(fault, RequestNotAllowed): return HTTP_405 if isinstance(fault, InvalidCredentialsError): return HTTP_401 if isinstance(fault, Fault) and (fault.faultcode.startswith('Client.') or fault.faultcode == 'Client'): return HTTP_400 return HTTP_500 def set_validator(self, validator): """You must override this function if you want your protocol to support validation.""" assert validator is None self.validator = None def to_bytes(self, cls, value, *args, **kwargs): if value is None: return None handler = self._to_bytes_handlers[cls] retval = handler(cls, value, *args, **kwargs) # enable this only for testing. we're not as strict for performance # reasons # assert isinstance(retval, six.binary_type), \ # "AssertionError: %r %r %r handler: %r" % \ # (type(retval), six.binary_type, retval, handler) return retval def to_unicode(self, cls, value, *args, **kwargs): if value is None: return None handler = self._to_unicode_handlers[cls] retval = handler(cls, value, *args, **kwargs) # enable this only for testing. 
we're not as strict for performance # reasons as well as not to take the joy of dealing with duck typing # from the user # assert isinstance(retval, six.text_type), \ # "AssertionError: %r %r handler: %r" % \ # (type(retval), retval, handler) return retval def to_bytes_iterable(self, cls, value): if value is None: return [] if isinstance(value, PushBase): return value handler = self._to_bytes_iterable_handlers[cls] return handler(cls, value) def null_to_bytes(self, cls, value, **_): return b"" def null_to_unicode(self, cls, value, **_): return u"" def any_xml_to_bytes(self, cls, value, **_): return etree.tostring(value) def any_xml_to_unicode(self, cls, value, **_): return etree.tostring(value, encoding='unicode') def any_dict_to_unicode(self, cls, value, **_): return repr(value) def any_html_to_bytes(self, cls, value, **_): return html.tostring(value) def any_html_to_unicode(self, cls, value, **_): return html.tostring(value, encoding='unicode') def uuid_to_bytes(self, cls, value, suggested_encoding=None, **_): ser_as = self.get_cls_attrs(cls).serialize_as retval = self.uuid_to_unicode(cls, value, suggested_encoding=suggested_encoding, **_) if ser_as in ('bytes', 'bytes_le', 'fields', 'int', six.binary_type): return retval return retval.encode('ascii') def uuid_to_unicode(self, cls, value, suggested_encoding=None, **_): attr = self.get_cls_attrs(cls) ser_as = attr.serialize_as encoding = attr.encoding if encoding is None: encoding = suggested_encoding retval = _uuid_serialize[ser_as](value) if ser_as in ('bytes', 'bytes_le'): retval = binary_encoding_handlers[encoding]((retval,)) return retval def unicode_to_bytes(self, cls, value, **_): retval = value cls_attrs = self.get_cls_attrs(cls) if isinstance(value, six.text_type): if cls_attrs.encoding is not None: retval = value.encode(cls_attrs.encoding) elif self.default_string_encoding is not None: retval = value.encode(self.default_string_encoding) elif not six.PY2: logger.warning("You need to set either an encoding 
for %r " "or a default_string_encoding for %r", cls, self) if cls_attrs.str_format is not None: return cls_attrs.str_format.format(value) elif cls_attrs.format is not None: return cls_attrs.format % retval return retval def any_uri_to_unicode(self, cls, value, **_): return self.unicode_to_unicode(cls, value, **_) def unicode_to_unicode(self, cls, value, **_): # :))) cls_attrs = self.get_cls_attrs(cls) retval = value if isinstance(value, six.binary_type): if cls_attrs.encoding is not None: retval = value.decode(cls_attrs.encoding) if self.default_string_encoding is not None: retval = value.decode(self.default_string_encoding) elif not six.PY2: logger.warning("You need to set either an encoding for %r " "or a default_string_encoding for %r", cls, self) if cls_attrs.str_format is not None: return cls_attrs.str_format.format(value) elif cls_attrs.format is not None: return cls_attrs.format % retval return retval def decimal_to_bytes(self, cls, value, **_): return self.decimal_to_unicode(cls, value, **_).encode('utf8') def decimal_to_unicode(self, cls, value, **_): D(value) # sanity check cls_attrs = self.get_cls_attrs(cls) if cls_attrs.str_format is not None: return cls_attrs.str_format.format(value) elif cls_attrs.format is not None: return cls_attrs.format % value return str(value) def double_to_bytes(self, cls, value, **_): return self.double_to_unicode(cls, value, **_).encode('utf8') def double_to_unicode(self, cls, value, **_): float(value) # sanity check cls_attrs = self.get_cls_attrs(cls) if cls_attrs.str_format is not None: return cls_attrs.str_format.format(value) elif cls_attrs.format is not None: return cls_attrs.format % value return repr(value) def integer_to_bytes(self, cls, value, **_): return self.integer_to_unicode(cls, value, **_).encode('utf8') def integer_to_unicode(self, cls, value, **_): int(value) # sanity check cls_attrs = self.get_cls_attrs(cls) if cls_attrs.str_format is not None: return cls_attrs.str_format.format(value) elif cls_attrs.format 
is not None: return cls_attrs.format % value return str(value) def time_to_bytes(self, cls, value, **kwargs): return self.time_to_unicode(cls, value, **kwargs) def time_to_unicode(self, cls, value, **_): """Returns ISO formatted times.""" if isinstance(value, datetime): value = value.time() return value.isoformat() def date_to_bytes(self, cls, val, **_): return self.date_to_unicode(cls, val, **_).encode("utf8") def date_to_unicode(self, cls, val, **_): if isinstance(val, datetime): val = val.date() sa = self.get_cls_attrs(cls).serialize_as if sa is None or sa in (str, 'str'): return self._date_to_bytes(cls, val) return _datetime_smap[sa](cls, val) def datetime_to_bytes(self, cls, val, **_): retval = self.datetime_to_unicode(cls, val, **_) sa = self.get_cls_attrs(cls).serialize_as if sa is None or sa in (six.text_type, str, 'str'): return retval.encode('ascii') return retval def datetime_to_unicode(self, cls, val, **_): sa = self.get_cls_attrs(cls).serialize_as if sa is None or sa in (six.text_type, str, 'str'): return self._datetime_to_unicode(cls, val) return _datetime_smap[sa](cls, val) def duration_to_bytes(self, cls, value, **_): return self.duration_to_unicode(cls, value, **_).encode("utf8") def duration_to_unicode(self, cls, value, **_): if value.days < 0: value = -value negative = True else: negative = False tot_sec = int(value.total_seconds()) seconds = value.seconds % 60 minutes = value.seconds // 60 hours = minutes // 60 minutes %= 60 seconds = float(seconds) useconds = value.microseconds retval = deque() if negative: retval.append("-P") else: retval.append("P") if value.days != 0: retval.append("%iD" % value.days) if tot_sec != 0 and tot_sec % 86400 == 0 and useconds == 0: return ''.join(retval) retval.append('T') if hours > 0: retval.append("%iH" % hours) if minutes > 0: retval.append("%iM" % minutes) if seconds > 0 or useconds > 0: retval.append("%i" % seconds) if useconds > 0: retval.append(".%i" % useconds) retval.append("S") if len(retval) == 2: 
retval.append('0S') return ''.join(retval) def boolean_to_bytes(self, cls, value, **_): return str(bool(value)).lower().encode('ascii') def boolean_to_unicode(self, cls, value, **_): return str(bool(value)).lower() def byte_array_to_bytes(self, cls, value, suggested_encoding=None, **_): cls_attrs = self.get_cls_attrs(cls) encoding = cls_attrs.encoding if encoding is BINARY_ENCODING_USE_DEFAULT: if suggested_encoding is None: encoding = self.binary_encoding else: encoding = suggested_encoding if encoding is None and isinstance(value, (list, tuple)) \ and len(value) == 1 and isinstance(value[0], mmap): return value[0] encoder = binary_encoding_handlers[encoding] logger.debug("Using binary encoder %r for encoding %r", encoder, encoding) retval = encoder(value) if encoding is not None and isinstance(retval, six.text_type): retval = retval.encode('ascii') return retval def byte_array_to_unicode(self, cls, value, suggested_encoding=None, **_): encoding = self.get_cls_attrs(cls).encoding if encoding is BINARY_ENCODING_USE_DEFAULT: if suggested_encoding is None: encoding = self.binary_encoding else: encoding = suggested_encoding if encoding is None: raise ValueError("Arbitrary binary data can't be serialized to " "unicode") retval = binary_encoding_handlers[encoding](value) if not isinstance(retval, six.text_type): retval = retval.decode('ascii') return retval def byte_array_to_bytes_iterable(self, cls, value, **_): return value def file_to_bytes(self, cls, value, suggested_encoding=None): """ :param cls: A :class:`spyne.model.File` subclass :param value: Either a sequence of byte chunks or a :class:`spyne.model.File.Value` instance. 
""" encoding = self.get_cls_attrs(cls).encoding if encoding is BINARY_ENCODING_USE_DEFAULT: if suggested_encoding is None: encoding = self.binary_encoding else: encoding = suggested_encoding if isinstance(value, File.Value): if value.data is not None: return binary_encoding_handlers[encoding](value.data) if value.handle is not None: # maybe we should have used the sweeping except: here. if hasattr(value.handle, 'fileno'): if six.PY2: fileno = value.handle.fileno() data = (mmap(fileno, 0, access=ACCESS_READ),) else: import io try: fileno = value.handle.fileno() data = mmap(fileno, 0, access=ACCESS_READ) except io.UnsupportedOperation: data = (value.handle.read(),) else: data = (value.handle.read(),) return binary_encoding_handlers[encoding](data) if value.path is not None: handle = open(value.path, 'rb') fileno = handle.fileno() data = mmap(fileno, 0, access=ACCESS_READ) return binary_encoding_handlers[encoding](data) assert False, "Unhandled file type" if isinstance(value, FileData): try: return binary_encoding_handlers[encoding](value.data) except Exception as e: logger.error("Error encoding value to binary. Error: %r, Value: %r", e, value) raise try: return binary_encoding_handlers[encoding](value) except Exception as e: logger.error("Error encoding value to binary. Error: %r, Value: %r", e, value) raise def file_to_unicode(self, cls, value, suggested_encoding=None): """ :param cls: A :class:`spyne.model.File` subclass :param value: Either a sequence of byte chunks or a :class:`spyne.model.File.Value` instance. 
""" cls_attrs = self.get_cls_attrs(cls) encoding = cls_attrs.encoding if encoding is BINARY_ENCODING_USE_DEFAULT: encoding = suggested_encoding if encoding is None and cls_attrs.mode is File.TEXT: raise ValueError("Arbitrary binary data can't be serialized to " "unicode.") retval = self.file_to_bytes(cls, value, suggested_encoding) if not isinstance(retval, six.text_type): retval = retval.decode('ascii') return retval def file_to_bytes_iterable(self, cls, value, **_): if value.data is not None: if isinstance(value.data, (list, tuple)) and \ isinstance(value.data[0], mmap): return _file_to_iter(value.data[0]) return iter(value.data) if value.handle is not None: f = value.handle f.seek(0) return _file_to_iter(f) assert value.path is not None, "You need to write data to " \ "persistent storage first if you want to read it back." try: path = value.path if not isabs(value.path): path = join(value.store, value.path) assert abspath(path).startswith(value.store), \ "No relative paths are allowed" return _file_to_iter(open(path, 'rb')) except IOError as e: if e.errno == errno.ENOENT: raise ResourceNotFoundError(value.path) else: raise InternalError("Error accessing requested file") def simple_model_to_bytes_iterable(self, cls, value, **kwargs): retval = self.to_bytes(cls, value, **kwargs) if retval is None: return (b'',) return (retval,) def complex_model_to_bytes_iterable(self, cls, value, **_): if self.ignore_uncap: return tuple() raise TypeError("This protocol can only serialize primitives.") def complex_model_base_to_bytes(self, cls, value, **_): raise TypeError("Only primitives can be serialized to string.") def complex_model_base_to_unicode(self, cls, value, **_): raise TypeError("Only primitives can be serialized to string.") def xmlattribute_to_bytes(self, cls, string, **kwargs): return self.to_bytes(cls.type, string, **kwargs) def xmlattribute_to_unicode(self, cls, string, **kwargs): return self.to_unicode(cls.type, string, **kwargs) def 
model_base_to_bytes_iterable(self, cls, value, **kwargs): return cls.to_bytes_iterable(value, **kwargs) def model_base_to_bytes(self, cls, value, **kwargs): return cls.to_bytes(value, **kwargs) def model_base_to_unicode(self, cls, value, **kwargs): return cls.to_unicode(value, **kwargs) def _datetime_to_unicode(self, cls, value, **_): """Returns ISO formatted datetimes.""" cls_attrs = self.get_cls_attrs(cls) if cls_attrs.as_timezone is not None and value.tzinfo is not None: value = value.astimezone(cls_attrs.as_timezone) if not cls_attrs.timezone: value = value.replace(tzinfo=None) dt_format = self._get_datetime_format(cls_attrs) if dt_format is None: retval = value.isoformat() elif six.PY2 and isinstance(dt_format, unicode): retval = self.strftime(value, dt_format.encode('utf8')).decode('utf8') else: retval = self.strftime(value, dt_format) # FIXME: must deprecate string_format, this should have been str_format str_format = cls_attrs.string_format if str_format is None: str_format = cls_attrs.str_format if str_format is not None: return str_format.format(value) # FIXME: must deprecate interp_format, this should have been just format interp_format = cls_attrs.interp_format if interp_format is not None: return interp_format.format(value) return retval def _date_to_bytes(self, cls, value, **_): cls_attrs = self.get_cls_attrs(cls) date_format = cls_attrs.date_format if date_format is None: retval = value.isoformat() elif six.PY2 and isinstance(date_format, unicode): date_format = date_format.encode('utf8') retval = self.strftime(value, date_format).decode('utf8') else: retval = self.strftime(value, date_format) str_format = cls_attrs.str_format if str_format is not None: return str_format.format(value) format = cls_attrs.format if format is not None: return format.format(value) return retval # Format a datetime through its full proleptic Gregorian date range. 
# http://code.activestate.com/recipes/ # 306860-proleptic-gregorian-dates-and-strftime-before-1900/ # http://stackoverflow.com/a/32206673 # # >>> strftime(datetime.date(1850, 8, 2), "%Y/%M/%d was a %A") # '1850/00/02 was a Friday' # >>> # remove the unsupposed "%s" command. But don't # do it if there's an even number of %s before the s # because those are all escaped. Can't simply # remove the s because the result of # %sY # should be %Y if %s isn't supported, not the # 4 digit year. _illegal_s = re.compile(r"((^|[^%])(%%)*%s)") @staticmethod def _findall_datetime(text, substr): # Also finds overlaps sites = [] i = 0 while 1: j = text.find(substr, i) if j == -1: break sites.append(j) i=j+1 return sites # Every 28 years the calendar repeats, except through century leap # years where it's 6 years. But only if you're using the Gregorian # calendar. ;) @classmethod def strftime(cls, dt, fmt): if cls._illegal_s.search(fmt): raise TypeError("This strftime implementation does not handle %s") if dt.year > 1900: return dt.strftime(fmt) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6*(delta // 100 + delta // 400) year += off # Move to around the year 2000 year += ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = strftime(fmt, (year,) + timetuple[1:]) sites1 = cls._findall_datetime(s1, str(year)) s2 = strftime(fmt, (year+28,) + timetuple[1:]) sites2 = cls._findall_datetime(s2, str(year+28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%4d" % (dt.year,) for site in sites: s = s[:site] + syear + s[site+4:] return s _uuid_serialize = { None: str, str: str, 'str': str, 'hex': lambda u: u.hex, 'urn': lambda u: u.urn, 'bytes': lambda u: u.bytes, 'bytes_le': lambda u: u.bytes_le, 'fields': lambda u: u.fields, int: lambda u: u.int, 'int': lambda u: u.int, } _uuid_deserialize = { None: uuid.UUID, str: uuid.UUID, 'str': uuid.UUID, 'hex': lambda s: 
uuid.UUID(hex=s), 'urn': lambda s: uuid.UUID(hex=s), 'bytes': lambda s: uuid.UUID(bytes=s), 'bytes_le': lambda s: uuid.UUID(bytes_le=s), 'fields': lambda s: uuid.UUID(fields=s), int: lambda s: uuid.UUID(int=s), 'int': lambda s: uuid.UUID(int=s), (int, int): lambda s: uuid.UUID(int=s), ('int', int): lambda s: uuid.UUID(int=s), (int, str): lambda s: uuid.UUID(int=int(s)), ('int', str): lambda s: uuid.UUID(int=int(s)), } if six.PY2: _uuid_deserialize[('int', long)] = _uuid_deserialize[('int', int)] _uuid_deserialize[(int, long)] = _uuid_deserialize[('int', int)] def _parse_datetime_iso_match(date_match, tz=None): fields = date_match.groupdict() year = int(fields.get('year')) month = int(fields.get('month')) day = int(fields.get('day')) hour = int(fields.get('hr')) minute = int(fields.get('min')) second = int(fields.get('sec')) usecond = fields.get("sec_frac") if usecond is None: usecond = 0 else: # we only get the most significant 6 digits because that's what # datetime can handle. usecond = int(round(float(usecond) * 1e6)) return datetime(year, month, day, hour, minute, second, usecond, tz) _dt_sec = lambda cls, val: \ int(mktime(val.timetuple())) _dt_sec_float = lambda cls, val: \ mktime(val.timetuple()) + (val.microsecond / 1e6) _dt_msec = lambda cls, val: \ int(mktime(val.timetuple())) * 1000 + (val.microsecond // 1000) _dt_msec_float = lambda cls, val: \ mktime(val.timetuple()) * 1000 + (val.microsecond / 1000.0) _dt_usec = lambda cls, val: \ int(mktime(val.timetuple())) * 1000000 + val.microsecond _datetime_smap = { 'sec': _dt_sec, 'secs': _dt_sec, 'second': _dt_sec, 'seconds': _dt_sec, 'sec_float': _dt_sec_float, 'secs_float': _dt_sec_float, 'second_float': _dt_sec_float, 'seconds_float': _dt_sec_float, 'msec': _dt_msec, 'msecs': _dt_msec, 'msecond': _dt_msec, 'mseconds': _dt_msec, 'millisecond': _dt_msec, 'milliseconds': _dt_msec, 'msec_float': _dt_msec_float, 'msecs_float': _dt_msec_float, 'msecond_float': _dt_msec_float, 'mseconds_float': _dt_msec_float, 
    # Millisecond/microsecond aliases — continuation of the serializer-name
    # map started above (maps a cls_attrs.serialize_as string to a callable).
    'millisecond_float': _dt_msec_float, 'milliseconds_float': _dt_msec_float,
    'usec': _dt_usec, 'usecs': _dt_usec,
    'usecond': _dt_usec, 'useconds': _dt_usec,
    'microsecond': _dt_usec, 'microseconds': _dt_usec,
}


def _file_to_iter(f):
    """Yield the contents of the file-like object *f* in 8 KiB chunks.

    The file is closed when the data is exhausted, and also if the
    generator is abandoned early (the ``finally`` clause runs on
    generator close as well).
    """
    try:
        data = f.read(8192)
        while len(data) > 0:
            yield data
            data = f.read(8192)
    finally:
        f.close()


# Attribute names that carry metadata rather than values.
# NOTE(review): consumers of this list are outside the visible chunk —
# presumably used when copying/merging customized class attributes; confirm.
META_ATTR = ['nullable', 'default_factory']
lgpl-2.1
xunmengfeng/engine
tools/android/checkstyle/checkstyle.py
63
2924
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script that is used by PRESUBMIT.py to run style checks on Java files."""

import errno
import os
import subprocess
import xml.dom.minidom


CHROMIUM_SRC = os.path.normpath(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
CHECKSTYLE_ROOT = os.path.join(CHROMIUM_SRC, 'third_party', 'checkstyle',
                               'checkstyle-6.5-all.jar')


def RunCheckstyle(input_api, output_api, style_file, black_list=None):
  """Runs checkstyle on the affected Java files and reports violations.

  Args:
    input_api: presubmit InputApi, used to enumerate affected files.
    output_api: presubmit OutputApi, used to build result objects.
    style_file: path to the checkstyle XML configuration file.
    black_list: optional list of path regexps to exclude from checking.

  Returns:
    A (possibly empty) list of presubmit warning/error results.
  """
  if not os.path.exists(style_file):
    file_error = (' Java checkstyle configuration file is missing: '
                  + style_file)
    return [output_api.PresubmitError(file_error)]

  # Filter out non-Java files and files that were deleted.
  java_files = [x.AbsoluteLocalPath() for x in input_api.AffectedSourceFiles(
                    lambda f: input_api.FilterSourceFile(
                        f, black_list=black_list))
                if os.path.splitext(x.LocalPath())[1] == '.java']
  if not java_files:
    return []

  # Run checkstyle.
  checkstyle_env = os.environ.copy()
  checkstyle_env['JAVA_CMD'] = 'java'
  try:
    check = subprocess.Popen(['java', '-cp', CHECKSTYLE_ROOT,
                              'com.puppycrawl.tools.checkstyle.Main', '-c',
                              style_file, '-f', 'xml'] + java_files,
                             stdout=subprocess.PIPE, env=checkstyle_env)
    stdout, _ = check.communicate()
  except OSError as e:
    if e.errno == errno.ENOENT:
      install_error = (' checkstyle is not installed. Please run '
                       'build/install-build-deps-android.sh')
      return [output_api.PresubmitPromptWarning(install_error)]
    # BUG FIX: previously any other OSError was silently swallowed here and
    # execution fell through to parseString() with 'stdout' undefined,
    # raising a confusing NameError. Propagate unexpected errors instead.
    raise

  result_errors = []
  result_warnings = []

  local_path = input_api.PresubmitLocalPath()
  root = xml.dom.minidom.parseString(stdout)
  for fileElement in root.getElementsByTagName('file'):
    fileName = fileElement.attributes['name'].value
    fileName = os.path.relpath(fileName, local_path)
    errors = fileElement.getElementsByTagName('error')
    for error in errors:
      line = error.attributes['line'].value
      column = ''
      if error.hasAttribute('column'):
        column = '%s:' % (error.attributes['column'].value)
      message = error.attributes['message'].value
      result = '  %s:%s:%s %s' % (fileName, line, column, message)

      severity = error.attributes['severity'].value
      if severity == 'error':
        result_errors.append(result)
      elif severity == 'warning':
        result_warnings.append(result)

  result = []
  if result_warnings:
    result.append(
        output_api.PresubmitPromptWarning('\n'.join(result_warnings)))
  if result_errors:
    result.append(output_api.PresubmitError('\n'.join(result_errors)))
  return result
bsd-3-clause
bluechris/xbmc
addons/service.xbmc.versioncheck/lib/common.py
82
7008
# -*- coding: utf-8 -*-
#
#     Copyright (C) 2013 Team-XBMC
#
#     This program is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 3 of the License, or
#     (at your option) any later version.
#
#     This program is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

"""Shared helpers for the XBMC version-check service add-on."""

import os
# BUG FIX: normalize_string() calls unicodedata.normalize() but the module
# was never imported; the bare 'except' there silently swallowed the
# resulting NameError, so normalization never actually happened.
import unicodedata

import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs

__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__addonprofile__ = xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')


# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
    """Best-effort decode of *text* to a unicode object (Python 2)."""
    try:
        text = unicode(text, encoding)
    except:
        pass
    return text


def normalize_string(text):
    """Best-effort NFKD normalization of *text* folded down to ASCII."""
    try:
        text = unicodedata.normalize('NFKD',
                                     string_unicode(text)).encode('ascii', 'ignore')
    except:
        pass
    return text


def localise(id):
    """Return the add-on's localized string for *id*, ASCII-normalized."""
    string = normalize_string(__addon__.getLocalizedString(id))
    return string


def log(txt):
    """Write *txt* to the XBMC debug log, prefixed with the add-on name."""
    if isinstance(txt, str):
        txt = txt.decode("utf-8")
    message = u'%s: %s' % ("Version Check", txt)
    xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)


def get_password_from_user():
    """Prompt for a password; returns the text, or None if cancelled."""
    # FIX: pre-initialize so cancelling the dialog no longer raises
    # NameError on the return statement below.
    pwd = None
    keyboard = xbmc.Keyboard("", __addonname__ + "," + localise(32022), True)
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        pwd = keyboard.getText()
    return pwd


def message_upgrade_success():
    """Show a 15-second 'upgrade succeeded' notification."""
    xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)"
                        % (__addonname__, localise(32013), 15000, __icon__))


def message_restart():
    """Offer to restart the application; restart if the user agrees."""
    if dialog_yesno(32014):
        xbmc.executebuiltin("RestartApp")


def dialog_yesno(line1=0, line2=0):
    """Show a yes/no dialog built from localized string ids."""
    return xbmcgui.Dialog().yesno(__addonname__,
                                  localise(line1),
                                  localise(line2))


def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
    """Show a one-time upgrade notification dialog (legacy variant)."""
    # Don't show while watching a video
    while (xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
        xbmc.sleep(1000)
    i = 0
    while (i < 5 and not xbmc.abortRequested):
        xbmc.sleep(1000)
        i += 1
    if __addon__.getSetting("lastnotified_version") < __addonversion__:
        xbmcgui.Dialog().ok(__addonname__,
                            localise(msg),
                            localise(32001),
                            localise(32002))
        #__addon__.setSetting("lastnotified_version", __addonversion__)
    else:
        log("Already notified one time for upgrading.")


def upgrade_message2(version_installed, version_available,
                     version_stable, oldversion, upgrade,):
    """Show an upgrade notification comparing installed/available versions.

    version_installed / version_available / version_stable are json-rpc
    version dicts ('major', 'minor', 'tag', optional 'tagversion');
    oldversion is 'stable' or a non-stable tag string.
    """
    # shorten releasecandidate to rc
    if version_installed['tag'] == 'releasecandidate':
        version_installed['tag'] = 'rc'
    if version_available['tag'] == 'releasecandidate':
        version_available['tag'] = 'rc'
    # convert json-rpc result to strings for usage
    msg_current = '%i.%i %s%s' % (version_installed['major'],
                                  version_installed['minor'],
                                  version_installed['tag'],
                                  version_installed.get('tagversion', ''))
    msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion', '')
    msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion', '')
    msg = localise(32034) % (msg_current, msg_available)

    # Don't show notify while watching a video
    while (xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
        xbmc.sleep(1000)
    i = 0
    while (i < 10 and not xbmc.abortRequested):
        xbmc.sleep(1000)
        i += 1

    # hack: convert current version number to stable string
    # so users don't get notified again. remove in future
    if __addon__.getSetting("lastnotified_version") == '0.1.24':
        __addon__.setSetting("lastnotified_stable", msg_stable)

    # Show different dialogs depending if there's a newer stable available.
    # Also split them between xbmc and kodi notifications to reduce possible
    # confusion. People will find out once they visit the website.
    # For stable only notify once and when there's a newer stable available.
    # Ignore any add-on updates as those only count for != stable
    if oldversion == 'stable' and __addon__.getSetting("lastnotified_stable") != msg_stable:
        if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32030),
                                localise(32031))
        else:
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32032),
                                localise(32033))
        __addon__.setSetting("lastnotified_stable", msg_stable)

    elif oldversion != 'stable' and __addon__.getSetting("lastnotified_version") != msg_available:
        if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
            # point them to xbmc.org
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32035),
                                localise(32031))
        else:
            #use kodi.tv
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32035),
                                localise(32033))

        # older skins don't support a text field in the OK dialog.
        # let's use split lines for now. see code above.
        '''
        msg = localise(32034) %(msg_current, msg_available)
        if oldversion == 'stable':
            msg = msg + ' ' + localise(32030)
        else:
            msg = msg + ' ' + localise(32035)
        msg = msg + ' ' + localise(32031)
        xbmcgui.Dialog().ok(__addonname__, msg)
        #__addon__.setSetting("lastnotified_version", __addonversion__)
        '''
        __addon__.setSetting("lastnotified_version", msg_available)

    else:
        log("Already notified one time for upgrading.")
gpl-2.0
olamy/tapiriik
tapiriik/services/api.py
10
3512
class ServiceExceptionScope:
    """Namespace of scope constants attached to a ServiceException."""
    Account = "account"
    Service = "service"
    # Unlike Account- and Service-level blocking exceptions, activity-level
    # errors are implemented via ActivityRecord.FailureCounts. Eventually,
    # all errors might be stored in ActivityRecords.
    Activity = "activity"


class ServiceException(Exception):
    """Error raised by a service, annotated with scope/blocking metadata."""

    def __init__(self, message, scope=ServiceExceptionScope.Service,
                 block=False, user_exception=None, trigger_exhaustive=True):
        super(ServiceException, self).__init__(message)
        self.Message = message
        self.Scope = scope
        self.Block = block
        self.UserException = user_exception
        self.TriggerExhaustive = trigger_exhaustive

    def __str__(self):
        return "%s (user %s )" % (self.Message, self.UserException)


class ServiceWarning(ServiceException):
    """Non-fatal variant of ServiceException."""
    pass


class APIException(ServiceException):
    """ServiceException raised by a remote API call."""
    pass


class APIWarning(ServiceWarning):
    """Non-fatal variant of APIException."""
    pass


# Theoretically, APIExcludeActivity should actually be a ServiceException
# with block=True, scope=Activity. It's on the to-do list.
class APIExcludeActivity(Exception):
    """Raised to exclude a single activity from synchronization."""

    def __init__(self, message, activity=None, activity_id=None,
                 permanent=True, user_exception=None):
        super(APIExcludeActivity, self).__init__(message)
        self.Message = message
        self.Activity = activity
        self.ExternalActivityID = activity_id
        self.Permanent = permanent
        self.UserException = user_exception

    def __str__(self):
        return "%s (activity %s)" % (self.Message, self.ExternalActivityID)


class UserExceptionType:
    """String constants identifying user-facing error categories."""

    # Account-level exceptions (not a hardcoded thing, just to keep these
    # separate from the activity-level ones below)
    Authorization = "auth"
    AccountFull = "full"
    AccountExpired = "expired"
    # vs. expired, which implies it was at some point functional, via
    # payment or trial or otherwise.
    AccountUnpaid = "unpaid"

    # Activity-level exceptions
    FlowException = "flow"
    Private = "private"
    NotTriggered = "notrigger"
    # They've instructed us not to synchronize activities for some time
    # after they complete
    Deferred = "deferred"
    # They've instructed us not to synchronize activities before some date
    PredatesWindow = "predates_window"
    RateLimited = "ratelimited"
    # They forgot to check the "Remember these details" box
    MissingCredentials = "credentials_missing"
    # Don't think this error is even possible any more.
    NotConfigured = "config_missing"
    StationaryUnsupported = "stationary"
    NonGPSUnsupported = "nongps"
    TypeUnsupported = "type_unsupported"
    # Some services demand more data than others provide (ahem, N+)
    InsufficientData = "data_insufficient"
    DownloadError = "download"
    # Cases when a service fails listing, so nothing can be uploaded to it.
    ListingError = "list"
    UploadError = "upload"
    SanityError = "sanity"
    # Kind of a scary term for what's generally "some data is missing"
    Corrupt = "corrupt"
    Untagged = "untagged"
    LiveTracking = "live"
    UnknownTZ = "tz_unknown"
    System = "system"
    Other = "other"


class UserException:
    """Details of an error to surface to the user."""

    def __init__(self, type, extra=None, intervention_required=False,
                 clear_group=None):
        self.Type = type
        # Unimplemented - displayed as part of the error message.
        self.Extra = extra
        # Does the user need to dismiss this error?
        self.InterventionRequired = intervention_required
        # Used to group error messages displayed to the user, and let them
        # clear a group that share a common cause.
        self.ClearGroup = clear_group or type
apache-2.0
laiqiqi886/kbengine
kbe/res/scripts/common/Lib/idlelib/configHandler.py
59
29481
"""Provides access to stored IDLE configuration information. Refer to the comments at the beginning of config-main.def for a description of the available configuration files and the design implemented to update user configuration information. In particular, user configuration choices which duplicate the defaults will be removed from the user's configuration files, and if a file becomes empty, it will be deleted. The contents of the user files may be altered using the Options/Configure IDLE menu to access the configuration GUI (configDialog.py), or manually. Throughout this module there is an emphasis on returning useable defaults when a problem occurs in returning a requested configuration value back to idle. This is to allow IDLE to continue to function in spite of errors in the retrieval of config information. When a default is returned instead of a requested config value, a message is printed to stderr to aid in configuration problem notification and resolution. """ import os import sys from configparser import ConfigParser class InvalidConfigType(Exception): pass class InvalidConfigSet(Exception): pass class InvalidFgBg(Exception): pass class InvalidTheme(Exception): pass class IdleConfParser(ConfigParser): """ A ConfigParser specialised for idle configuration file handling """ def __init__(self, cfgFile, cfgDefaults=None): """ cfgFile - string, fully specified configuration file name """ self.file=cfgFile ConfigParser.__init__(self, defaults=cfgDefaults, strict=False) def Get(self, section, option, type=None, default=None, raw=False): """ Get an option value for given section/option or return default. If type is specified, return as type. 
""" if not self.has_option(section, option): return default if type=='bool': return self.getboolean(section, option) elif type=='int': return self.getint(section, option) else: return self.get(section, option, raw=raw) def GetOptionList(self,section): """ Get an option list for given section """ if self.has_section(section): return self.options(section) else: #return a default value return [] def Load(self): """ Load the configuration file from disk """ self.read(self.file) class IdleUserConfParser(IdleConfParser): """ IdleConfigParser specialised for user configuration handling. """ def AddSection(self,section): """ if section doesn't exist, add it """ if not self.has_section(section): self.add_section(section) def RemoveEmptySections(self): """ remove any sections that have no options """ for section in self.sections(): if not self.GetOptionList(section): self.remove_section(section) def IsEmpty(self): """ Remove empty sections and then return 1 if parser has no sections left, else return 0. """ self.RemoveEmptySections() if self.sections(): return 0 else: return 1 def RemoveOption(self,section,option): """ If section/option exists, remove it. Returns 1 if option was removed, 0 otherwise. """ if self.has_section(section): return self.remove_option(section,option) def SetOption(self,section,option,value): """ Sets option to value, adding section if required. Returns 1 if option was added or changed, otherwise 0. """ if self.has_option(section,option): if self.get(section,option)==value: return 0 else: self.set(section,option,value) return 1 else: if not self.has_section(section): self.add_section(section) self.set(section,option,value) return 1 def RemoveFile(self): """ Removes the user config file from disk if it exists. """ if os.path.exists(self.file): os.remove(self.file) def Save(self): """Update user configuration file. Remove empty sections. If resulting config isn't empty, write the file to disk. If config is empty, remove the file from disk if it exists. 
""" if not self.IsEmpty(): fname = self.file try: cfgFile = open(fname, 'w') except OSError: os.unlink(fname) cfgFile = open(fname, 'w') with cfgFile: self.write(cfgFile) else: self.RemoveFile() class IdleConf: """ holds config parsers for all idle config files: default config files (idle install dir)/config-main.def (idle install dir)/config-extensions.def (idle install dir)/config-highlight.def (idle install dir)/config-keys.def user config files (user home dir)/.idlerc/config-main.cfg (user home dir)/.idlerc/config-extensions.cfg (user home dir)/.idlerc/config-highlight.cfg (user home dir)/.idlerc/config-keys.cfg """ def __init__(self): self.defaultCfg={} self.userCfg={} self.cfg={} self.CreateConfigHandlers() self.LoadCfgFiles() #self.LoadCfg() def CreateConfigHandlers(self): """ set up a dictionary of config parsers for default and user configurations respectively """ #build idle install path if __name__ != '__main__': # we were imported idleDir=os.path.dirname(__file__) else: # we were exec'ed (for testing only) idleDir=os.path.abspath(sys.path[0]) userDir=self.GetUserCfgDir() configTypes=('main','extensions','highlight','keys') defCfgFiles={} usrCfgFiles={} for cfgType in configTypes: #build config file names defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def') usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg') for cfgType in configTypes: #create config parsers self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType]) self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType]) def GetUserCfgDir(self): """ Creates (if required) and returns a filesystem directory for storing user config files. 
""" cfgDir = '.idlerc' userDir = os.path.expanduser('~') if userDir != '~': # expanduser() found user home dir if not os.path.exists(userDir): warn = ('\n Warning: os.path.expanduser("~") points to\n '+ userDir+',\n but the path does not exist.') try: print(warn, file=sys.stderr) except OSError: pass userDir = '~' if userDir == "~": # still no path to home! # traditionally IDLE has defaulted to os.getcwd(), is this adequate? userDir = os.getcwd() userDir = os.path.join(userDir, cfgDir) if not os.path.exists(userDir): try: os.mkdir(userDir) except OSError: warn = ('\n Warning: unable to create user config directory\n'+ userDir+'\n Check path and permissions.\n Exiting!\n') print(warn, file=sys.stderr) raise SystemExit return userDir def GetOption(self, configType, section, option, default=None, type=None, warn_on_default=True, raw=False): """ Get an option value for given config type and given general configuration section/option or return a default. If type is specified, return as type. Firstly the user configuration is checked, with a fallback to the default configuration, and a final 'catch all' fallback to a useable passed-in default if the option isn't present in either the user or the default configuration. configType must be one of ('main','extensions','highlight','keys') If a default is returned, and warn_on_default is True, a warning is printed to stderr. 
""" try: if self.userCfg[configType].has_option(section,option): return self.userCfg[configType].Get(section, option, type=type, raw=raw) except ValueError: warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' invalid %r value for configuration option %r\n' ' from section %r: %r' % (type, option, section, self.userCfg[configType].Get(section, option, raw=raw))) try: print(warning, file=sys.stderr) except OSError: pass try: if self.defaultCfg[configType].has_option(section,option): return self.defaultCfg[configType].Get(section, option, type=type, raw=raw) except ValueError: pass #returning default, print warning if warn_on_default: warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' problem retrieving configuration option %r\n' ' from section %r.\n' ' returning default value: %r' % (option, section, default)) try: print(warning, file=sys.stderr) except OSError: pass return default def SetOption(self, configType, section, option, value): """In user's config file, set section's option to value. """ self.userCfg[configType].SetOption(section, option, value) def GetSectionList(self, configSet, configType): """ Get a list of sections from either the user or default config for the given config type. configSet must be either 'user' or 'default' configType must be one of ('main','extensions','highlight','keys') """ if not (configType in ('main','extensions','highlight','keys')): raise InvalidConfigType('Invalid configType specified') if configSet == 'user': cfgParser=self.userCfg[configType] elif configSet == 'default': cfgParser=self.defaultCfg[configType] else: raise InvalidConfigSet('Invalid configSet specified') return cfgParser.sections() def GetHighlight(self, theme, element, fgBg=None): """ return individual highlighting theme elements. 
fgBg - string ('fg'or'bg') or None, if None return a dictionary containing fg and bg colours (appropriate for passing to Tkinter in, e.g., a tag_config call), otherwise fg or bg colour only as specified. """ if self.defaultCfg['highlight'].has_section(theme): themeDict=self.GetThemeDict('default',theme) else: themeDict=self.GetThemeDict('user',theme) fore=themeDict[element+'-foreground'] if element=='cursor': #there is no config value for cursor bg back=themeDict['normal-background'] else: back=themeDict[element+'-background'] highlight={"foreground": fore,"background": back} if not fgBg: #return dict of both colours return highlight else: #return specified colour only if fgBg == 'fg': return highlight["foreground"] if fgBg == 'bg': return highlight["background"] else: raise InvalidFgBg('Invalid fgBg specified') def GetThemeDict(self,type,themeName): """ type - string, 'default' or 'user' theme type themeName - string, theme name Returns a dictionary which holds {option:value} for each element in the specified theme. Values are loaded over a set of ultimate last fallback defaults to guarantee that all theme elements are present in a newly created theme. """ if type == 'user': cfgParser=self.userCfg['highlight'] elif type == 'default': cfgParser=self.defaultCfg['highlight'] else: raise InvalidTheme('Invalid theme type specified') #foreground and background values are provded for each theme element #(apart from cursor) even though all these values are not yet used #by idle, to allow for their use in the future. Default values are #generally black and white. 
theme={ 'normal-foreground':'#000000', 'normal-background':'#ffffff', 'keyword-foreground':'#000000', 'keyword-background':'#ffffff', 'builtin-foreground':'#000000', 'builtin-background':'#ffffff', 'comment-foreground':'#000000', 'comment-background':'#ffffff', 'string-foreground':'#000000', 'string-background':'#ffffff', 'definition-foreground':'#000000', 'definition-background':'#ffffff', 'hilite-foreground':'#000000', 'hilite-background':'gray', 'break-foreground':'#ffffff', 'break-background':'#000000', 'hit-foreground':'#ffffff', 'hit-background':'#000000', 'error-foreground':'#ffffff', 'error-background':'#000000', #cursor (only foreground can be set) 'cursor-foreground':'#000000', #shell window 'stdout-foreground':'#000000', 'stdout-background':'#ffffff', 'stderr-foreground':'#000000', 'stderr-background':'#ffffff', 'console-foreground':'#000000', 'console-background':'#ffffff' } for element in theme: if not cfgParser.has_option(themeName,element): #we are going to return a default, print warning warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict' ' -\n problem retrieving theme element %r' '\n from theme %r.\n' ' returning default value: %r' % (element, themeName, theme[element])) try: print(warning, file=sys.stderr) except OSError: pass colour=cfgParser.Get(themeName,element,default=theme[element]) theme[element]=colour return theme def CurrentTheme(self): """ Returns the name of the currently active theme """ return self.GetOption('main','Theme','name',default='') def CurrentKeys(self): """ Returns the name of the currently active key set """ return self.GetOption('main','Keys','name',default='') def GetExtensions(self, active_only=True, editor_only=False, shell_only=False): """ Gets a list of all idle extensions declared in the config files. 
active_only - boolean, if true only return active (enabled) extensions """ extns=self.RemoveKeyBindNames( self.GetSectionList('default','extensions')) userExtns=self.RemoveKeyBindNames( self.GetSectionList('user','extensions')) for extn in userExtns: if extn not in extns: #user has added own extension extns.append(extn) if active_only: activeExtns=[] for extn in extns: if self.GetOption('extensions', extn, 'enable', default=True, type='bool'): #the extension is enabled if editor_only or shell_only: if editor_only: option = "enable_editor" else: option = "enable_shell" if self.GetOption('extensions', extn,option, default=True, type='bool', warn_on_default=False): activeExtns.append(extn) else: activeExtns.append(extn) return activeExtns else: return extns def RemoveKeyBindNames(self,extnNameList): #get rid of keybinding section names names=extnNameList kbNameIndicies=[] for name in names: if name.endswith(('_bindings', '_cfgBindings')): kbNameIndicies.append(names.index(name)) kbNameIndicies.sort() kbNameIndicies.reverse() for index in kbNameIndicies: #delete each keybinding section name del(names[index]) return names def GetExtnNameForEvent(self,virtualEvent): """ Returns the name of the extension that virtualEvent is bound in, or None if not bound in any extension. virtualEvent - string, name of the virtual event to test for, without the enclosing '<< >>' """ extName=None vEvent='<<'+virtualEvent+'>>' for extn in self.GetExtensions(active_only=0): for event in self.GetExtensionKeys(extn): if event == vEvent: extName=extn return extName def GetExtensionKeys(self,extensionName): """ returns a dictionary of the configurable keybindings for a particular extension,as they exist in the dictionary returned by GetCurrentKeySet; that is, where previously used bindings are disabled. 
""" keysName=extensionName+'_cfgBindings' activeKeys=self.GetCurrentKeySet() extKeys={} if self.defaultCfg['extensions'].has_section(keysName): eventNames=self.defaultCfg['extensions'].GetOptionList(keysName) for eventName in eventNames: event='<<'+eventName+'>>' binding=activeKeys[event] extKeys[event]=binding return extKeys def __GetRawExtensionKeys(self,extensionName): """ returns a dictionary of the configurable keybindings for a particular extension, as defined in the configuration files, or an empty dictionary if no bindings are found """ keysName=extensionName+'_cfgBindings' extKeys={} if self.defaultCfg['extensions'].has_section(keysName): eventNames=self.defaultCfg['extensions'].GetOptionList(keysName) for eventName in eventNames: binding=self.GetOption('extensions',keysName, eventName,default='').split() event='<<'+eventName+'>>' extKeys[event]=binding return extKeys def GetExtensionBindings(self,extensionName): """ Returns a dictionary of all the event bindings for a particular extension. The configurable keybindings are returned as they exist in the dictionary returned by GetCurrentKeySet; that is, where re-used keybindings are disabled. """ bindsName=extensionName+'_bindings' extBinds=self.GetExtensionKeys(extensionName) #add the non-configurable bindings if self.defaultCfg['extensions'].has_section(bindsName): eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName) for eventName in eventNames: binding=self.GetOption('extensions',bindsName, eventName,default='').split() event='<<'+eventName+'>>' extBinds[event]=binding return extBinds def GetKeyBinding(self, keySetName, eventStr): """ returns the keybinding for a specific event. keySetName - string, name of key binding set eventStr - string, the virtual event we want the binding for, represented as a string, eg. 
'<<event>>' """ eventName=eventStr[2:-2] #trim off the angle brackets binding=self.GetOption('keys',keySetName,eventName,default='').split() return binding def GetCurrentKeySet(self): result = self.GetKeySet(self.CurrentKeys()) if sys.platform == "darwin": # OS X Tk variants do not support the "Alt" keyboard modifier. # So replace all keybingings that use "Alt" with ones that # use the "Option" keyboard modifier. # TO DO: the "Option" modifier does not work properly for # Cocoa Tk and XQuartz Tk so we should not use it # in default OS X KeySets. for k, v in result.items(): v2 = [ x.replace('<Alt-', '<Option-') for x in v ] if v != v2: result[k] = v2 return result def GetKeySet(self,keySetName): """ Returns a dictionary of: all requested core keybindings, plus the keybindings for all currently active extensions. If a binding defined in an extension is already in use, that binding is disabled. """ keySet=self.GetCoreKeys(keySetName) activeExtns=self.GetExtensions(active_only=1) for extn in activeExtns: extKeys=self.__GetRawExtensionKeys(extn) if extKeys: #the extension defines keybindings for event in extKeys: if extKeys[event] in keySet.values(): #the binding is already in use extKeys[event]='' #disable this binding keySet[event]=extKeys[event] #add binding return keySet def IsCoreBinding(self,virtualEvent): """ returns true if the virtual event is bound in the core idle keybindings. virtualEvent - string, name of the virtual event to test for, without the enclosing '<< >>' """ return ('<<'+virtualEvent+'>>') in self.GetCoreKeys() def GetCoreKeys(self, keySetName=None): """ returns the requested set of core keybindings, with fallbacks if required. Keybindings loaded from the config file(s) are loaded _over_ these defaults, so if there is a problem getting any core binding there will be an 'ultimate last resort fallback' to the CUA-ish bindings defined here. 
""" keyBindings={ '<<copy>>': ['<Control-c>', '<Control-C>'], '<<cut>>': ['<Control-x>', '<Control-X>'], '<<paste>>': ['<Control-v>', '<Control-V>'], '<<beginning-of-line>>': ['<Control-a>', '<Home>'], '<<center-insert>>': ['<Control-l>'], '<<close-all-windows>>': ['<Control-q>'], '<<close-window>>': ['<Alt-F4>'], '<<do-nothing>>': ['<Control-x>'], '<<end-of-file>>': ['<Control-d>'], '<<python-docs>>': ['<F1>'], '<<python-context-help>>': ['<Shift-F1>'], '<<history-next>>': ['<Alt-n>'], '<<history-previous>>': ['<Alt-p>'], '<<interrupt-execution>>': ['<Control-c>'], '<<view-restart>>': ['<F6>'], '<<restart-shell>>': ['<Control-F6>'], '<<open-class-browser>>': ['<Alt-c>'], '<<open-module>>': ['<Alt-m>'], '<<open-new-window>>': ['<Control-n>'], '<<open-window-from-file>>': ['<Control-o>'], '<<plain-newline-and-indent>>': ['<Control-j>'], '<<print-window>>': ['<Control-p>'], '<<redo>>': ['<Control-y>'], '<<remove-selection>>': ['<Escape>'], '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'], '<<save-window-as-file>>': ['<Alt-s>'], '<<save-window>>': ['<Control-s>'], '<<select-all>>': ['<Alt-a>'], '<<toggle-auto-coloring>>': ['<Control-slash>'], '<<undo>>': ['<Control-z>'], '<<find-again>>': ['<Control-g>', '<F3>'], '<<find-in-files>>': ['<Alt-F3>'], '<<find-selection>>': ['<Control-F3>'], '<<find>>': ['<Control-f>'], '<<replace>>': ['<Control-h>'], '<<goto-line>>': ['<Alt-g>'], '<<smart-backspace>>': ['<Key-BackSpace>'], '<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'], '<<smart-indent>>': ['<Key-Tab>'], '<<indent-region>>': ['<Control-Key-bracketright>'], '<<dedent-region>>': ['<Control-Key-bracketleft>'], '<<comment-region>>': ['<Alt-Key-3>'], '<<uncomment-region>>': ['<Alt-Key-4>'], '<<tabify-region>>': ['<Alt-Key-5>'], '<<untabify-region>>': ['<Alt-Key-6>'], '<<toggle-tabs>>': ['<Alt-Key-t>'], '<<change-indentwidth>>': ['<Alt-Key-u>'], '<<del-word-left>>': ['<Control-Key-BackSpace>'], '<<del-word-right>>': ['<Control-Key-Delete>'] } if keySetName: 
for event in keyBindings: binding=self.GetKeyBinding(keySetName,event) if binding: keyBindings[event]=binding else: #we are going to return a default, print warning warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys' ' -\n problem retrieving key binding for event %r' '\n from key set %r.\n' ' returning default value: %r' % (event, keySetName, keyBindings[event])) try: print(warning, file=sys.stderr) except OSError: pass return keyBindings def GetExtraHelpSourceList(self,configSet): """Fetch list of extra help sources from a given configSet. Valid configSets are 'user' or 'default'. Return a list of tuples of the form (menu_item , path_to_help_file , option), or return the empty list. 'option' is the sequence number of the help resource. 'option' values determine the position of the menu items on the Help menu, therefore the returned list must be sorted by 'option'. """ helpSources=[] if configSet=='user': cfgParser=self.userCfg['main'] elif configSet=='default': cfgParser=self.defaultCfg['main'] else: raise InvalidConfigSet('Invalid configSet specified') options=cfgParser.GetOptionList('HelpFiles') for option in options: value=cfgParser.Get('HelpFiles',option,default=';') if value.find(';')==-1: #malformed config entry with no ';' menuItem='' #make these empty helpPath='' #so value won't be added to list else: #config entry contains ';' as expected value=value.split(';') menuItem=value[0].strip() helpPath=value[1].strip() if menuItem and helpPath: #neither are empty strings helpSources.append( (menuItem,helpPath,option) ) helpSources.sort(key=lambda x: x[2]) return helpSources def GetAllExtraHelpSourcesList(self): """ Returns a list of tuples containing the details of all additional help sources configured, or an empty list if there are none. Tuples are of the format returned by GetExtraHelpSourceList. 
""" allHelpSources=( self.GetExtraHelpSourceList('default')+ self.GetExtraHelpSourceList('user') ) return allHelpSources def LoadCfgFiles(self): """ load all configuration files. """ for key in self.defaultCfg: self.defaultCfg[key].Load() self.userCfg[key].Load() #same keys def SaveUserCfgFiles(self): """ write all loaded user configuration files back to disk """ for key in self.userCfg: self.userCfg[key].Save() idleConf=IdleConf() ### module test if __name__ == '__main__': def dumpCfg(cfg): print('\n',cfg,'\n') for key in cfg: sections=cfg[key].sections() print(key) print(sections) for section in sections: options=cfg[key].options(section) print(section) print(options) for option in options: print(option, '=', cfg[key].Get(section,option)) dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) print(idleConf.userCfg['main'].Get('Theme','name')) #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
lgpl-3.0
Vignesh2208/Awlsim
awlsim/core/instructions/insn_dtb.py
2
1783
# -*- coding: utf-8 -*- # # AWL simulator - instructions # # Copyright 2012-2014 Michael Buesch <m@bues.ch> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from __future__ import division, absolute_import, print_function, unicode_literals from awlsim.common.compat import * from awlsim.core.instructions.main import * #@nocy from awlsim.core.operators import * #from awlsim.core.instructions.main cimport * #@cy class AwlInsn_DTB(AwlInsn): #+cdef __slots__ = () def __init__(self, cpu, rawInsn): AwlInsn.__init__(self, cpu, AwlInsn.TYPE_DTB, rawInsn) self.assertOpCount(0) def run(self): #@cy cdef S7StatusWord s s = self.cpu.statusWord binval, bcd = dwordToSignedPyInt(self.cpu.accu1.get()), 0 if binval < 0: bcd |= 0xF0000000 binval = abs(binval) if binval > 9999999: s.OV, s.OS = 1, 1 return bcd |= binval % 10 bcd |= ((binval // 10) % 10) << 4 bcd |= ((binval // 100) % 10) << 8 bcd |= ((binval // 1000) % 10) << 12 bcd |= ((binval // 10000) % 10) << 16 bcd |= ((binval // 100000) % 10) << 20 bcd |= ((binval // 1000000) % 10) << 24 self.cpu.accu1.set(bcd) s.OV = 0
gpl-2.0
watspidererik/testenv
flask/lib/python2.7/site-packages/babel/dates.py
85
43083
# -*- coding: utf-8 -*- """ babel.dates ~~~~~~~~~~~ Locale dependent formatting and parsing of dates and times. The default locale for the functions in this module is determined by the following environment variables, in that order: * ``LC_TIME``, * ``LC_ALL``, and * ``LANG`` :copyright: (c) 2013 by the Babel Team. :license: BSD, see LICENSE for more details. """ from __future__ import division import re import pytz as _pytz from datetime import date, datetime, time, timedelta from bisect import bisect_right from babel.core import default_locale, get_global, Locale from babel.util import UTC, LOCALTZ from babel._compat import string_types, integer_types, number_types LC_TIME = default_locale('LC_TIME') # Aliases for use in scopes where the modules are shadowed by local variables date_ = date datetime_ = datetime time_ = time def get_timezone(zone=None): """Looks up a timezone by name and returns it. The timezone object returned comes from ``pytz`` and corresponds to the `tzinfo` interface and can be used with all of the functions of Babel that operate with dates. If a timezone is not known a :exc:`LookupError` is raised. If `zone` is ``None`` a local zone object is returned. :param zone: the name of the timezone to look up. If a timezone object itself is passed in, mit's returned unchanged. """ if zone is None: return LOCALTZ if not isinstance(zone, string_types): return zone try: return _pytz.timezone(zone) except _pytz.UnknownTimeZoneError: raise LookupError('Unknown timezone %s' % zone) def get_next_timezone_transition(zone=None, dt=None): """Given a timezone it will return a :class:`TimezoneTransition` object that holds the information about the next timezone transition that's going to happen. For instance this can be used to detect when the next DST change is going to happen and how it looks like. The transition is calculated relative to the given datetime object. The next transition that follows the date is used. 
If a transition cannot be found the return value will be `None`. Transition information can only be provided for timezones returned by the :func:`get_timezone` function. :param zone: the timezone for which the transition should be looked up. If not provided the local timezone is used. :param dt: the date after which the next transition should be found. If not given the current time is assumed. """ zone = get_timezone(zone) if dt is None: dt = datetime.utcnow() else: dt = dt.replace(tzinfo=None) if not hasattr(zone, '_utc_transition_times'): raise TypeError('Given timezone does not have UTC transition ' 'times. This can happen because the operating ' 'system fallback local timezone is used or a ' 'custom timezone object') try: idx = max(0, bisect_right(zone._utc_transition_times, dt)) old_trans = zone._transition_info[idx - 1] new_trans = zone._transition_info[idx] old_tz = zone._tzinfos[old_trans] new_tz = zone._tzinfos[new_trans] except (LookupError, ValueError): return None return TimezoneTransition( activates=zone._utc_transition_times[idx], from_tzinfo=old_tz, to_tzinfo=new_tz, reference_date=dt ) class TimezoneTransition(object): """A helper object that represents the return value from :func:`get_next_timezone_transition`. """ def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None): #: the time of the activation of the timezone transition in UTC. self.activates = activates #: the timezone from where the transition starts. self.from_tzinfo = from_tzinfo #: the timezone for after the transition. self.to_tzinfo = to_tzinfo #: the reference date that was provided. This is the `dt` parameter #: to the :func:`get_next_timezone_transition`. 
self.reference_date = reference_date @property def from_tz(self): """The name of the timezone before the transition.""" return self.from_tzinfo._tzname @property def to_tz(self): """The name of the timezone after the transition.""" return self.to_tzinfo._tzname @property def from_offset(self): """The UTC offset in seconds before the transition.""" return int(self.from_tzinfo._utcoffset.total_seconds()) @property def to_offset(self): """The UTC offset in seconds after the transition.""" return int(self.to_tzinfo._utcoffset.total_seconds()) def __repr__(self): return '<TimezoneTransition %s -> %s (%s)>' % ( self.from_tz, self.to_tz, self.activates, ) def get_period_names(locale=LC_TIME): """Return the names for day periods (AM/PM) used by the locale. >>> get_period_names(locale='en_US')['am'] u'AM' :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).periods def get_day_names(width='wide', context='format', locale=LC_TIME): """Return the day names used by the locale for the specified format. >>> get_day_names('wide', locale='en_US')[1] u'Tuesday' >>> get_day_names('abbreviated', locale='es')[1] u'mar' >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1] u'D' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).days[context][width] def get_month_names(width='wide', context='format', locale=LC_TIME): """Return the month names used by the locale for the specified format. 
>>> get_month_names('wide', locale='en_US')[1] u'January' >>> get_month_names('abbreviated', locale='es')[1] u'ene' >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1] u'J' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).months[context][width] def get_quarter_names(width='wide', context='format', locale=LC_TIME): """Return the quarter names used by the locale for the specified format. >>> get_quarter_names('wide', locale='en_US')[1] u'1st quarter' >>> get_quarter_names('abbreviated', locale='de_DE')[1] u'Q1' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).quarters[context][width] def get_era_names(width='wide', locale=LC_TIME): """Return the era names used by the locale for the specified format. >>> get_era_names('wide', locale='en_US')[1] u'Anno Domini' >>> get_era_names('abbreviated', locale='de_DE')[1] u'n. Chr.' :param width: the width to use, either "wide", "abbreviated", or "narrow" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).eras[width] def get_date_format(format='medium', locale=LC_TIME): """Return the date formatting patterns used by the locale for the specified format. >>> get_date_format(locale='en_US') <DateTimePattern u'MMM d, y'> >>> get_date_format('full', locale='de_DE') <DateTimePattern u'EEEE, d. MMMM y'> :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).date_formats[format] def get_datetime_format(format='medium', locale=LC_TIME): """Return the datetime formatting patterns used by the locale for the specified format. 
>>> get_datetime_format(locale='en_US') u'{1}, {0}' :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string """ patterns = Locale.parse(locale).datetime_formats if format not in patterns: format = None return patterns[format] def get_time_format(format='medium', locale=LC_TIME): """Return the time formatting patterns used by the locale for the specified format. >>> get_time_format(locale='en_US') <DateTimePattern u'h:mm:ss a'> >>> get_time_format('full', locale='de_DE') <DateTimePattern u'HH:mm:ss zzzz'> :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string """ return Locale.parse(locale).time_formats[format] def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME): """Return the timezone associated with the given `datetime` object formatted as string indicating the offset from GMT. >>> dt = datetime(2007, 4, 1, 15, 30) >>> get_timezone_gmt(dt, locale='en') u'GMT+00:00' >>> tz = get_timezone('America/Los_Angeles') >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz) >>> get_timezone_gmt(dt, locale='en') u'GMT-08:00' >>> get_timezone_gmt(dt, 'short', locale='en') u'-0800' The long format depends on the locale, for example in France the acronym UTC string is used instead of GMT: >>> get_timezone_gmt(dt, 'long', locale='fr_FR') u'UTC-08:00' .. 
versionadded:: 0.9 :param datetime: the ``datetime`` object; if `None`, the current date and time in UTC is used :param width: either "long" or "short" :param locale: the `Locale` object, or a locale string """ if datetime is None: datetime = datetime_.utcnow() elif isinstance(datetime, integer_types): datetime = datetime_.utcfromtimestamp(datetime).time() if datetime.tzinfo is None: datetime = datetime.replace(tzinfo=UTC) locale = Locale.parse(locale) offset = datetime.tzinfo.utcoffset(datetime) seconds = offset.days * 24 * 60 * 60 + offset.seconds hours, seconds = divmod(seconds, 3600) if width == 'short': pattern = u'%+03d%02d' else: pattern = locale.zone_formats['gmt'] % '%+03d:%02d' return pattern % (hours, seconds // 60) def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME): """Return a representation of the given timezone using "location format". The result depends on both the local display name of the country and the city associated with the time zone: >>> tz = get_timezone('America/St_Johns') >>> get_timezone_location(tz, locale='de_DE') u"Kanada (St. John's) Zeit" >>> tz = get_timezone('America/Mexico_City') >>> get_timezone_location(tz, locale='de_DE') u'Mexiko (Mexiko-Stadt) Zeit' If the timezone is associated with a country that uses only a single timezone, just the localized country name is returned: >>> tz = get_timezone('Europe/Berlin') >>> get_timezone_name(tz, locale='de_DE') u'Mitteleurop\\xe4ische Zeit' .. 
versionadded:: 0.9 :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines the timezone; if `None`, the current date and time in UTC is assumed :param locale: the `Locale` object, or a locale string :return: the localized timezone name using location format """ if dt_or_tzinfo is None: dt = datetime.now() tzinfo = LOCALTZ elif isinstance(dt_or_tzinfo, string_types): dt = None tzinfo = get_timezone(dt_or_tzinfo) elif isinstance(dt_or_tzinfo, integer_types): dt = None tzinfo = UTC elif isinstance(dt_or_tzinfo, (datetime, time)): dt = dt_or_tzinfo if dt.tzinfo is not None: tzinfo = dt.tzinfo else: tzinfo = UTC else: dt = None tzinfo = dt_or_tzinfo locale = Locale.parse(locale) if hasattr(tzinfo, 'zone'): zone = tzinfo.zone else: zone = tzinfo.tzname(dt or datetime.utcnow()) # Get the canonical time-zone code zone = get_global('zone_aliases').get(zone, zone) info = locale.time_zones.get(zone, {}) # Otherwise, if there is only one timezone for the country, return the # localized country name region_format = locale.zone_formats['region'] territory = get_global('zone_territories').get(zone) if territory not in locale.territories: territory = 'ZZ' # invalid/unknown territory_name = locale.territories[territory] if territory and len(get_global('territory_zones').get(territory, [])) == 1: return region_format % (territory_name) # Otherwise, include the city in the output fallback_format = locale.zone_formats['fallback'] if 'city' in info: city_name = info['city'] else: metazone = get_global('meta_zones').get(zone) metazone_info = locale.meta_zones.get(metazone, {}) if 'city' in metazone_info: city_name = metazone_info['city'] elif '/' in zone: city_name = zone.split('/', 1)[1].replace('_', ' ') else: city_name = zone.replace('_', ' ') return region_format % (fallback_format % { '0': city_name, '1': territory_name }) def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False, locale=LC_TIME, zone_variant=None): r"""Return the localized display 
name for the given timezone. The timezone may be specified using a ``datetime`` or `tzinfo` object. >>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles')) >>> get_timezone_name(dt, locale='en_US') u'Pacific Standard Time' >>> get_timezone_name(dt, width='short', locale='en_US') u'PST' If this function gets passed only a `tzinfo` object and no concrete `datetime`, the returned display name is indenpendent of daylight savings time. This can be used for example for selecting timezones, or to set the time of events that recur across DST changes: >>> tz = get_timezone('America/Los_Angeles') >>> get_timezone_name(tz, locale='en_US') u'Pacific Time' >>> get_timezone_name(tz, 'short', locale='en_US') u'PT' If no localized display name for the timezone is available, and the timezone is associated with a country that uses only a single timezone, the name of that country is returned, formatted according to the locale: >>> tz = get_timezone('Europe/Berlin') >>> get_timezone_name(tz, locale='de_DE') u'Mitteleurop\xe4ische Zeit' >>> get_timezone_name(tz, locale='pt_BR') u'Hor\xe1rio da Europa Central' On the other hand, if the country uses multiple timezones, the city is also included in the representation: >>> tz = get_timezone('America/St_Johns') >>> get_timezone_name(tz, locale='de_DE') u'Neufundland-Zeit' Note that short format is currently not supported for all timezones and all locales. This is partially because not every timezone has a short code in every locale. In that case it currently falls back to the long format. For more information see `LDML Appendix J: Time Zone Display Names <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_ .. versionadded:: 0.9 .. versionchanged:: 1.0 Added `zone_variant` support. :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines the timezone; if a ``tzinfo`` object is used, the resulting display name will be generic, i.e. 
independent of daylight savings time; if `None`, the current date in UTC is assumed :param width: either "long" or "short" :param uncommon: deprecated and ignored :param zone_variant: defines the zone variation to return. By default the variation is defined from the datetime object passed in. If no datetime object is passed in, the ``'generic'`` variation is assumed. The following values are valid: ``'generic'``, ``'daylight'`` and ``'standard'``. :param locale: the `Locale` object, or a locale string """ if dt_or_tzinfo is None: dt = datetime.now() tzinfo = LOCALTZ elif isinstance(dt_or_tzinfo, string_types): dt = None tzinfo = get_timezone(dt_or_tzinfo) elif isinstance(dt_or_tzinfo, integer_types): dt = None tzinfo = UTC elif isinstance(dt_or_tzinfo, (datetime, time)): dt = dt_or_tzinfo if dt.tzinfo is not None: tzinfo = dt.tzinfo else: tzinfo = UTC else: dt = None tzinfo = dt_or_tzinfo locale = Locale.parse(locale) if hasattr(tzinfo, 'zone'): zone = tzinfo.zone else: zone = tzinfo.tzname(dt) if zone_variant is None: if dt is None: zone_variant = 'generic' else: dst = tzinfo.dst(dt) if dst: zone_variant = 'daylight' else: zone_variant = 'standard' else: if zone_variant not in ('generic', 'standard', 'daylight'): raise ValueError('Invalid zone variation') # Get the canonical time-zone code zone = get_global('zone_aliases').get(zone, zone) info = locale.time_zones.get(zone, {}) # Try explicitly translated zone names first if width in info: if zone_variant in info[width]: return info[width][zone_variant] metazone = get_global('meta_zones').get(zone) if metazone: metazone_info = locale.meta_zones.get(metazone, {}) if width in metazone_info: if zone_variant in metazone_info[width]: return metazone_info[width][zone_variant] # If we have a concrete datetime, we assume that the result can't be # independent of daylight savings time, so we return the GMT offset if dt is not None: return get_timezone_gmt(dt, width=width, locale=locale) return 
get_timezone_location(dt_or_tzinfo, locale=locale) def format_date(date=None, format='medium', locale=LC_TIME): """Return a date formatted according to the given pattern. >>> d = date(2007, 04, 01) >>> format_date(d, locale='en_US') u'Apr 1, 2007' >>> format_date(d, format='full', locale='de_DE') u'Sonntag, 1. April 2007' If you don't want to use the locale default formats, you can specify a custom date pattern: >>> format_date(d, "EEE, MMM d, ''yy", locale='en') u"Sun, Apr 1, '07" :param date: the ``date`` or ``datetime`` object; if `None`, the current date is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param locale: a `Locale` object or a locale identifier """ if date is None: date = date_.today() elif isinstance(date, datetime): date = date.date() locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): format = get_date_format(format, locale=locale) pattern = parse_pattern(format) return pattern.apply(date, locale) def format_datetime(datetime=None, format='medium', tzinfo=None, locale=LC_TIME): r"""Return a date formatted according to the given pattern. >>> dt = datetime(2007, 04, 01, 15, 30) >>> format_datetime(dt, locale='en_US') u'Apr 1, 2007, 3:30:00 PM' For any pattern requiring the display of the time-zone, the third-party ``pytz`` package is needed to explicitly specify the time-zone: >>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'), ... locale='fr_FR') u'dimanche 1 avril 2007 17:30:00 heure avanc\xe9e d\u2019Europe centrale' >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz", ... 
tzinfo=get_timezone('US/Eastern'), locale='en') u'2007.04.01 AD at 11:30:00 EDT' :param datetime: the `datetime` object; if `None`, the current date and time is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param tzinfo: the timezone to apply to the time for display :param locale: a `Locale` object or a locale identifier """ if datetime is None: datetime = datetime_.utcnow() elif isinstance(datetime, number_types): datetime = datetime_.utcfromtimestamp(datetime) elif isinstance(datetime, time): datetime = datetime_.combine(date.today(), datetime) if datetime.tzinfo is None: datetime = datetime.replace(tzinfo=UTC) if tzinfo is not None: datetime = datetime.astimezone(get_timezone(tzinfo)) if hasattr(tzinfo, 'normalize'): # pytz datetime = tzinfo.normalize(datetime) locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): return get_datetime_format(format, locale=locale) \ .replace("'", "") \ .replace('{0}', format_time(datetime, format, tzinfo=None, locale=locale)) \ .replace('{1}', format_date(datetime, format, locale=locale)) else: return parse_pattern(format).apply(datetime, locale) def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME): r"""Return a time formatted according to the given pattern. 
>>> t = time(15, 30) >>> format_time(t, locale='en_US') u'3:30:00 PM' >>> format_time(t, format='short', locale='de_DE') u'15:30' If you don't want to use the locale default formats, you can specify a custom time pattern: >>> format_time(t, "hh 'o''clock' a", locale='en') u"03 o'clock PM" For any pattern requiring the display of the time-zone a timezone has to be specified explicitly: >>> t = datetime(2007, 4, 1, 15, 30) >>> tzinfo = get_timezone('Europe/Paris') >>> t = tzinfo.localize(t) >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR') u'15:30:00 heure avanc\xe9e d\u2019Europe centrale' >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'), ... locale='en') u"09 o'clock AM, Eastern Daylight Time" As that example shows, when this function gets passed a ``datetime.datetime`` value, the actual time in the formatted string is adjusted to the timezone specified by the `tzinfo` parameter. If the ``datetime`` is "naive" (i.e. it has no associated timezone information), it is assumed to be in UTC. These timezone calculations are **not** performed if the value is of type ``datetime.time``, as without date information there's no way to determine what a given time would translate to in a different timezone without information about whether daylight savings time is in effect or not. This means that time values are left as-is, and the value of the `tzinfo` parameter is only used to display the timezone name if needed: >>> t = time(15, 30) >>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'), ... locale='fr_FR') u'15:30:00 heure normale de l\u2019Europe centrale' >>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'), ... 
locale='en_US') u'3:30:00 PM Eastern Standard Time' :param time: the ``time`` or ``datetime`` object; if `None`, the current time in UTC is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param tzinfo: the time-zone to apply to the time for display :param locale: a `Locale` object or a locale identifier """ if time is None: time = datetime.utcnow() elif isinstance(time, number_types): time = datetime.utcfromtimestamp(time) if time.tzinfo is None: time = time.replace(tzinfo=UTC) if isinstance(time, datetime): if tzinfo is not None: time = time.astimezone(tzinfo) if hasattr(tzinfo, 'normalize'): # pytz time = tzinfo.normalize(time) time = time.timetz() elif tzinfo is not None: time = time.replace(tzinfo=tzinfo) locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): format = get_time_format(format, locale=locale) return parse_pattern(format).apply(time, locale) TIMEDELTA_UNITS = ( ('year', 3600 * 24 * 365), ('month', 3600 * 24 * 30), ('week', 3600 * 24 * 7), ('day', 3600 * 24), ('hour', 3600), ('minute', 60), ('second', 1) ) def format_timedelta(delta, granularity='second', threshold=.85, add_direction=False, format='medium', locale=LC_TIME): """Return a time delta according to the rules of the given locale. >>> format_timedelta(timedelta(weeks=12), locale='en_US') u'3 months' >>> format_timedelta(timedelta(seconds=1), locale='es') u'1 segundo' The granularity parameter can be provided to alter the lowest unit presented, which defaults to a second. >>> format_timedelta(timedelta(hours=3), granularity='day', ... locale='en_US') u'1 day' The threshold parameter can be used to determine at which value the presentation switches to the next higher unit. A higher threshold factor means the presentation will switch later. 
For example: >>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US') u'1 day' >>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US') u'23 hours' In addition directional information can be provided that informs the user if the date is in the past or in the future: >>> format_timedelta(timedelta(hours=1), add_direction=True) u'In 1 hour' >>> format_timedelta(timedelta(hours=-1), add_direction=True) u'1 hour ago' :param delta: a ``timedelta`` object representing the time difference to format, or the delta in seconds as an `int` value :param granularity: determines the smallest unit that should be displayed, the value can be one of "year", "month", "week", "day", "hour", "minute" or "second" :param threshold: factor that determines at which point the presentation switches to the next higher unit :param add_direction: if this flag is set to `True` the return value will include directional information. For instance a positive timedelta will include the information about it being in the future, a negative will be information about the value being in the past. 
:param format: the format (currently only "medium" and "short" are supported) :param locale: a `Locale` object or a locale identifier """ if format not in ('short', 'medium'): raise TypeError('Format can only be one of "short" or "medium"') if isinstance(delta, timedelta): seconds = int((delta.days * 86400) + delta.seconds) else: seconds = delta locale = Locale.parse(locale) def _iter_choices(unit): if add_direction: if seconds >= 0: yield unit + '-future' else: yield unit + '-past' yield unit + ':' + format yield unit for unit, secs_per_unit in TIMEDELTA_UNITS: value = abs(seconds) / secs_per_unit if value >= threshold or unit == granularity: if unit == granularity and value > 0: value = max(1, value) value = int(round(value)) plural_form = locale.plural_form(value) pattern = None for choice in _iter_choices(unit): patterns = locale._data['unit_patterns'].get(choice) if patterns is not None: pattern = patterns[plural_form] break # This really should not happen if pattern is None: return u'' return pattern.replace('{0}', str(value)) return u'' def parse_date(string, locale=LC_TIME): """Parse a date from a string. This function uses the date format for the locale as a hint to determine the order in which the date fields appear in the string. >>> parse_date('4/1/04', locale='en_US') datetime.date(2004, 4, 1) >>> parse_date('01.04.2004', locale='de_DE') datetime.date(2004, 4, 1) :param string: the string containing the date :param locale: a `Locale` object or a locale identifier """ # TODO: try ISO format first? 
format = get_date_format(locale=locale).pattern.lower() year_idx = format.index('y') month_idx = format.index('m') if month_idx < 0: month_idx = format.index('l') day_idx = format.index('d') indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')] indexes.sort() indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) # FIXME: this currently only supports numbers, but should also support month # names, both in the requested locale, and english numbers = re.findall('(\d+)', string) year = numbers[indexes['Y']] if len(year) == 2: year = 2000 + int(year) else: year = int(year) month = int(numbers[indexes['M']]) day = int(numbers[indexes['D']]) if month > 12: month, day = day, month return date(year, month, day) def parse_time(string, locale=LC_TIME): """Parse a time from a string. This function uses the time format for the locale as a hint to determine the order in which the time fields appear in the string. >>> parse_time('15:30:00', locale='en_US') datetime.time(15, 30) :param string: the string containing the time :param locale: a `Locale` object or a locale identifier :return: the parsed time :rtype: `time` """ # TODO: try ISO format first? 
format = get_time_format(locale=locale).pattern.lower() hour_idx = format.index('h') if hour_idx < 0: hour_idx = format.index('k') min_idx = format.index('m') sec_idx = format.index('s') indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')] indexes.sort() indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) # FIXME: support 12 hour clock, and 0-based hour specification # and seconds should be optional, maybe minutes too # oh, and time-zones, of course numbers = re.findall('(\d+)', string) hour = int(numbers[indexes['H']]) minute = int(numbers[indexes['M']]) second = int(numbers[indexes['S']]) return time(hour, minute, second) class DateTimePattern(object): def __init__(self, pattern, format): self.pattern = pattern self.format = format def __repr__(self): return '<%s %r>' % (type(self).__name__, self.pattern) def __unicode__(self): return self.pattern def __mod__(self, other): if type(other) is not DateTimeFormat: return NotImplemented return self.format % other def apply(self, datetime, locale): return self % DateTimeFormat(datetime, locale) class DateTimeFormat(object): def __init__(self, value, locale): assert isinstance(value, (date, datetime, time)) if isinstance(value, (datetime, time)) and value.tzinfo is None: value = value.replace(tzinfo=UTC) self.value = value self.locale = Locale.parse(locale) def __getitem__(self, name): char = name[0] num = len(name) if char == 'G': return self.format_era(char, num) elif char in ('y', 'Y', 'u'): return self.format_year(char, num) elif char in ('Q', 'q'): return self.format_quarter(char, num) elif char in ('M', 'L'): return self.format_month(char, num) elif char in ('w', 'W'): return self.format_week(char, num) elif char == 'd': return self.format(self.value.day, num) elif char == 'D': return self.format_day_of_year(num) elif char == 'F': return self.format_day_of_week_in_month() elif char in ('E', 'e', 'c'): return self.format_weekday(char, num) elif char == 'a': return self.format_period(char) 
elif char == 'h': if self.value.hour % 12 == 0: return self.format(12, num) else: return self.format(self.value.hour % 12, num) elif char == 'H': return self.format(self.value.hour, num) elif char == 'K': return self.format(self.value.hour % 12, num) elif char == 'k': if self.value.hour == 0: return self.format(24, num) else: return self.format(self.value.hour, num) elif char == 'm': return self.format(self.value.minute, num) elif char == 's': return self.format(self.value.second, num) elif char == 'S': return self.format_frac_seconds(num) elif char == 'A': return self.format_milliseconds_in_day(num) elif char in ('z', 'Z', 'v', 'V'): return self.format_timezone(char, num) else: raise KeyError('Unsupported date/time field %r' % char) def format_era(self, char, num): width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)] era = int(self.value.year >= 0) return get_era_names(width, self.locale)[era] def format_year(self, char, num): value = self.value.year if char.isupper(): week = self.get_week_number(self.get_day_of_year()) if week == 0: value -= 1 year = self.format(value, num) if num == 2: year = year[-2:] return year def format_quarter(self, char, num): quarter = (self.value.month - 1) // 3 + 1 if num <= 2: return ('%%0%dd' % num) % quarter width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {'Q': 'format', 'q': 'stand-alone'}[char] return get_quarter_names(width, context, self.locale)[quarter] def format_month(self, char, num): if num <= 2: return ('%%0%dd' % num) % self.value.month width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {'M': 'format', 'L': 'stand-alone'}[char] return get_month_names(width, context, self.locale)[self.value.month] def format_week(self, char, num): if char.islower(): # week of year day_of_year = self.get_day_of_year() week = self.get_week_number(day_of_year) if week == 0: date = self.value - timedelta(days=day_of_year) week = self.get_week_number(self.get_day_of_year(date), date.weekday()) return 
self.format(week, num) else: # week of month week = self.get_week_number(self.value.day) if week == 0: date = self.value - timedelta(days=self.value.day) week = self.get_week_number(date.day, date.weekday()) pass return '%d' % week def format_weekday(self, char, num): if num < 3: if char.islower(): value = 7 - self.locale.first_week_day + self.value.weekday() return self.format(value % 7 + 1, num) num = 3 weekday = self.value.weekday() width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num] return get_day_names(width, context, self.locale)[weekday] def format_day_of_year(self, num): return self.format(self.get_day_of_year(), num) def format_day_of_week_in_month(self): return '%d' % ((self.value.day - 1) // 7 + 1) def format_period(self, char): period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)] return get_period_names(locale=self.locale)[period] def format_frac_seconds(self, num): value = str(self.value.microsecond) return self.format(round(float('.%s' % value), num) * 10**num, num) def format_milliseconds_in_day(self, num): msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \ self.value.minute * 60000 + self.value.hour * 3600000 return self.format(msecs, num) def format_timezone(self, char, num): width = {3: 'short', 4: 'long'}[max(3, num)] if char == 'z': return get_timezone_name(self.value, width, locale=self.locale) elif char == 'Z': return get_timezone_gmt(self.value, width, locale=self.locale) elif char == 'v': return get_timezone_name(self.value.tzinfo, width, locale=self.locale) elif char == 'V': if num == 1: return get_timezone_name(self.value.tzinfo, width, uncommon=True, locale=self.locale) return get_timezone_location(self.value.tzinfo, locale=self.locale) def format(self, value, length): return ('%%0%dd' % length) % value def get_day_of_year(self, date=None): if date is None: date = self.value return (date - date.replace(month=1, day=1)).days + 1 def get_week_number(self, 
day_of_period, day_of_week=None): """Return the number of the week of a day within a period. This may be the week number in a year or the week number in a month. Usually this will return a value equal to or greater than 1, but if the first week of the period is so short that it actually counts as the last week of the previous period, this function will return 0. >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE')) >>> format.get_week_number(6) 1 >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US')) >>> format.get_week_number(6) 2 :param day_of_period: the number of the day in the period (usually either the day of month or the day of year) :param day_of_week: the week day; if ommitted, the week day of the current date is assumed """ if day_of_week is None: day_of_week = self.value.weekday() first_day = (day_of_week - self.locale.first_week_day - day_of_period + 1) % 7 if first_day < 0: first_day += 7 week_number = (day_of_period + first_day - 1) // 7 if 7 - first_day >= self.locale.min_week_days: week_number += 1 return week_number PATTERN_CHARS = { 'G': [1, 2, 3, 4, 5], # era 'y': None, 'Y': None, 'u': None, # year 'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter 'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month 'w': [1, 2], 'W': [1], # week 'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day 'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day 'a': [1], # period 'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour 'm': [1, 2], # minute 's': [1, 2], 'S': None, 'A': None, # second 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone } def parse_pattern(pattern): """Parse date, time, and datetime format patterns. 
>>> parse_pattern("MMMMd").format u'%(MMMM)s%(d)s' >>> parse_pattern("MMM d, yyyy").format u'%(MMM)s %(d)s, %(yyyy)s' Pattern can contain literal strings in single quotes: >>> parse_pattern("H:mm' Uhr 'z").format u'%(H)s:%(mm)s Uhr %(z)s' An actual single quote can be used by using two adjacent single quote characters: >>> parse_pattern("hh' o''clock'").format u"%(hh)s o'clock" :param pattern: the formatting pattern to parse """ if type(pattern) is DateTimePattern: return pattern result = [] quotebuf = None charbuf = [] fieldchar = [''] fieldnum = [0] def append_chars(): result.append(''.join(charbuf).replace('%', '%%')) del charbuf[:] def append_field(): limit = PATTERN_CHARS[fieldchar[0]] if limit and fieldnum[0] not in limit: raise ValueError('Invalid length for field: %r' % (fieldchar[0] * fieldnum[0])) result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0])) fieldchar[0] = '' fieldnum[0] = 0 for idx, char in enumerate(pattern.replace("''", '\0')): if quotebuf is None: if char == "'": # quote started if fieldchar[0]: append_field() elif charbuf: append_chars() quotebuf = [] elif char in PATTERN_CHARS: if charbuf: append_chars() if char == fieldchar[0]: fieldnum[0] += 1 else: if fieldchar[0]: append_field() fieldchar[0] = char fieldnum[0] = 1 else: if fieldchar[0]: append_field() charbuf.append(char) elif quotebuf is not None: if char == "'": # end of quote charbuf.extend(quotebuf) quotebuf = None else: # inside quote quotebuf.append(char) if fieldchar[0]: append_field() elif charbuf: append_chars() return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
mit
dkodnik/arp
addons/hr_recruitment/wizard/__init__.py
381
1095
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-Today OpenERP (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_recruitment_create_partner_job # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
GodBlessPP/w16b_test
static/Brython3.1.3-20150514-095342/Lib/http/cookies.py
735
20810
#!/usr/bin/env python3 # #### # Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu> # # All Rights Reserved # # Permission to use, copy, modify, and distribute this software # and its documentation for any purpose and without fee is hereby # granted, provided that the above copyright notice appear in all # copies and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Timothy O'Malley not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS # SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR # ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # #### # # Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp # by Timothy O'Malley <timo@alum.mit.edu> # # Cookie.py is a Python module for the handling of HTTP # cookies as a Python dictionary. See RFC 2109 for more # information on cookies. # # The original idea to treat Cookies as a dictionary came from # Dave Mitchell (davem@magnet.com) in 1995, when he released the # first version of nscookie.py. # #### r""" Here's a sample session to show how to use this module. At the moment, this is the only documentation. The Basics ---------- Importing is easy... >>> from http import cookies Most of the time you start by creating a cookie. >>> C = cookies.SimpleCookie() Once you've created your Cookie, you can add values just as if it were a dictionary. 
>>> C = cookies.SimpleCookie() >>> C["fig"] = "newton" >>> C["sugar"] = "wafer" >>> C.output() 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer' Notice that the printable representation of a Cookie is the appropriate format for a Set-Cookie: header. This is the default behavior. You can change the header and printed attributes by using the .output() function >>> C = cookies.SimpleCookie() >>> C["rocky"] = "road" >>> C["rocky"]["path"] = "/cookie" >>> print(C.output(header="Cookie:")) Cookie: rocky=road; Path=/cookie >>> print(C.output(attrs=[], header="Cookie:")) Cookie: rocky=road The load() method of a Cookie extracts cookies from a string. In a CGI script, you would use this method to extract the cookies from the HTTP_COOKIE environment variable. >>> C = cookies.SimpleCookie() >>> C.load("chips=ahoy; vienna=finger") >>> C.output() 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger' The load() method is darn-tootin smart about identifying cookies within a string. Escaped quotation marks, nested semicolons, and other such trickeries do not confuse it. >>> C = cookies.SimpleCookie() >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') >>> print(C) Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" Each element of the Cookie also supports all of the RFC 2109 Cookie attributes. Here's an example which sets the Path attribute. >>> C = cookies.SimpleCookie() >>> C["oreo"] = "doublestuff" >>> C["oreo"]["path"] = "/" >>> print(C) Set-Cookie: oreo=doublestuff; Path=/ Each dictionary element has a 'value' attribute, which gives you back the value associated with the key. >>> C = cookies.SimpleCookie() >>> C["twix"] = "none for you" >>> C["twix"].value 'none for you' The SimpleCookie expects that all values should be standard strings. Just to be sure, SimpleCookie invokes the str() builtin to convert the value to a string, when the values are set dictionary-style. 
>>> C = cookies.SimpleCookie() >>> C["number"] = 7 >>> C["string"] = "seven" >>> C["number"].value '7' >>> C["string"].value 'seven' >>> C.output() 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' Finis. """ # # Import our required modules # import re import string __all__ = ["CookieError", "BaseCookie", "SimpleCookie"] _nulljoin = ''.join _semispacejoin = '; '.join _spacejoin = ' '.join # # Define an exception visible to External modules # class CookieError(Exception): pass # These quoting routines conform to the RFC2109 specification, which in # turn references the character definitions from RFC2068. They provide # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is # quoted with a preceeding '\' slash. # # These are taken from RFC2068 and RFC2109. # _LegalChars is the list of chars which don't require "'s # _Translator hash-table for fast quoting # _LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:" _Translator = { '\000' : '\\000', '\001' : '\\001', '\002' : '\\002', '\003' : '\\003', '\004' : '\\004', '\005' : '\\005', '\006' : '\\006', '\007' : '\\007', '\010' : '\\010', '\011' : '\\011', '\012' : '\\012', '\013' : '\\013', '\014' : '\\014', '\015' : '\\015', '\016' : '\\016', '\017' : '\\017', '\020' : '\\020', '\021' : '\\021', '\022' : '\\022', '\023' : '\\023', '\024' : '\\024', '\025' : '\\025', '\026' : '\\026', '\027' : '\\027', '\030' : '\\030', '\031' : '\\031', '\032' : '\\032', '\033' : '\\033', '\034' : '\\034', '\035' : '\\035', '\036' : '\\036', '\037' : '\\037', # Because of the way browsers really handle cookies (as opposed # to what the RFC says) we also encode , and ; ',' : '\\054', ';' : '\\073', '"' : '\\"', '\\' : '\\\\', '\177' : '\\177', '\200' : '\\200', '\201' : '\\201', '\202' : '\\202', '\203' : '\\203', '\204' : '\\204', '\205' : '\\205', '\206' : '\\206', '\207' : 
'\\207', '\210' : '\\210', '\211' : '\\211', '\212' : '\\212', '\213' : '\\213', '\214' : '\\214', '\215' : '\\215', '\216' : '\\216', '\217' : '\\217', '\220' : '\\220', '\221' : '\\221', '\222' : '\\222', '\223' : '\\223', '\224' : '\\224', '\225' : '\\225', '\226' : '\\226', '\227' : '\\227', '\230' : '\\230', '\231' : '\\231', '\232' : '\\232', '\233' : '\\233', '\234' : '\\234', '\235' : '\\235', '\236' : '\\236', '\237' : '\\237', '\240' : '\\240', '\241' : '\\241', '\242' : '\\242', '\243' : '\\243', '\244' : '\\244', '\245' : '\\245', '\246' : '\\246', '\247' : '\\247', '\250' : '\\250', '\251' : '\\251', '\252' : '\\252', '\253' : '\\253', '\254' : '\\254', '\255' : '\\255', '\256' : '\\256', '\257' : '\\257', '\260' : '\\260', '\261' : '\\261', '\262' : '\\262', '\263' : '\\263', '\264' : '\\264', '\265' : '\\265', '\266' : '\\266', '\267' : '\\267', '\270' : '\\270', '\271' : '\\271', '\272' : '\\272', '\273' : '\\273', '\274' : '\\274', '\275' : '\\275', '\276' : '\\276', '\277' : '\\277', '\300' : '\\300', '\301' : '\\301', '\302' : '\\302', '\303' : '\\303', '\304' : '\\304', '\305' : '\\305', '\306' : '\\306', '\307' : '\\307', '\310' : '\\310', '\311' : '\\311', '\312' : '\\312', '\313' : '\\313', '\314' : '\\314', '\315' : '\\315', '\316' : '\\316', '\317' : '\\317', '\320' : '\\320', '\321' : '\\321', '\322' : '\\322', '\323' : '\\323', '\324' : '\\324', '\325' : '\\325', '\326' : '\\326', '\327' : '\\327', '\330' : '\\330', '\331' : '\\331', '\332' : '\\332', '\333' : '\\333', '\334' : '\\334', '\335' : '\\335', '\336' : '\\336', '\337' : '\\337', '\340' : '\\340', '\341' : '\\341', '\342' : '\\342', '\343' : '\\343', '\344' : '\\344', '\345' : '\\345', '\346' : '\\346', '\347' : '\\347', '\350' : '\\350', '\351' : '\\351', '\352' : '\\352', '\353' : '\\353', '\354' : '\\354', '\355' : '\\355', '\356' : '\\356', '\357' : '\\357', '\360' : '\\360', '\361' : '\\361', '\362' : '\\362', '\363' : '\\363', '\364' : '\\364', '\365' : '\\365', '\366' : 
'\\366', '\367' : '\\367', '\370' : '\\370', '\371' : '\\371', '\372' : '\\372', '\373' : '\\373', '\374' : '\\374', '\375' : '\\375', '\376' : '\\376', '\377' : '\\377' } def _quote(str, LegalChars=_LegalChars): r"""Quote a string for use in a cookie header. If the string does not need to be double-quoted, then just return the string. Otherwise, surround the string in doublequotes and quote (with a \) special characters. """ if all(c in LegalChars for c in str): return str else: return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"' _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") def _unquote(str): # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if len(str) < 2: return str if str[0] != '"' or str[-1] != '"': return str # We have to assume that we must decode this string. # Down to work. # Remove the "s str = str[1:-1] # Check for special sequences. Examples: # \012 --> \n # \" --> " # i = 0 n = len(str) res = [] while 0 <= i < n: o_match = _OctalPatt.search(str, i) q_match = _QuotePatt.search(str, i) if not o_match and not q_match: # Neither matched res.append(str[i:]) break # else: j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): # QuotePatt matched res.append(str[i:k]) res.append(str[k+1]) i = k + 2 else: # OctalPatt matched res.append(str[i:j]) res.append(chr(int(str[j+1:j+4], 8))) i = j + 4 return _nulljoin(res) # The _getdate() routine is used to set the expiration time in the cookie's HTTP # header. By default, _getdate() returns the current time in the appropriate # "expires" format for a Set-Cookie header. The one optional argument is an # offset from now, in seconds. For example, an offset of -3600 means "one hour # ago". The offset may be a floating point number. 
# _weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] _monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): from time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) class Morsel(dict): """A class to hold ONE (key, value) pair. In a cookie, each such pair may have several attributes, so this class is used to keep the attributes associated with the appropriate key,value pair. This class also includes a coded_value attribute, which is used to hold the network representation of the value. This is most useful when Python objects are pickled for network transit. """ # RFC 2109 lists these attributes as reserved: # path comment domain # max-age secure version # # For historical reasons, these attributes are also reserved: # expires # # This is an extension from Microsoft: # httponly # # This dictionary provides a mapping from the lowercase # variant on the left to the appropriate traditional # formatting on the right. 
_reserved = { "expires" : "expires", "path" : "Path", "comment" : "Comment", "domain" : "Domain", "max-age" : "Max-Age", "secure" : "secure", "httponly" : "httponly", "version" : "Version", } _flags = {'secure', 'httponly'} def __init__(self): # Set defaults self.key = self.value = self.coded_value = None # Set default attributes for key in self._reserved: dict.__setitem__(self, key, "") def __setitem__(self, K, V): K = K.lower() if not K in self._reserved: raise CookieError("Invalid Attribute %s" % K) dict.__setitem__(self, K, V) def isReservedKey(self, K): return K.lower() in self._reserved def set(self, key, val, coded_val, LegalChars=_LegalChars): # First we verify that the key isn't a reserved word # Second we make sure it only contains legal characters if key.lower() in self._reserved: raise CookieError("Attempt to set a reserved key: %s" % key) if any(c not in LegalChars for c in key): raise CookieError("Illegal key value: %s" % key) # It's a good key, so save it. self.key = key self.value = val self.coded_value = coded_val def output(self, attrs=None, header="Set-Cookie:"): return "%s %s" % (header, self.OutputString(attrs)) __str__ = output def __repr__(self): return '<%s: %s=%s>' % (self.__class__.__name__, self.key, repr(self.value)) def js_output(self, attrs=None): # Print javascript return """ <script type="text/javascript"> <!-- begin hiding document.cookie = \"%s\"; // end hiding --> </script> """ % (self.OutputString(attrs).replace('"', r'\"')) def OutputString(self, attrs=None): # Build up our result # result = [] append = result.append # First, the key=value pair append("%s=%s" % (self.key, self.coded_value)) # Now add any defined attributes if attrs is None: attrs = self._reserved items = sorted(self.items()) for key, value in items: if value == "": continue if key not in attrs: continue if key == "expires" and isinstance(value, int): append("%s=%s" % (self._reserved[key], _getdate(value))) elif key == "max-age" and isinstance(value, int): 
append("%s=%d" % (self._reserved[key], value)) elif key == "secure": append(str(self._reserved[key])) elif key == "httponly": append(str(self._reserved[key])) else: append("%s=%s" % (self._reserved[key], value)) # Return the result return _semispacejoin(result) # # Pattern for finding cookie # # This used to be strict parsing based on the RFC2109 and RFC2068 # specifications. I have since discovered that MSIE 3.0x doesn't # follow the character rules outlined in those specs. As a # result, the parsing rules here are less strict. # _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern (?P<key> # Start of group 'key' """ + _LegalCharsPatt + r"""+? # Any word of at least one letter ) # End of group 'key' ( # Optional group: there may not be a value. \s*=\s* # Equal Sign (?P<val> # Start of group 'val' "(?:[^\\"]|\\.)*" # Any doublequoted string | # or \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr | # or """ + _LegalCharsPatt + r"""* # Any word or empty string ) # End of group 'val' )? # End of optional value group \s* # Any number of spaces. (\s+|;|$) # Ending either at space, semicolon, or EOS. """, re.ASCII) # May be removed if safe. # At long last, here is the cookie class. Using this class is almost just like # using a dictionary. See this module's docstring for example usage. # class BaseCookie(dict): """A container class for a set of Morsels.""" def value_decode(self, val): """real_value, coded_value = value_decode(STRING) Called prior to setting a cookie's value from the network representation. The VALUE is the value read from HTTP header. Override this function to modify the behavior of cookies. """ return val, val def value_encode(self, val): """real_value, coded_value = value_encode(VALUE) Called prior to setting a cookie's value from the dictionary representation. The VALUE is the value being assigned. 
Override this function to modify the behavior of cookies. """ strval = str(val) return strval, strval def __init__(self, input=None): if input: self.load(input) def __set(self, key, real_value, coded_value): """Private method for setting a cookie's value""" M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) def __setitem__(self, key, value): """Dictionary style assignment.""" rval, cval = self.value_encode(value) self.__set(key, rval, cval) def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): """Return a string suitable for HTTP.""" result = [] items = sorted(self.items()) for key, value in items: result.append(value.output(attrs, header)) return sep.join(result) __str__ = output def __repr__(self): l = [] items = sorted(self.items()) for key, value in items: l.append('%s=%s' % (key, repr(value.value))) return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) def js_output(self, attrs=None): """Return a string suitable for JavaScript.""" result = [] items = sorted(self.items()) for key, value in items: result.append(value.js_output(attrs)) return _nulljoin(result) def load(self, rawdata): """Load cookies from a string (presumably HTTP_COOKIE) or from a dictionary. Loading cookies from a dictionary 'd' is equivalent to calling: map(Cookie.__setitem__, d.keys(), d.values()) """ if isinstance(rawdata, str): self.__parse_string(rawdata) else: # self.update() wouldn't call our custom __setitem__ for key, value in rawdata.items(): self[key] = value return def __parse_string(self, str, patt=_CookiePattern): i = 0 # Our starting point n = len(str) # Length of string M = None # current morsel while 0 <= i < n: # Start looking for a cookie match = patt.search(str, i) if not match: # No more cookies break key, value = match.group("key"), match.group("val") i = match.end(0) # Parse the key, value in case it's metainfo if key[0] == "$": # We ignore attributes which pertain to the cookie # mechanism as a whole. 
See RFC 2109. # (Does anyone care?) if M: M[key[1:]] = value elif key.lower() in Morsel._reserved: if M: if value is None: if key.lower() in Morsel._flags: M[key] = True else: M[key] = _unquote(value) elif value is not None: rval, cval = self.value_decode(value) self.__set(key, rval, cval) M = self[key] class SimpleCookie(BaseCookie): """ SimpleCookie supports strings as cookie values. When setting the value using the dictionary assignment notation, SimpleCookie calls the builtin str() to convert the value to a string. Values received from HTTP are kept as strings. """ def value_decode(self, val): return _unquote(val), val def value_encode(self, val): strval = str(val) return strval, _quote(strval)
agpl-3.0
virajs/selenium-1
py/selenium/selenium.py
33
80518
""" Copyright 2011 Software Freedom Conservancy. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __docformat__ = "restructuredtext en" import httplib import urllib class selenium(object): """ Defines an object that runs Selenium commands. **Element Locators** Element Locators tell Selenium which HTML element a command refers to. The format of a locator is: \ *locatorType*\ **=**\ \ *argument* We support the following strategies for locating elements: * \ **identifier**\ =\ *id*: Select the element with the specified @id attribute. If no match is found, select the first element whose @name attribute is \ *id*. (This is normally the default; see below.) * \ **id**\ =\ *id*: Select the element with the specified @id attribute. * \ **name**\ =\ *name*: Select the first element with the specified @name attribute. * username * name=username The name may optionally be followed by one or more \ *element-filters*, separated from the name by whitespace. If the \ *filterType* is not specified, \ **value**\ is assumed. * name=flavour value=chocolate * \ **dom**\ =\ *javascriptExpression*: Find an element by evaluating the specified string. This allows you to traverse the HTML Document Object Model using JavaScript. Note that you must not return a value in this string; simply make it the last expression in the block. 
* dom=document.forms['myForm'].myDropdown * dom=document.images[56] * dom=function foo() { return document.links[1]; }; foo(); * \ **xpath**\ =\ *xpathExpression*: Locate an element using an XPath expression. * xpath=//img[@alt='The image alt text'] * xpath=//table[@id='table1']//tr[4]/td[2] * xpath=//a[contains(@href,'#id1')] * xpath=//a[contains(@href,'#id1')]/@class * xpath=(//table[@class='stylee'])//th[text()='theHeaderText']/../td * xpath=//input[@name='name2' and @value='yes'] * xpath=//\*[text()="right"] * \ **link**\ =\ *textPattern*: Select the link (anchor) element which contains text matching the specified \ *pattern*. * link=The link text * \ **css**\ =\ *cssSelectorSyntax*: Select the element using css selectors. Please refer to CSS2 selectors, CSS3 selectors for more information. You can also check the TestCssLocators test in the selenium test suite for an example of usage, which is included in the downloaded selenium core package. * css=a[href="#id3"] * css=span#firstChild + span Currently the css selector locator supports all css1, css2 and css3 selectors except namespace in css3, some pseudo classes(:nth-of-type, :nth-last-of-type, :first-of-type, :last-of-type, :only-of-type, :visited, :hover, :active, :focus, :indeterminate) and pseudo elements(::first-line, ::first-letter, ::selection, ::before, ::after). * \ **ui**\ =\ *uiSpecifierString*: Locate an element by resolving the UI specifier string to another locator, and evaluating it. See the Selenium UI-Element Reference for more details. * ui=loginPages::loginButton() * ui=settingsPages::toggle(label=Hide Email) * ui=forumPages::postBody(index=2)//a[2] Without an explicit locator prefix, Selenium uses the following default strategies: * \ **dom**\ , for locators starting with "document." * \ **xpath**\ , for locators starting with "//" * \ **identifier**\ , otherwise **Element Filters** Element filters can be used with a locator to refine a list of candidate elements. 
They are currently used only in the 'name' element-locator. Filters look much like locators, ie. \ *filterType*\ **=**\ \ *argument* Supported element-filters are: \ **value=**\ \ *valuePattern* Matches elements based on their values. This is particularly useful for refining a list of similarly-named toggle-buttons. \ **index=**\ \ *index* Selects a single element based on its position in the list (offset from zero). **String-match Patterns** Various Pattern syntaxes are available for matching string values: * \ **glob:**\ \ *pattern*: Match a string against a "glob" (aka "wildmat") pattern. "Glob" is a kind of limited regular-expression syntax typically used in command-line shells. In a glob pattern, "\*" represents any sequence of characters, and "?" represents any single character. Glob patterns match against the entire string. * \ **regexp:**\ \ *regexp*: Match a string using a regular-expression. The full power of JavaScript regular-expressions is available. * \ **regexpi:**\ \ *regexpi*: Match a string using a case-insensitive regular-expression. * \ **exact:**\ \ *string*: Match a string exactly, verbatim, without any of that fancy wildcard stuff. If no pattern prefix is specified, Selenium assumes that it's a "glob" pattern. For commands that return multiple values (such as verifySelectOptions), the string being matched is a comma-separated list of the return values, where both commas and backslashes in the values are backslash-escaped. When providing a pattern, the optional matching syntax (i.e. glob, regexp, etc.) is specified once, as usual, at the beginning of the pattern. 
""" ### This part is hard-coded in the XSL def __init__(self, host, port, browserStartCommand, browserURL): self.host = host self.port = port self.browserStartCommand = browserStartCommand self.browserURL = browserURL self.sessionId = None self.extensionJs = "" def setExtensionJs(self, extensionJs): self.extensionJs = extensionJs def start(self, browserConfigurationOptions=None, driver=None): start_args = [self.browserStartCommand, self.browserURL, self.extensionJs] if browserConfigurationOptions: start_args.append(browserConfigurationOptions) if driver: id = driver.desired_capabilities['webdriver.remote.sessionid'] start_args.append('webdriver.remote.sessionid=%s' % id) result = self.get_string("getNewBrowserSession", start_args) try: self.sessionId = result except ValueError: raise Exception, result def stop(self): self.do_command("testComplete", []) self.sessionId = None def do_command(self, verb, args): conn = httplib.HTTPConnection(self.host, self.port) try: body = u'cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8')) for i in range(len(args)): body += '&' + unicode(i+1) + '=' + \ urllib.quote_plus(unicode(args[i]).encode('utf-8')) if (None != self.sessionId): body += "&sessionId=" + unicode(self.sessionId) headers = { "Content-Type": "application/x-www-form-urlencoded; charset=utf-8" } conn.request("POST", "/selenium-server/driver/", body, headers) response = conn.getresponse() data = unicode(response.read(), "UTF-8") if (not data.startswith('OK')): raise Exception, data return data finally: conn.close() def get_string(self, verb, args): result = self.do_command(verb, args) return result[3:] def get_string_array(self, verb, args): csv = self.get_string(verb, args) if not csv: return [] token = "" tokens = [] escape = False for i in range(len(csv)): letter = csv[i] if (escape): token = token + letter escape = False continue if (letter == '\\'): escape = True elif (letter == ','): tokens.append(token) token = "" else: token = token + letter 
tokens.append(token) return tokens def get_number(self, verb, args): return int(self.get_string(verb, args)) def get_number_array(self, verb, args): string_array = self.get_string_array(verb, args) num_array = [] for i in string_array: num_array.append(int(i)) return num_array def get_boolean(self, verb, args): boolstr = self.get_string(verb, args) if ("true" == boolstr): return True if ("false" == boolstr): return False raise ValueError, "result is neither 'true' nor 'false': " + boolstr def get_boolean_array(self, verb, args): boolarr = self.get_string_array(verb, args) for i, boolstr in enumerate(boolarr): if ("true" == boolstr): boolarr[i] = True continue if ("false" == boolstr): boolarr[i] = False continue raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i] return boolarr def click(self,locator): """ Clicks on a link, button, checkbox or radio button. If the click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator """ self.do_command("click", [locator,]) def double_click(self,locator): """ Double clicks on a link, button, checkbox or radio button. If the double click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator """ self.do_command("doubleClick", [locator,]) def context_menu(self,locator): """ Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element). 'locator' is an element locator """ self.do_command("contextMenu", [locator,]) def click_at(self,locator,coordString): """ Clicks on a link, button, checkbox or radio button. If the click action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. 
""" self.do_command("clickAt", [locator,coordString,]) def double_click_at(self,locator,coordString): """ Doubleclicks on a link, button, checkbox or radio button. If the action causes a new page to load (like a link usually does), call waitForPageToLoad. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("doubleClickAt", [locator,coordString,]) def context_menu_at(self,locator,coordString): """ Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element). 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("contextMenuAt", [locator,coordString,]) def fire_event(self,locator,eventName): """ Explicitly simulate an event, to trigger the corresponding "on\ *event*" handler. 'locator' is an element locator 'eventName' is the event name, e.g. "focus" or "blur" """ self.do_command("fireEvent", [locator,eventName,]) def focus(self,locator): """ Move the focus to the specified element; for example, if the element is an input field, move the cursor to that field. 'locator' is an element locator """ self.do_command("focus", [locator,]) def key_press(self,locator,keySequence): """ Simulates a user pressing and releasing a key. 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyPress", [locator,keySequence,]) def shift_key_down(self): """ Press the shift key and hold it down until doShiftUp() is called or a new page is loaded. """ self.do_command("shiftKeyDown", []) def shift_key_up(self): """ Release the shift key. 
""" self.do_command("shiftKeyUp", []) def meta_key_down(self): """ Press the meta key and hold it down until doMetaUp() is called or a new page is loaded. """ self.do_command("metaKeyDown", []) def meta_key_up(self): """ Release the meta key. """ self.do_command("metaKeyUp", []) def alt_key_down(self): """ Press the alt key and hold it down until doAltUp() is called or a new page is loaded. """ self.do_command("altKeyDown", []) def alt_key_up(self): """ Release the alt key. """ self.do_command("altKeyUp", []) def control_key_down(self): """ Press the control key and hold it down until doControlUp() is called or a new page is loaded. """ self.do_command("controlKeyDown", []) def control_key_up(self): """ Release the control key. """ self.do_command("controlKeyUp", []) def key_down(self,locator,keySequence): """ Simulates a user pressing a key (without releasing it yet). 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyDown", [locator,keySequence,]) def key_up(self,locator,keySequence): """ Simulates a user releasing a key. 'locator' is an element locator 'keySequence' is Either be a string("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key), or a single character. For example: "w", "\119". """ self.do_command("keyUp", [locator,keySequence,]) def mouse_over(self,locator): """ Simulates a user hovering a mouse over the specified element. 'locator' is an element locator """ self.do_command("mouseOver", [locator,]) def mouse_out(self,locator): """ Simulates a user moving the mouse pointer away from the specified element. 'locator' is an element locator """ self.do_command("mouseOut", [locator,]) def mouse_down(self,locator): """ Simulates a user pressing the left mouse button (without releasing it yet) on the specified element. 
'locator' is an element locator """ self.do_command("mouseDown", [locator,]) def mouse_down_right(self,locator): """ Simulates a user pressing the right mouse button (without releasing it yet) on the specified element. 'locator' is an element locator """ self.do_command("mouseDownRight", [locator,]) def mouse_down_at(self,locator,coordString): """ Simulates a user pressing the left mouse button (without releasing it yet) at the specified location. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseDownAt", [locator,coordString,]) def mouse_down_right_at(self,locator,coordString): """ Simulates a user pressing the right mouse button (without releasing it yet) at the specified location. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseDownRightAt", [locator,coordString,]) def mouse_up(self,locator): """ Simulates the event that occurs when the user releases the mouse button (i.e., stops holding the button down) on the specified element. 'locator' is an element locator """ self.do_command("mouseUp", [locator,]) def mouse_up_right(self,locator): """ Simulates the event that occurs when the user releases the right mouse button (i.e., stops holding the button down) on the specified element. 'locator' is an element locator """ self.do_command("mouseUpRight", [locator,]) def mouse_up_at(self,locator,coordString): """ Simulates the event that occurs when the user releases the mouse button (i.e., stops holding the button down) at the specified location. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. 
""" self.do_command("mouseUpAt", [locator,coordString,]) def mouse_up_right_at(self,locator,coordString): """ Simulates the event that occurs when the user releases the right mouse button (i.e., stops holding the button down) at the specified location. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseUpRightAt", [locator,coordString,]) def mouse_move(self,locator): """ Simulates a user pressing the mouse button (without releasing it yet) on the specified element. 'locator' is an element locator """ self.do_command("mouseMove", [locator,]) def mouse_move_at(self,locator,coordString): """ Simulates a user pressing the mouse button (without releasing it yet) on the specified element. 'locator' is an element locator 'coordString' is specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator. """ self.do_command("mouseMoveAt", [locator,coordString,]) def type(self,locator,value): """ Sets the value of an input field, as though you typed it in. Can also be used to set the value of combo boxes, check boxes, etc. In these cases, value should be the value of the option selected, not the visible text. 'locator' is an element locator 'value' is the value to type """ self.do_command("type", [locator,value,]) def type_keys(self,locator,value): """ Simulates keystroke events on the specified element, as though you typed the value key-by-key. This is a convenience method for calling keyDown, keyUp, keyPress for every character in the specified string; this is useful for dynamic UI widgets (like auto-completing combo boxes) that require explicit key events. Unlike the simple "type" command, which forces the specified value into the page directly, this command may or may not have any visible effect, even in cases where typing keys would normally have a visible effect. 
For example, if you use "typeKeys" on a form element, you may or may not see the results of what you typed in the field. In some cases, you may need to use the simple "type" command to set the value of the field and then the "typeKeys" command to send the keystroke events corresponding to what you just typed. 'locator' is an element locator 'value' is the value to type """ self.do_command("typeKeys", [locator,value,]) def set_speed(self,value): """ Set execution speed (i.e., set the millisecond length of a delay which will follow each selenium operation). By default, there is no such delay, i.e., the delay is 0 milliseconds. 'value' is the number of milliseconds to pause after operation """ self.do_command("setSpeed", [value,]) def get_speed(self): """ Get execution speed (i.e., get the millisecond length of the delay following each selenium operation). By default, there is no such delay, i.e., the delay is 0 milliseconds. See also setSpeed. """ return self.get_string("getSpeed", []) def get_log(self): """ Get RC logs associated with current session. """ return self.get_string("getLog", []) def check(self,locator): """ Check a toggle-button (checkbox/radio) 'locator' is an element locator """ self.do_command("check", [locator,]) def uncheck(self,locator): """ Uncheck a toggle-button (checkbox/radio) 'locator' is an element locator """ self.do_command("uncheck", [locator,]) def select(self,selectLocator,optionLocator): """ Select an option from a drop-down using an option locator. Option locators provide different ways of specifying options of an HTML Select element (e.g. for selecting a specific option, or for asserting that the selected option satisfies a specification). There are several forms of Select Option Locator. * \ **label**\ =\ *labelPattern*: matches options based on their labels, i.e. the visible text. (This is the default.) * label=regexp:^[Oo]ther * \ **value**\ =\ *valuePattern*: matches options based on their values. 
* value=other * \ **id**\ =\ *id*: matches options based on their ids. * id=option1 * \ **index**\ =\ *index*: matches an option based on its index (offset from zero). * index=2 If no option locator prefix is provided, the default behaviour is to match on \ **label**\ . 'selectLocator' is an element locator identifying a drop-down menu 'optionLocator' is an option locator (a label by default) """ self.do_command("select", [selectLocator,optionLocator,]) def add_selection(self,locator,optionLocator): """ Add a selection to the set of selected options in a multi-select element using an option locator. @see #doSelect for details of option locators 'locator' is an element locator identifying a multi-select box 'optionLocator' is an option locator (a label by default) """ self.do_command("addSelection", [locator,optionLocator,]) def remove_selection(self,locator,optionLocator): """ Remove a selection from the set of selected options in a multi-select element using an option locator. @see #doSelect for details of option locators 'locator' is an element locator identifying a multi-select box 'optionLocator' is an option locator (a label by default) """ self.do_command("removeSelection", [locator,optionLocator,]) def remove_all_selections(self,locator): """ Unselects all of the selected options in a multi-select element. 'locator' is an element locator identifying a multi-select box """ self.do_command("removeAllSelections", [locator,]) def submit(self,formLocator): """ Submit the specified form. This is particularly useful for forms without submit buttons, e.g. single-input "Search" forms. 'formLocator' is an element locator for the form you want to submit """ self.do_command("submit", [formLocator,]) def open(self,url,ignoreResponseCode=True): """ Opens an URL in the test frame. This accepts both relative and absolute URLs. The "open" command waits for the page to load before proceeding, ie. the "AndWait" suffix is implicit. 
\ *Note*: The URL must be on the same domain as the runner HTML due to security restrictions in the browser (Same Origin Policy). If you need to open an URL on another domain, use the Selenium Server to start a new browser session on that domain. 'url' is the URL to open; may be relative or absolute 'ignoreResponseCode' if set to true: doesnt send ajax HEAD/GET request; if set to false: sends ajax HEAD/GET request to the url and reports error code if any as response to open. """ self.do_command("open", [url,ignoreResponseCode]) def open_window(self,url,windowID): """ Opens a popup window (if a window with that ID isn't already open). After opening the window, you'll need to select it using the selectWindow command. This command can also be a useful workaround for bug SEL-339. In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example). In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using an empty (blank) url, like this: openWindow("", "myFunnyWindow"). 'url' is the URL to open, which can be blank 'windowID' is the JavaScript window ID of the window to select """ self.do_command("openWindow", [url,windowID,]) def select_window(self,windowID): """ Selects a popup window using a window locator; once a popup window has been selected, all commands go to that window. To select the main window again, use null as the target. Window locators provide different ways of specifying the window object: by title, by internal JavaScript "name," or by JavaScript variable. * \ **title**\ =\ *My Special Window*: Finds the window using the text that appears in the title bar. Be careful; two windows can share the same title. If that happens, this locator will just pick one. * \ **name**\ =\ *myWindow*: Finds the window using its internal JavaScript "name" property. 
This is the second parameter "windowName" passed to the JavaScript method window.open(url, windowName, windowFeatures, replaceFlag) (which Selenium intercepts). * \ **var**\ =\ *variableName*: Some pop-up windows are unnamed (anonymous), but are associated with a JavaScript variable name in the current application window, e.g. "window.foo = window.open(url);". In those cases, you can open the window using "var=foo". If no window locator prefix is provided, we'll try to guess what you mean like this: 1.) if windowID is null, (or the string "null") then it is assumed the user is referring to the original window instantiated by the browser). 2.) if the value of the "windowID" parameter is a JavaScript variable name in the current application window, then it is assumed that this variable contains the return value from a call to the JavaScript window.open() method. 3.) Otherwise, selenium looks in a hash it maintains that maps string names to window "names". 4.) If \ *that* fails, we'll try looping over all of the known windows to try to find the appropriate "title". Since "title" is not necessarily unique, this may have unexpected behavior. If you're having trouble figuring out the name of a window that you want to manipulate, look at the Selenium log messages which identify the names of windows created via window.open (and therefore intercepted by Selenium). You will see messages like the following for each window as it is opened: ``debug: window.open call intercepted; window ID (which you can use with selectWindow()) is "myNewWindow"`` In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example). (This is bug SEL-339.) In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using an empty (blank) url, like this: openWindow("", "myFunnyWindow"). 
'windowID' is the JavaScript window ID of the window to select """ self.do_command("selectWindow", [windowID,]) def select_pop_up(self,windowID): """ Simplifies the process of selecting a popup window (and does not offer functionality beyond what ``selectWindow()`` already provides). * If ``windowID`` is either not specified, or specified as "null", the first non-top window is selected. The top window is the one that would be selected by ``selectWindow()`` without providing a ``windowID`` . This should not be used when more than one popup window is in play. * Otherwise, the window will be looked up considering ``windowID`` as the following in order: 1) the "name" of the window, as specified to ``window.open()``; 2) a javascript variable which is a reference to a window; and 3) the title of the window. This is the same ordered lookup performed by ``selectWindow`` . 'windowID' is an identifier for the popup window, which can take on a number of different meanings """ self.do_command("selectPopUp", [windowID,]) def deselect_pop_up(self): """ Selects the main window. Functionally equivalent to using ``selectWindow()`` and specifying no value for ``windowID``. """ self.do_command("deselectPopUp", []) def select_frame(self,locator): """ Selects a frame within the current window. (You may invoke this command multiple times to select nested frames.) To select the parent frame, use "relative=parent" as a locator; to select the top frame, use "relative=top". You can also select a frame by its 0-based index number; select the first frame with "index=0", or the third frame with "index=2". 
You may also use a DOM expression to identify the frame you want directly, like this: ``dom=frames["main"].frames["subframe"]`` 'locator' is an element locator identifying a frame or iframe """ self.do_command("selectFrame", [locator,]) def get_whether_this_frame_match_frame_expression(self,currentFrameString,target): """ Determine whether current/locator identify the frame containing this running code. This is useful in proxy injection mode, where this code runs in every browser frame and window, and sometimes the selenium server needs to identify the "current" frame. In this case, when the test calls selectFrame, this routine is called for each frame to figure out which one has been selected. The selected frame will return true, while all others will return false. 'currentFrameString' is starting frame 'target' is new frame (which might be relative to the current one) """ return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,]) def get_whether_this_window_match_window_expression(self,currentWindowString,target): """ Determine whether currentWindowString plus target identify the window containing this running code. This is useful in proxy injection mode, where this code runs in every browser frame and window, and sometimes the selenium server needs to identify the "current" window. In this case, when the test calls selectWindow, this routine is called for each window to figure out which one has been selected. The selected window will return true, while all others will return false. 'currentWindowString' is starting window 'target' is new window (which might be relative to the current one, e.g., "_parent") """ return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,]) def wait_for_pop_up(self,windowID,timeout): """ Waits for a popup window to appear and load up. 
'windowID' is the JavaScript window "name" of the window that will appear (not the text of the title bar) If unspecified, or specified as "null", this command will wait for the first non-top window to appear (don't rely on this if you are working with multiple popups simultaneously). 'timeout' is a timeout in milliseconds, after which the action will return with an error. If this value is not specified, the default Selenium timeout will be used. See the setTimeout() command. """ self.do_command("waitForPopUp", [windowID,timeout,]) def choose_cancel_on_next_confirmation(self): """ By default, Selenium's overridden window.confirm() function will return true, as if the user had manually clicked OK; after running this command, the next call to confirm() will return false, as if the user had clicked Cancel. Selenium will then resume using the default behavior for future confirmations, automatically returning true (OK) unless/until you explicitly call this command for each confirmation. Take note - every time a confirmation comes up, you must consume it with a corresponding getConfirmation, or else the next selenium operation will fail. """ self.do_command("chooseCancelOnNextConfirmation", []) def choose_ok_on_next_confirmation(self): """ Undo the effect of calling chooseCancelOnNextConfirmation. Note that Selenium's overridden window.confirm() function will normally automatically return true, as if the user had manually clicked OK, so you shouldn't need to use this command unless for some reason you need to change your mind prior to the next confirmation. After any confirmation, Selenium will resume using the default behavior for future confirmations, automatically returning true (OK) unless/until you explicitly call chooseCancelOnNextConfirmation for each confirmation. Take note - every time a confirmation comes up, you must consume it with a corresponding getConfirmation, or else the next selenium operation will fail. 
""" self.do_command("chooseOkOnNextConfirmation", []) def answer_on_next_prompt(self,answer): """ Instructs Selenium to return the specified answer string in response to the next JavaScript prompt [window.prompt()]. 'answer' is the answer to give in response to the prompt pop-up """ self.do_command("answerOnNextPrompt", [answer,]) def go_back(self): """ Simulates the user clicking the "back" button on their browser. """ self.do_command("goBack", []) def refresh(self): """ Simulates the user clicking the "Refresh" button on their browser. """ self.do_command("refresh", []) def close(self): """ Simulates the user clicking the "close" button in the titlebar of a popup window or tab. """ self.do_command("close", []) def is_alert_present(self): """ Has an alert occurred? This function never throws an exception """ return self.get_boolean("isAlertPresent", []) def is_prompt_present(self): """ Has a prompt occurred? This function never throws an exception """ return self.get_boolean("isPromptPresent", []) def is_confirmation_present(self): """ Has confirm() been called? This function never throws an exception """ return self.get_boolean("isConfirmationPresent", []) def get_alert(self): """ Retrieves the message of a JavaScript alert generated during the previous action, or fail if there were no alerts. Getting an alert has the same effect as manually clicking OK. If an alert is generated but you do not consume it with getAlert, the next Selenium action will fail. Under Selenium, JavaScript alerts will NOT pop up a visible alert dialog. Selenium does NOT support JavaScript alerts that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until someone manually clicks OK. """ return self.get_string("getAlert", []) def get_confirmation(self): """ Retrieves the message of a JavaScript confirmation dialog generated during the previous action. 
By default, the confirm function will return true, having the same effect as manually clicking OK. This can be changed by prior execution of the chooseCancelOnNextConfirmation command. If an confirmation is generated but you do not consume it with getConfirmation, the next Selenium action will fail. NOTE: under Selenium, JavaScript confirmations will NOT pop up a visible dialog. NOTE: Selenium does NOT support JavaScript confirmations that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until you manually click OK. """ return self.get_string("getConfirmation", []) def get_prompt(self): """ Retrieves the message of a JavaScript question prompt dialog generated during the previous action. Successful handling of the prompt requires prior execution of the answerOnNextPrompt command. If a prompt is generated but you do not get/verify it, the next Selenium action will fail. NOTE: under Selenium, JavaScript prompts will NOT pop up a visible dialog. NOTE: Selenium does NOT support JavaScript prompts that are generated in a page's onload() event handler. In this case a visible dialog WILL be generated and Selenium will hang until someone manually clicks OK. """ return self.get_string("getPrompt", []) def get_location(self): """ Gets the absolute URL of the current page. """ return self.get_string("getLocation", []) def get_title(self): """ Gets the title of the current page. """ return self.get_string("getTitle", []) def get_body_text(self): """ Gets the entire text of the page. """ return self.get_string("getBodyText", []) def get_value(self,locator): """ Gets the (whitespace-trimmed) value of an input field (or anything else with a value parameter). For checkbox/radio elements, the value will be "on" or "off" depending on whether the element is checked or not. 
'locator' is an element locator """ return self.get_string("getValue", [locator,]) def get_text(self,locator): """ Gets the text of an element. This works for any element that contains text. This command uses either the textContent (Mozilla-like browsers) or the innerText (IE-like browsers) of the element, which is the rendered text shown to the user. 'locator' is an element locator """ return self.get_string("getText", [locator,]) def highlight(self,locator): """ Briefly changes the backgroundColor of the specified element yellow. Useful for debugging. 'locator' is an element locator """ self.do_command("highlight", [locator,]) def get_eval(self,script): """ Gets the result of evaluating the specified JavaScript snippet. The snippet may have multiple lines, but only the result of the last line will be returned. Note that, by default, the snippet will run in the context of the "selenium" object itself, so ``this`` will refer to the Selenium object. Use ``window`` to refer to the window of your application, e.g. ``window.document.getElementById('foo')`` If you need to use a locator to refer to a single element in your application page, you can use ``this.browserbot.findElement("id=foo")`` where "id=foo" is your locator. 'script' is the JavaScript snippet to run """ return self.get_string("getEval", [script,]) def is_checked(self,locator): """ Gets whether a toggle-button (checkbox/radio) is checked. Fails if the specified element doesn't exist or isn't a toggle-button. 'locator' is an element locator pointing to a checkbox or radio button """ return self.get_boolean("isChecked", [locator,]) def get_table(self,tableCellAddress): """ Gets the text from a cell of a table. The cellAddress syntax tableLocator.row.column, where row and column start at 0. 'tableCellAddress' is a cell address, e.g. 
"foo.1.4" """ return self.get_string("getTable", [tableCellAddress,]) def get_selected_labels(self,selectLocator): """ Gets all option labels (visible text) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedLabels", [selectLocator,]) def get_selected_label(self,selectLocator): """ Gets option label (visible text) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedLabel", [selectLocator,]) def get_selected_values(self,selectLocator): """ Gets all option values (value attributes) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedValues", [selectLocator,]) def get_selected_value(self,selectLocator): """ Gets option value (value attribute) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedValue", [selectLocator,]) def get_selected_indexes(self,selectLocator): """ Gets all option indexes (option number, starting at 0) for selected options in the specified select or multi-select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedIndexes", [selectLocator,]) def get_selected_index(self,selectLocator): """ Gets option index (option number, starting at 0) for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedIndex", [selectLocator,]) def get_selected_ids(self,selectLocator): """ Gets all option element IDs for selected options in the specified select or multi-select element. 
'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectedIds", [selectLocator,]) def get_selected_id(self,selectLocator): """ Gets option element ID for selected option in the specified select element. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string("getSelectedId", [selectLocator,]) def is_something_selected(self,selectLocator): """ Determines whether some option in a drop-down menu is selected. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_boolean("isSomethingSelected", [selectLocator,]) def get_select_options(self,selectLocator): """ Gets all option labels in the specified select drop-down. 'selectLocator' is an element locator identifying a drop-down menu """ return self.get_string_array("getSelectOptions", [selectLocator,]) def get_attribute(self,attributeLocator): """ Gets the value of an element attribute. The value of the attribute may differ across browsers (this is the case for the "style" attribute, for example). 'attributeLocator' is an element locator followed by an @ sign and then the name of the attribute, e.g. "foo@bar" """ return self.get_string("getAttribute", [attributeLocator,]) def is_text_present(self,pattern): """ Verifies that the specified text pattern appears somewhere on the rendered page shown to the user. 'pattern' is a pattern to match with the text of the page """ return self.get_boolean("isTextPresent", [pattern,]) def is_element_present(self,locator): """ Verifies that the specified element is somewhere on the page. 'locator' is an element locator """ return self.get_boolean("isElementPresent", [locator,]) def is_visible(self,locator): """ Determines if the specified element is visible. An element can be rendered invisible by setting the CSS "visibility" property to "hidden", or the "display" property to "none", either for the element itself or one if its ancestors. 
This method will fail if the element is not present. 'locator' is an element locator """ return self.get_boolean("isVisible", [locator,]) def is_editable(self,locator): """ Determines whether the specified input element is editable, ie hasn't been disabled. This method will fail if the specified element isn't an input element. 'locator' is an element locator """ return self.get_boolean("isEditable", [locator,]) def get_all_buttons(self): """ Returns the IDs of all buttons on the page. If a given button has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllButtons", []) def get_all_links(self): """ Returns the IDs of all links on the page. If a given link has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllLinks", []) def get_all_fields(self): """ Returns the IDs of all input fields on the page. If a given field has no ID, it will appear as "" in this array. """ return self.get_string_array("getAllFields", []) def get_attribute_from_all_windows(self,attributeName): """ Returns every instance of some attribute from all known windows. 'attributeName' is name of an attribute on the windows """ return self.get_string_array("getAttributeFromAllWindows", [attributeName,]) def dragdrop(self,locator,movementsString): """ deprecated - use dragAndDrop instead 'locator' is an element locator 'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300" """ self.do_command("dragdrop", [locator,movementsString,]) def set_mouse_speed(self,pixels): """ Configure the number of pixels between "mousemove" events during dragAndDrop commands (default=10). Setting this value to 0 means that we'll send a "mousemove" event to every single pixel in between the start location and the end location; that can be very slow, and may cause some browsers to force the JavaScript to timeout. 
If the mouse speed is greater than the distance between the two dragged objects, we'll just send one "mousemove" at the start location and then one final one at the end location. 'pixels' is the number of pixels between "mousemove" events """ self.do_command("setMouseSpeed", [pixels,]) def get_mouse_speed(self): """ Returns the number of pixels between "mousemove" events during dragAndDrop commands (default=10). """ return self.get_number("getMouseSpeed", []) def drag_and_drop(self,locator,movementsString): """ Drags an element a certain distance and then drops it 'locator' is an element locator 'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300" """ self.do_command("dragAndDrop", [locator,movementsString,]) def drag_and_drop_to_object(self,locatorOfObjectToBeDragged,locatorOfDragDestinationObject): """ Drags an element and drops it on another element 'locatorOfObjectToBeDragged' is an element to be dragged 'locatorOfDragDestinationObject' is an element whose location (i.e., whose center-most pixel) will be the point where locatorOfObjectToBeDragged is dropped """ self.do_command("dragAndDropToObject", [locatorOfObjectToBeDragged,locatorOfDragDestinationObject,]) def window_focus(self): """ Gives focus to the currently selected window """ self.do_command("windowFocus", []) def window_maximize(self): """ Resize currently selected window to take up the entire screen """ self.do_command("windowMaximize", []) def get_all_window_ids(self): """ Returns the IDs of all windows that the browser knows about. """ return self.get_string_array("getAllWindowIds", []) def get_all_window_names(self): """ Returns the names of all windows that the browser knows about. """ return self.get_string_array("getAllWindowNames", []) def get_all_window_titles(self): """ Returns the titles of all windows that the browser knows about. 
""" return self.get_string_array("getAllWindowTitles", []) def get_html_source(self): """ Returns the entire HTML source between the opening and closing "html" tags. """ return self.get_string("getHtmlSource", []) def set_cursor_position(self,locator,position): """ Moves the text cursor to the specified position in the given input element or textarea. This method will fail if the specified element isn't an input element or textarea. 'locator' is an element locator pointing to an input element or textarea 'position' is the numerical position of the cursor in the field; position should be 0 to move the position to the beginning of the field. You can also set the cursor to -1 to move it to the end of the field. """ self.do_command("setCursorPosition", [locator,position,]) def get_element_index(self,locator): """ Get the relative index of an element to its parent (starting from 0). The comment node and empty text node will be ignored. 'locator' is an element locator pointing to an element """ return self.get_number("getElementIndex", [locator,]) def is_ordered(self,locator1,locator2): """ Check if these two elements have same parent and are ordered siblings in the DOM. Two same elements will not be considered ordered. 
'locator1' is an element locator pointing to the first element 'locator2' is an element locator pointing to the second element """ return self.get_boolean("isOrdered", [locator1,locator2,]) def get_element_position_left(self,locator): """ Retrieves the horizontal position of an element 'locator' is an element locator pointing to an element OR an element itself """ return self.get_number("getElementPositionLeft", [locator,]) def get_element_position_top(self,locator): """ Retrieves the vertical position of an element 'locator' is an element locator pointing to an element OR an element itself """ return self.get_number("getElementPositionTop", [locator,]) def get_element_width(self,locator): """ Retrieves the width of an element 'locator' is an element locator pointing to an element """ return self.get_number("getElementWidth", [locator,]) def get_element_height(self,locator): """ Retrieves the height of an element 'locator' is an element locator pointing to an element """ return self.get_number("getElementHeight", [locator,]) def get_cursor_position(self,locator): """ Retrieves the text cursor position in the given input element or textarea; beware, this may not work perfectly on all browsers. Specifically, if the cursor/selection has been cleared by JavaScript, this command will tend to return the position of the last location of the cursor, even though the cursor is now gone from the page. This is filed as SEL-243. This method will fail if the specified element isn't an input element or textarea, or there is no cursor in the element. 'locator' is an element locator pointing to an input element or textarea """ return self.get_number("getCursorPosition", [locator,]) def get_expression(self,expression): """ Returns the specified expression. This is useful because of JavaScript preprocessing. It is used to generate commands like assertExpression and waitForExpression. 
'expression' is the value to return """ return self.get_string("getExpression", [expression,]) def get_xpath_count(self,xpath): """ Returns the number of nodes that match the specified xpath, eg. "//table" would give the number of tables. 'xpath' is the xpath expression to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you. """ return self.get_number("getXpathCount", [xpath,]) def get_css_count(self,css): """ Returns the number of nodes that match the specified css selector, eg. "css=table" would give the number of tables. 'css' is the css selector to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you. """ return self.get_number("getCssCount", [css,]) def assign_id(self,locator,identifier): """ Temporarily sets the "id" attribute of the specified element, so you can locate it in the future using its ID rather than a slow/complicated XPath. This ID will disappear once the page is reloaded. 'locator' is an element locator pointing to an element 'identifier' is a string to be used as the ID of the specified element """ self.do_command("assignId", [locator,identifier,]) def allow_native_xpath(self,allow): """ Specifies whether Selenium should use the native in-browser implementation of XPath (if any native version is available); if you pass "false" to this function, we will always use our pure-JavaScript xpath library. Using the pure-JS xpath library can improve the consistency of xpath element locators between different browser vendors, but the pure-JS version is much slower than the native implementations. 'allow' is boolean, true means we'll prefer to use native XPath; false means we'll only use JS XPath """ self.do_command("allowNativeXpath", [allow,]) def ignore_attributes_without_value(self,ignore): """ Specifies whether Selenium will ignore xpath attributes that have no value, i.e. are the empty string, when using the non-native xpath evaluation engine. 
You'd want to do this for performance reasons in IE. However, this could break certain xpaths, for example an xpath that looks for an attribute whose value is NOT the empty string. The hope is that such xpaths are relatively rare, but the user should have the option of using them. Note that this only influences xpath evaluation when using the ajaxslt engine (i.e. not "javascript-xpath"). 'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness. """ self.do_command("ignoreAttributesWithoutValue", [ignore,]) def wait_for_condition(self,script,timeout): """ Runs the specified JavaScript snippet repeatedly until it evaluates to "true". The snippet may have multiple lines, but only the result of the last line will be considered. Note that, by default, the snippet will be run in the runner's test window, not in the window of your application. To get the window of your application, you can use the JavaScript snippet ``selenium.browserbot.getCurrentWindow()``, and then run your JavaScript in there 'script' is the JavaScript snippet to run 'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForCondition", [script,timeout,]) def set_timeout(self,timeout): """ Specifies the amount of time that Selenium will wait for actions to complete. Actions that require waiting include "open" and the "waitFor\*" actions. The default timeout is 30 seconds. 'timeout' is a timeout in milliseconds, after which the action will return with an error """ self.do_command("setTimeout", [timeout,]) def wait_for_page_to_load(self,timeout): """ Waits for a new page to load. You can use this command instead of the "AndWait" suffixes, "clickAndWait", "selectAndWait", "typeAndWait" etc. (which are only available in the JS API). 
Selenium constantly keeps track of new pages loading, and sets a "newPageLoaded" flag when it first notices a page load. Running any other Selenium command after turns the flag to false. Hence, if you want to wait for a page to load, you must wait immediately after a Selenium command that caused a page-load. 'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForPageToLoad", [timeout,]) def wait_for_frame_to_load(self,frameAddress,timeout): """ Waits for a new frame to load. Selenium constantly keeps track of new pages and frames loading, and sets a "newPageLoaded" flag when it first notices a page load. See waitForPageToLoad for more information. 'frameAddress' is FrameAddress from the server side 'timeout' is a timeout in milliseconds, after which this command will return with an error """ self.do_command("waitForFrameToLoad", [frameAddress,timeout,]) def get_cookie(self): """ Return all cookies of the current page under test. """ return self.get_string("getCookie", []) def get_cookie_by_name(self,name): """ Returns the value of the cookie with the specified name, or throws an error if the cookie is not present. 'name' is the name of the cookie """ return self.get_string("getCookieByName", [name,]) def is_cookie_present(self,name): """ Returns true if a cookie with the specified name is present, or false otherwise. 'name' is the name of the cookie """ return self.get_boolean("isCookiePresent", [name,]) def create_cookie(self,nameValuePair,optionsString): """ Create a new cookie whose path and domain are same with those of current page under test, unless you specified a path for this cookie explicitly. 'nameValuePair' is name and value of the cookie in a format "name=value" 'optionsString' is options for the cookie. Currently supported options include 'path', 'max_age' and 'domain'. the optionsString's format is "path=/path/, max_age=60, domain=.foo.com". 
The order of options are irrelevant, the unit of the value of 'max_age' is second. Note that specifying a domain that isn't a subset of the current domain will usually fail. """ self.do_command("createCookie", [nameValuePair,optionsString,]) def delete_cookie(self,name,optionsString): """ Delete a named cookie with specified path and domain. Be careful; to delete a cookie, you need to delete it using the exact same path and domain that were used to create the cookie. If the path is wrong, or the domain is wrong, the cookie simply won't be deleted. Also note that specifying a domain that isn't a subset of the current domain will usually fail. Since there's no way to discover at runtime the original path and domain of a given cookie, we've added an option called 'recurse' to try all sub-domains of the current domain with all paths that are a subset of the current path. Beware; this option can be slow. In big-O notation, it operates in O(n\*m) time, where n is the number of dots in the domain name and m is the number of slashes in the path. 'name' is the name of the cookie to be deleted 'optionsString' is options for the cookie. Currently supported options include 'path', 'domain' and 'recurse.' The optionsString's format is "path=/path/, domain=.foo.com, recurse=true". The order of options are irrelevant. Note that specifying a domain that isn't a subset of the current domain will usually fail. """ self.do_command("deleteCookie", [name,optionsString,]) def delete_all_visible_cookies(self): """ Calls deleteCookie with recurse=true on all cookies visible to the current page. As noted on the documentation for deleteCookie, recurse=true can be much slower than simply deleting the cookies using a known domain/path. """ self.do_command("deleteAllVisibleCookies", []) def set_browser_log_level(self,logLevel): """ Sets the threshold for browser-side logging messages; log messages beneath this threshold will be discarded. 
Valid logLevel strings are: "debug", "info", "warn", "error" or "off". To see the browser logs, you need to either show the log window in GUI mode, or enable browser-side logging in Selenium RC. 'logLevel' is one of the following: "debug", "info", "warn", "error" or "off" """ self.do_command("setBrowserLogLevel", [logLevel,]) def run_script(self,script): """ Creates a new "script" tag in the body of the current test window, and adds the specified text into the body of the command. Scripts run in this way can often be debugged more easily than scripts executed using Selenium's "getEval" command. Beware that JS exceptions thrown in these script tags aren't managed by Selenium, so you should probably wrap your script in try/catch blocks if there is any chance that the script will throw an exception. 'script' is the JavaScript snippet to run """ self.do_command("runScript", [script,]) def add_location_strategy(self,strategyName,functionDefinition): """ Defines a new function for Selenium to locate elements on the page. For example, if you define the strategy "foo", and someone runs click("foo=blah"), we'll run your function, passing you the string "blah", and click on the element that your function returns, or throw an "Element not found" error if your function returns null. We'll pass three arguments to your function: * locator: the string the user passed in * inWindow: the currently selected window * inDocument: the currently selected document The function must return null if the element can't be found. 'strategyName' is the name of the strategy to define; this should use only letters [a-zA-Z] with no spaces or other punctuation. 'functionDefinition' is a string defining the body of a function in JavaScript. 
For example: ``return inDocument.getElementById(locator);`` """ self.do_command("addLocationStrategy", [strategyName,functionDefinition,]) def capture_entire_page_screenshot(self,filename,kwargs): """ Saves the entire contents of the current window canvas to a PNG file. Contrast this with the captureScreenshot command, which captures the contents of the OS viewport (i.e. whatever is currently being displayed on the monitor), and is implemented in the RC only. Currently this only works in Firefox when running in chrome mode, and in IE non-HTA using the EXPERIMENTAL "Snapsie" utility. The Firefox implementation is mostly borrowed from the Screengrab! Firefox extension. Please see http://www.screengrab.org and http://snapsie.sourceforge.net/ for details. 'filename' is the path to the file to persist the screenshot as. No filename extension will be appended by default. Directories will not be created if they do not exist, and an exception will be thrown, possibly by native code. 'kwargs' is a kwargs string that modifies the way the screenshot is captured. Example: "background=#CCFFDD" Currently valid options: * background the background CSS for the HTML document. This may be useful to set for capturing screenshots of less-than-ideal layouts, for example where absolute positioning causes the calculation of the canvas dimension to fail and a black background is exposed (possibly obscuring black text). """ self.do_command("captureEntirePageScreenshot", [filename,kwargs,]) def rollup(self,rollupName,kwargs): """ Executes a command rollup, which is a series of commands with a unique name, and optionally arguments that control the generation of the set of commands. If any one of the rolled-up commands fails, the rollup is considered to have failed. Rollups may also contain nested rollups. 
'rollupName' is the name of the rollup command 'kwargs' is keyword arguments string that influences how the rollup expands into commands """ self.do_command("rollup", [rollupName,kwargs,]) def add_script(self,scriptContent,scriptTagId): """ Loads script content into a new script tag in the Selenium document. This differs from the runScript command in that runScript adds the script tag to the document of the AUT, not the Selenium document. The following entities in the script content are replaced by the characters they represent: &lt; &gt; &amp; The corresponding remove command is removeScript. 'scriptContent' is the Javascript content of the script to add 'scriptTagId' is (optional) the id of the new script tag. If specified, and an element with this id already exists, this operation will fail. """ self.do_command("addScript", [scriptContent,scriptTagId,]) def remove_script(self,scriptTagId): """ Removes a script tag from the Selenium document identified by the given id. Does nothing if the referenced tag doesn't exist. 'scriptTagId' is the id of the script element to remove. """ self.do_command("removeScript", [scriptTagId,]) def use_xpath_library(self,libraryName): """ Allows choice of one of the available libraries. 'libraryName' is name of the desired library Only the following three can be chosen: * "ajaxslt" - Google's library * "javascript-xpath" - Cybozu Labs' faster library * "default" - The default library. Currently the default library is "ajaxslt" . If libraryName isn't one of these three, then no change will be made. """ self.do_command("useXpathLibrary", [libraryName,]) def set_context(self,context): """ Writes a message to the status bar and adds a note to the browser-side log. 
'context' is the message to be sent to the browser """ self.do_command("setContext", [context,]) def attach_file(self,fieldLocator,fileLocator): """ Sets a file input (upload) field to the file listed in fileLocator 'fieldLocator' is an element locator 'fileLocator' is a URL pointing to the specified file. Before the file can be set in the input field (fieldLocator), Selenium RC may need to transfer the file to the local machine before attaching the file in a web page form. This is common in selenium grid configurations where the RC server driving the browser is not the same machine that started the test. Supported Browsers: Firefox ("\*chrome") only. """ self.do_command("attachFile", [fieldLocator,fileLocator,]) def capture_screenshot(self,filename): """ Captures a PNG screenshot to the specified file. 'filename' is the absolute path to the file to be written, e.g. "c:\blah\screenshot.png" """ self.do_command("captureScreenshot", [filename,]) def capture_screenshot_to_string(self): """ Capture a PNG screenshot. It then returns the file as a base 64 encoded string. """ return self.get_string("captureScreenshotToString", []) def captureNetworkTraffic(self, type): """ Returns the network traffic seen by the browser, including headers, AJAX requests, status codes, and timings. When this function is called, the traffic log is cleared, so the returned content is only the traffic seen since the last call. 'type' is The type of data to return the network traffic as. Valid values are: json, xml, or plain. """ return self.get_string("captureNetworkTraffic", [type,]) def capture_network_traffic(self, type): return self.captureNetworkTraffic(type) def addCustomRequestHeader(self, key, value): """ Tells the Selenium server to add the specificed key and value as a custom outgoing request header. This only works if the browser is configured to use the built in Selenium proxy. 'key' the header name. 'value' the header value. 
""" return self.do_command("addCustomRequestHeader", [key,value,]) def add_custom_request_header(self, key, value): return self.addCustomRequestHeader(key, value) def capture_entire_page_screenshot_to_string(self,kwargs): """ Downloads a screenshot of the browser current window canvas to a based 64 encoded PNG file. The \ *entire* windows canvas is captured, including parts rendered outside of the current view port. Currently this only works in Mozilla and when running in chrome mode. 'kwargs' is A kwargs string that modifies the way the screenshot is captured. Example: "background=#CCFFDD". This may be useful to set for capturing screenshots of less-than-ideal layouts, for example where absolute positioning causes the calculation of the canvas dimension to fail and a black background is exposed (possibly obscuring black text). """ return self.get_string("captureEntirePageScreenshotToString", [kwargs,]) def shut_down_selenium_server(self): """ Kills the running Selenium Server and all browser sessions. After you run this command, you will no longer be able to send commands to the server; you can't remotely start the server once it has been stopped. Normally you should prefer to run the "stop" command, which terminates the current browser session, rather than shutting down the entire server. """ self.do_command("shutDownSeleniumServer", []) def retrieve_last_remote_control_logs(self): """ Retrieve the last messages logged on a specific remote control. Useful for error reports, especially when running multiple remote controls in a distributed environment. The maximum number of log messages that can be retrieve is configured on remote control startup. """ return self.get_string("retrieveLastRemoteControlLogs", []) def key_down_native(self,keycode): """ Simulates a user pressing a key (without releasing it yet) by sending a native operating system keystroke. 
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyDownNative", [keycode,]) def key_up_native(self,keycode): """ Simulates a user releasing a key by sending a native operating system keystroke. This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyUpNative", [keycode,]) def key_press_native(self,keycode): """ Simulates a user pressing and releasing a key by sending a native operating system keystroke. This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular element, focus on the element first before running this command. 'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes! """ self.do_command("keyPressNative", [keycode,])
apache-2.0
esrefozturk/checkmate
CheckmateServerTests/test2.py
1
2922
# This file is part of Checkmate.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015 Ozge Lule(ozge.lule@ceng.metu.edu.tr),
# Esref Ozturk(esref.ozturk@ceng.metu.edu.tr)

from random import randint
from socket import *
from json import *

from Test import Test

# Book-selection strategies accepted by the engine's "setbookmode" op.
bookmodes = ['worst', 'best', 'random']

test = Test()

# One client connection per color, both to the local Checkmate server.
s1 = socket(AF_INET, SOCK_STREAM)
s1.connect(("0.0.0.0", 20000))

s2 = socket(AF_INET, SOCK_STREAM)
s2.connect(("0.0.0.0", 20000))

# White starts a multiplayer game; Black joins it by game id.
reply = loads(test.send(s1, '{"op":"start", "color":"White","params":["multi","None","None"]}'))
gameid = reply['gameid']

test.send(s2, '{"op":"connect" , "color":"Black","gameid":"%d"}' % gameid)

# Be sure to download and extract http://ftp.gnu.org/gnu/chess/book_1.00.pgn.gz
test.send(s1, '{"op":"play","params":["addbook","book_1.00.pgn"]}')
test.send(s1, '{"op":"play","params":["enablebook","True"]}')


def play_turn(mover, opponent, color, no_hint_status):
    """Play one half-move for *color* on socket *mover*.

    A random book mode is selected, the engine is asked for a hint, and
    the hinted move is played. If no hint is available, or the opponent's
    session reports the game finished, both sessions are killed and the
    script exits (with *no_hint_status* in the no-hint case).
    """
    test.send(mover, '{"op":"play","params":["setbookmode","%s"]}' % bookmodes[randint(0, 2)])
    hint_reply = loads(test.send(mover, '{"op":"play","params":["hint"]}'))
    if hint_reply['hint']:
        test.send(mover, '{"op":"play","params":["nextmove","%s","%s"]}' % (color, hint_reply['hint']))
    else:
        test.send(mover, '{"op":"kill"}')
        test.send(opponent, '{"op":"kill"}')
        exit(no_hint_status)
    status = loads(test.send(opponent, '{"op":"play","params":["isfinished"]}'))
    if status['isfinished']:
        test.send(opponent, '{"op":"kill"}')
        test.send(mover, '{"op":"kill"}')
        exit()


turn = 0
while True:
    if turn % 2 == 0:
        # The original script exits with status 1 only when White has no hint.
        play_turn(s1, s2, "White", 1)
    else:
        play_turn(s2, s1, "Black", None)
    turn += 1
gpl-3.0
grycap/scar
examples/mask-detector-workflow/blurry-faces/src/DetectorAPI.py
1
2691
import tensorflow as tf
import numpy as np
import time


class DetectorAPI:
    """Runs a frozen TF1 object-detection graph on single images.

    The serialized GraphDef is loaded once from *path_to_ckpt* and a
    session stays open until close() is called.
    """

    def __init__(self, path_to_ckpt):
        self.path_to_ckpt = path_to_ckpt

        # Deserialize the frozen checkpoint into a dedicated graph.
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(self.path_to_ckpt, 'rb') as ckpt_file:
                graph_def.ParseFromString(ckpt_file.read())
            tf.import_graph_def(graph_def, name='')

        # NOTE(review): as_default() returns a context manager that is never
        # entered here, and close() later calls .close() on it — confirm that
        # tear-down path actually works as intended.
        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.compat.v1.Session(graph=self.detection_graph)

        # Input placeholder: the trained model expects a batch of images
        # with shape [1, None, None, 3].
        self.image_tensor = self.detection_graph.get_tensor_by_name(
            'image_tensor:0')
        # Outputs: normalized boxes, per-box confidence scores, class ids,
        # and the number of detections.
        self.detection_boxes = self.detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        self.detection_scores = self.detection_graph.get_tensor_by_name(
            'detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name(
            'detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name(
            'num_detections:0')

    def processFrame(self, image):
        """Run detection on one H x W x 3 image.

        Returns (boxes, scores, classes, num) where each box is a tuple of
        pixel coordinates and scores/classes/num are plain Python values.
        """
        # Add the batch dimension the model expects: [1, H, W, 3].
        batch = np.expand_dims(image, axis=0)

        start = time.time()
        boxes, scores, classes, num = self.sess.run(
            [self.detection_boxes, self.detection_scores,
             self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: batch})
        elapsed = time.time() - start
        print("Elapsed Time:", elapsed)

        im_height, im_width, _ = image.shape
        # Box coordinates come back normalized; indices 1/3 scale with the
        # image width (x) and indices 0/2 with its height (y).
        boxes_list = [(int(boxes[0, i, 1] * im_width),
                       int(boxes[0, i, 0] * im_height),
                       int(boxes[0, i, 3] * im_width),
                       int(boxes[0, i, 2] * im_height))
                      for i in range(boxes.shape[1])]

        return (boxes_list,
                scores[0].tolist(),
                [int(x) for x in classes[0].tolist()],
                int(num[0]))

    def close(self):
        """Release the TF session (see NOTE in __init__ about default_graph)."""
        self.sess.close()
        self.default_graph.close()
apache-2.0
brandond/ansible
test/units/modules/network/nxos/test_nxos_ip_interface.py
33
3374
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from units.compat.mock import patch
from ansible.modules.network.nxos import _nxos_ip_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args


class TestNxosIPInterfaceModule(TestNxosModule):
    """Unit tests for the deprecated _nxos_ip_interface module."""

    # Module under test; consumed by the TestNxosModule base class.
    module = _nxos_ip_interface

    def setUp(self):
        """Patch every device-facing helper so no real device is contacted."""
        super(TestNxosIPInterfaceModule, self).setUp()

        self.mock_get_interface_mode = patch(
            'ansible.modules.network.nxos._nxos_ip_interface.get_interface_mode')
        self.get_interface_mode = self.mock_get_interface_mode.start()

        self.mock_send_show_command = patch(
            'ansible.modules.network.nxos._nxos_ip_interface.send_show_command')
        self.send_show_command = self.mock_send_show_command.start()

        self.mock_load_config = patch('ansible.modules.network.nxos._nxos_ip_interface.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_get_capabilities = patch('ansible.modules.network.nxos._nxos_ip_interface.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        # Pretend the connection is using the CLI transport.
        self.get_capabilities.return_value = {'network_api': 'cliconf'}

    def tearDown(self):
        """Stop every patcher started in setUp."""
        super(TestNxosIPInterfaceModule, self).tearDown()
        self.mock_get_interface_mode.stop()
        self.mock_send_show_command.stop()
        self.mock_load_config.stop()
        self.mock_get_capabilities.stop()

    def load_fixtures(self, commands=None, device=''):
        """Feed canned layer3 device output to the mocked helpers."""
        self.get_interface_mode.return_value = 'layer3'
        self.send_show_command.return_value = [load_fixture('', '_nxos_ip_interface.cfg')]
        self.load_config.return_value = None

    def test_nxos_ip_interface_ip_present(self):
        # Changing the address replaces the fixture's existing 192.0.2.1/8.
        set_module_args(dict(interface='eth2/1', addr='1.1.1.2', mask=8))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'],
                         ['interface eth2/1',
                          'no ip address 192.0.2.1/8',
                          'ip address 1.1.1.2/8'])

    def test_nxos_ip_interface_ip_idempotent(self):
        # Re-applying the address already in the fixture issues no commands.
        set_module_args(dict(interface='eth2/1', addr='192.0.2.1', mask=8))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])

    def test_nxos_ip_interface_ip_absent(self):
        # state=absent removes the configured address.
        set_module_args(dict(interface='eth2/1', state='absent', addr='192.0.2.1', mask=8))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'],
                         ['interface eth2/1', 'no ip address 192.0.2.1/8'])
gpl-3.0
bregman-arie/ansible
test/units/playbook/test_playbook.py
119
2239
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars.manager import VariableManager

from units.mock.loader import DictDataLoader


class TestPlaybook(unittest.TestCase):
    """Tests for constructing and loading Playbook objects."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_empty_playbook(self):
        # Constructing a Playbook with an empty loader must not raise.
        loader = DictDataLoader({})
        playbook = Playbook(loader=loader)

    def test_basic_playbook(self):
        # A minimal single-play playbook loads and exposes its plays.
        loader = DictDataLoader({
            "test_file.yml": """
            - hosts: all
            """,
        })
        playbook = Playbook.load("test_file.yml", loader=loader)
        plays = playbook.get_plays()

    def test_bad_playbook_files(self):
        loader = DictDataLoader({
            # represents a playbook which is not a list of plays
            "bad_list.yml": """
            foo: bar
            """,
            # represents a playbook where a play entry is mis-formatted
            "bad_entry.yml": """
            -
              - "This should be a mapping..."
            """,
        })
        variable_manager = VariableManager()
        with self.assertRaises(AnsibleParserError):
            Playbook.load("bad_list.yml", variable_manager, loader)
        with self.assertRaises(AnsibleParserError):
            Playbook.load("bad_entry.yml", variable_manager, loader)
gpl-3.0
infOpen/ansible_customer
tests/conftest.py
1
5437
import errno
import logging
import os
import shutil

from paramiko.client import SSHClient, AutoAddPolicy
from paramiko.ssh_exception import AuthenticationException, SSHException, \
    NoValidConnectionsError
import pytest
import requests


def _create_or_update_symlink(target, link_name):
    """Create ``link_name`` pointing at ``target``, replacing an existing link.

    Only an already-existing destination (EEXIST) is handled; any other
    OSError is re-raised unchanged.
    """
    try:
        os.symlink(target, link_name)
    except OSError as error:
        if error.errno != errno.EEXIST:
            # Bare ``raise`` keeps the original traceback intact.
            raise
        os.remove(link_name)
        os.symlink(target, link_name)


# Backwards-compatible alias for the original (misspelled) helper name.
_create_or_update_symplink = _create_or_update_symlink


def _check_sshd_service(ip_address, ssh_port):
    """Return True when sshd on the container answers, False otherwise.

    The probe connects with deliberately bogus credentials: an
    AuthenticationException proves sshd is up and processing logins, while
    transport-level failures mean it is not (yet) reachable.
    """
    with SSHClient() as ssh_client:
        ssh_client.set_missing_host_key_policy(AutoAddPolicy())

        # Add Paramiko transport console logger if requested
        if os.environ.get('PARAMIKO_DEBUG'):
            paramiko_logger = logging.getLogger('paramiko.transport')
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(
                logging.Formatter('%(asctime)s | %(levelname)-8s| PARAMIKO: '
                                  '%(lineno)03d@%(module)-10s| %(message)s')
            )
            paramiko_logger.addHandler(console_handler)
            paramiko_logger.setLevel(logging.DEBUG)

        # Check with bad credentials to raise an AuthenticationException
        try:
            ssh_client.connect(  # nosec
                ip_address, port=ssh_port, username='root',
                password='foobar', allow_agent=False, look_for_keys=False)
        except AuthenticationException:
            return True
        except (SSHException, NoValidConnectionsError):
            return False
        # A successful login also proves sshd is responsive.  (The original
        # code fell through and implicitly returned None here, which the
        # wait_until_responsive() caller treated as "not responsive".)
        return True


@pytest.fixture(scope='session')
def aci_ansible_target(docker_ip, docker_services):
    """Expose the SSH target container once its sshd answers.

    Returns a dict with the container's ``ip`` and mapped ``ssh_port``.
    """
    ssh_port = docker_services.port_for('aci-ansible-target', 22)

    # Check SSH connection before next steps
    docker_services.wait_until_responsive(
        timeout=30.0, pause=0.1,
        check=lambda: _check_sshd_service(docker_ip, ssh_port)
    )

    return {'ip': docker_ip, 'ssh_port': ssh_port}


@pytest.fixture(scope='session')
def aci_ansible_structure(tmpdir_factory, aci_ansible_target):
    """
    This fixture manage a basic ansible project structure with:

    * hosts file
    * private key file
    """
    BASE_IMAGE_PRIVATE_KEY_URL = (
        'https://github.com/phusion/baseimage-docker/raw/master/image/' +
        'services/sshd/keys/insecure_key')

    hosts_infos = 'ansible_host={} ansible_user=root ansible_port={}'.format(
        aci_ansible_target.get('ip'), aci_ansible_target.get('ssh_port')
    )
    hosts_file_content = [
        'foo {}'.format(hosts_infos),
        'bar {}'.format(hosts_infos),
        'foobar {}'.format(hosts_infos),
    ]

    base_image_private_key = requests.get(BASE_IMAGE_PRIVATE_KEY_URL)

    base_dir = tmpdir_factory.mktemp('ansible_config')
    base_dir.join('roles').mkdir()
    base_dir.join('hosts').write('\n'.join(hosts_file_content))
    # NOTE(review): ``.content`` is bytes; py.path's write() opens in text
    # mode.  This works on Python 2 — confirm before moving to Python 3
    # (write_binary() would be the safe call there).
    base_dir.join('ssh_key').write(base_image_private_key.content)
    # Private key must not be world/group readable or ssh refuses it.
    base_dir.join('ssh_key').chmod(0o400)

    shutil.copy2(
        os.path.join(os.getcwd(), 'tests/resources/ansible/basic_play.yml'),
        base_dir.join('basic_play.yml').strpath
    )
    shutil.copy2(
        os.path.join(os.getcwd(), 'tests/resources/ansible/requirements.yml'),
        base_dir.join('requirements.yml').strpath
    )

    return base_dir


@pytest.fixture(scope='session')
def aci_ansible_project(aci_ansible_structure):
    """
    Prepare environment vars to work with aci_ansible_project fixture
    """
    inventory_path = aci_ansible_structure.join('hosts').strpath
    private_key_path = aci_ansible_structure.join('ssh_key').strpath
    roles_path = aci_ansible_structure.join('roles').strpath

    os.environ['ANSIBLE_INVENTORY'] = inventory_path
    os.environ['ANSIBLE_HOST_KEY_CHECKING'] = str(False)
    os.environ['ANSIBLE_PRIVATE_KEY_FILE'] = private_key_path
    os.environ['ANSIBLE_ROLES_PATH'] = roles_path

    return aci_ansible_structure


@pytest.fixture(scope='session')
def aci_molecule_project(tmpdir_factory):
    """
    This fixture manage a basic molecule scenario structure with:

    * create and destroy playbooks
    * molecule configuration file
    * playbook to run
    """
    base_dir = tmpdir_factory.mktemp('molecule_config')
    base_dir.join('molecule').mkdir()
    scenario_dir = base_dir.join('molecule').join('basic-scenario')
    scenario_dir.mkdir()
    scenario_dir.join('tests').mkdir()

    managed_filenames = [
        'Dockerfile',
        'create.yml',
        'destroy.yml',
        'molecule.yml',
        'playbook.yml',
        'requirements.yml',
        '.yamllint',
    ]
    for filename in managed_filenames:
        shutil.copy2(
            os.path.join(
                os.getcwd(), 'tests/resources/molecule/{}'.format(filename)),
            scenario_dir.join('{}'.format(filename)).strpath
        )
    shutil.copy2(
        os.path.join(
            os.getcwd(), 'tests/resources/molecule/tests/test_default.py'),
        scenario_dir.join('tests').join('test_default.py').strpath
    )

    # Molecule expects a ./molecule directory in the working dir.
    _create_or_update_symlink(base_dir.join('molecule').strpath, 'molecule')
mit
mvcsantos/QGIS
python/plugins/processing/algs/gdal/rasterize_over.py
3
2801
# -*- coding: utf-8 -*-

"""
***************************************************************************
    rasterize_over.py
    ---------------------
    Date                 : September 2013
    Copyright            : (C) 2013 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os

from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from processing.algs.gdal.OgrAlgorithm import OgrAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils


class rasterize_over(OgrAlgorithm):
    """gdal_rasterize wrapper that burns an attribute field of a vector
    layer into an already-existing raster layer."""

    INPUT = 'INPUT'
    INPUT_RASTER = 'INPUT_RASTER'
    FIELD = 'FIELD'

    def commandLineName(self):
        return "gdalogr:rasterize_over"

    def defineCharacteristics(self):
        self.name = 'Rasterize (write over existing raster)'
        self.group = '[GDAL] Conversion'

        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer')))
        self.addParameter(ParameterTableField(self.FIELD,
                                              self.tr('Attribute field'),
                                              self.INPUT))
        self.addParameter(ParameterRaster(self.INPUT_RASTER,
                                          self.tr('Existing raster layer'),
                                          False))

    def getConsoleCommands(self, progress):
        vector_source = self.getParameterValue(self.INPUT)
        # ogrConnectionString() returns a quoted string; strip the
        # surrounding quote characters ([1:-1]) before passing it on.
        ogr_vector = self.ogrConnectionString(vector_source)[1:-1]
        raster_source = self.getParameterValue(self.INPUT_RASTER)
        ogr_raster = self.ogrConnectionString(raster_source)[1:-1]

        arguments = [
            '-a',
            str(self.getParameterValue(self.FIELD)),
            '-l',
            self.ogrLayerName(vector_source),
            ogr_vector,
            ogr_raster,
        ]

        return ['gdal_rasterize', GdalUtils.escapeAndJoin(arguments)]
gpl-2.0
samirasnoun/django_cms_gallery_image
cms/tests/toolbar.py
4
60596
# -*- coding: utf-8 -*- from __future__ import with_statement import datetime import re from django.contrib import admin from django.contrib.auth.models import AnonymousUser, Permission from django.template.defaultfilters import truncatewords from django.test import TestCase from django.test.client import RequestFactory from django.utils.functional import lazy from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse from cms.api import create_page, create_title, add_plugin from cms.cms_toolbar import ADMIN_MENU_IDENTIFIER, ADMINISTRATION_BREAK from cms.middleware.toolbar import ToolbarMiddleware from cms.models import Page, UserSettings, PagePermission from cms.toolbar.items import (ToolbarAPIMixin, LinkItem, ItemSearchResult, Break, SubMenu, AjaxItem) from cms.toolbar.toolbar import CMSToolbar from cms.test_utils.project.placeholderapp.models import (Example1, CharPksExample, MultilingualExample1) from cms.test_utils.project.placeholderapp.views import (detail_view, detail_view_char, detail_view_multi, detail_view_multi_unfiltered) from cms.test_utils.testcases import (SettingsOverrideTestCase, URL_CMS_PAGE_ADD, URL_CMS_PAGE_CHANGE) from cms.test_utils.util.context_managers import SettingsOverride, UserLoginContext from cms.utils.compat import DJANGO_1_4 from cms.utils.conf import get_cms_setting from cms.utils.urlutils import admin_reverse from cms.views import details class ToolbarTestBase(SettingsOverrideTestCase): def get_page_request(self, page, user, path=None, edit=False, lang_code='en'): path = path or page and page.get_absolute_url() if edit: path += '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON') request = RequestFactory().get(path) request.session = {} request.user = user request.LANGUAGE_CODE = lang_code if edit: request.GET = {'edit': None} else: request.GET = {'edit_off': None} request.current_page = page mid = ToolbarMiddleware() mid.process_request(request) request.toolbar.populate() return request 
def get_anon(self): return AnonymousUser() def get_staff(self): staff = self._create_user('staff', True, False) staff.user_permissions.add(Permission.objects.get(codename='change_page')) return staff def get_nonstaff(self): nonstaff = self._create_user('nonstaff') nonstaff.user_permissions.add(Permission.objects.get(codename='change_page')) return nonstaff def get_superuser(self): superuser = self._create_user('superuser', True, True) return superuser class ToolbarTests(ToolbarTestBase): settings_overrides = {'CMS_PERMISSION': False} def test_no_page_anon(self): request = self.get_page_request(None, self.get_anon(), '/') toolbar = CMSToolbar(request) toolbar.populate() toolbar.post_template_populate() items = toolbar.get_left_items() + toolbar.get_right_items() self.assertEqual(len(items), 0) def test_no_page_staff(self): request = self.get_page_request(None, self.get_staff(), '/') toolbar = CMSToolbar(request) toolbar.populate() toolbar.post_template_populate() items = toolbar.get_left_items() + toolbar.get_right_items() # Logo + admin-menu + logout self.assertEqual(len(items), 2, items) admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items() self.assertEqual(len(admin_items), 7, admin_items) def test_no_page_superuser(self): request = self.get_page_request(None, self.get_superuser(), '/') toolbar = CMSToolbar(request) toolbar.populate() toolbar.post_template_populate() items = toolbar.get_left_items() + toolbar.get_right_items() # Logo + edit-mode + admin-menu + logout self.assertEqual(len(items), 2) admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items() self.assertEqual(len(admin_items), 8, admin_items) def test_anon(self): page = create_page('test', 'nav_playground.html', 'en') request = self.get_page_request(page, self.get_anon()) toolbar = CMSToolbar(request) items = toolbar.get_left_items() + toolbar.get_right_items() self.assertEqual(len(items), 0) def test_nonstaff(self): page = create_page('test', 
'nav_playground.html', 'en', published=True) request = self.get_page_request(page, self.get_nonstaff()) toolbar = CMSToolbar(request) items = toolbar.get_left_items() + toolbar.get_right_items() # Logo + edit-mode + logout self.assertEqual(len(items), 0) def test_template_change_permission(self): with SettingsOverride(CMS_PERMISSIONS=True): page = create_page('test', 'nav_playground.html', 'en', published=True) request = self.get_page_request(page, self.get_nonstaff()) toolbar = CMSToolbar(request) items = toolbar.get_left_items() + toolbar.get_right_items() self.assertEqual([item for item in items if item.css_class_suffix == 'templates'], []) def test_markup(self): create_page("toolbar-page", "nav_playground.html", "en", published=True) superuser = self.get_superuser() with self.login_user_context(superuser): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'nav_playground.html') self.assertContains(response, '<div id="cms_toolbar"') self.assertContains(response, 'cms.base.css') def test_markup_generic_module(self): create_page("toolbar-page", "col_two.html", "en", published=True) superuser = self.get_superuser() with self.login_user_context(superuser): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) self.assertContains(response, '<div class="cms_submenu-item cms_submenu-item-title"><span>Generic</span>') def test_markup_flash_custom_module(self): superuser = self.get_superuser() create_page("toolbar-page", "col_two.html", "en", published=True) with self.login_user_context(superuser): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) self.assertContains(response, 'href="LinkPlugin">') self.assertContains(response, '<div class="cms_submenu-item cms_submenu-item-title"><span>Different 
Grouper</span>') def test_show_toolbar_to_staff(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, self.get_staff(), '/') toolbar = CMSToolbar(request) self.assertTrue(toolbar.show_toolbar) def test_show_toolbar_with_edit(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, AnonymousUser(), edit=True) toolbar = CMSToolbar(request) self.assertTrue(toolbar.show_toolbar) def test_show_toolbar_staff(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, self.get_staff(), edit=True) self.assertTrue(request.session.get('cms_build', True)) self.assertTrue(request.session.get('cms_edit', False)) def test_hide_toolbar_non_staff(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, self.get_nonstaff(), edit=True) self.assertFalse(request.session.get('cms_build', True)) self.assertFalse(request.session.get('cms_edit', True)) def test_show_toolbar_without_edit(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, AnonymousUser(), edit=False) toolbar = CMSToolbar(request) self.assertFalse(toolbar.show_toolbar) def test_publish_button(self): page = create_page('test', 'nav_playground.html', 'en', published=True) request = self.get_page_request(page, self.get_superuser(), edit=True) toolbar = CMSToolbar(request) toolbar.populate() toolbar.post_template_populate() self.assertTrue(toolbar.edit_mode) items = toolbar.get_left_items() + toolbar.get_right_items() self.assertEqual(len(items), 7) def test_no_publish_button(self): page = create_page('test', 'nav_playground.html', 'en', published=True) request = self.get_page_request(page, self.get_staff(), edit=True) toolbar = CMSToolbar(request) toolbar.populate() 
toolbar.post_template_populate() self.assertTrue(page.has_change_permission(request)) self.assertFalse(page.has_publish_permission(request)) self.assertTrue(toolbar.edit_mode) items = toolbar.get_left_items() + toolbar.get_right_items() # Logo + edit-mode + templates + page-menu + admin-menu + logout self.assertEqual(len(items), 6) def test_no_change_button(self): page = create_page('test', 'nav_playground.html', 'en', published=True) user = self.get_staff() user.user_permissions.all().delete() request = self.get_page_request(page, user, edit=True) toolbar = CMSToolbar(request) toolbar.populate() toolbar.post_template_populate() self.assertFalse(page.has_change_permission(request)) self.assertFalse(page.has_publish_permission(request)) items = toolbar.get_left_items() + toolbar.get_right_items() # Logo + page-menu + admin-menu + logout self.assertEqual(len(items), 3, items) admin_items = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'Test').get_items() self.assertEqual(len(admin_items), 7, admin_items) def test_button_consistency_staff(self): """ Tests that the buttons remain even when the language changes. 
""" user = self.get_staff() cms_page = create_page('test-en', 'nav_playground.html', 'en', published=True) create_title('de', 'test-de', cms_page) cms_page.publish('de') en_request = self.get_page_request(cms_page, user, edit=True) en_toolbar = CMSToolbar(en_request) en_toolbar.populate() en_toolbar.post_template_populate() self.assertEqual(len(en_toolbar.get_left_items() + en_toolbar.get_right_items()), 6) de_request = self.get_page_request(cms_page, user, path='/de/', edit=True, lang_code='de') de_toolbar = CMSToolbar(de_request) de_toolbar.populate() de_toolbar.post_template_populate() self.assertEqual(len(de_toolbar.get_left_items() + de_toolbar.get_right_items()), 6) def test_placeholder_name(self): with SettingsOverride(CMS_PLACEHOLDER_CONF={ 'col_left': {'name': 'PPPP'} }): superuser = self.get_superuser() create_page("toolbar-page", "col_two.html", "en", published=True) with self.login_user_context(superuser): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) self.assertContains(response, 'PPPP') def test_user_settings(self): superuser = self.get_superuser() with self.login_user_context(superuser): response = self.client.get('/en/admin/cms/usersettings/') self.assertEqual(response.status_code, 200) def test_remove_lang(self): create_page('test', 'nav_playground.html', 'en', published=True) superuser = self.get_superuser() with self.login_user_context(superuser): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) setting = UserSettings.objects.get(user=superuser) setting.language = 'it' setting.save() with SettingsOverride(LANGUAGES=(('en', 'english'),)): response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) self.assertNotContains(response, '/it/') def test_get_alphabetical_insert_position(self): page = create_page("toolbar-page", 
"nav_playground.html", "en", published=True) request = self.get_page_request(page, self.get_staff(), '/') toolbar = CMSToolbar(request) toolbar.get_left_items() toolbar.get_right_items() admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, 'TestAppMenu') # Insert alpha alpha_position = admin_menu.get_alphabetical_insert_position('menu-alpha', SubMenu, None) # As this will be the first item added to this, this use should return the default, or namely None if not alpha_position: alpha_position = admin_menu.find_first(Break, identifier=ADMINISTRATION_BREAK) + 1 admin_menu.get_or_create_menu('menu-alpha', 'menu-alpha', position=alpha_position) # Insert gamma (should return alpha_position + 1) gamma_position = admin_menu.get_alphabetical_insert_position('menu-gamma', SubMenu) self.assertEqual(int(gamma_position), int(alpha_position) + 1) admin_menu.get_or_create_menu('menu-gamma', 'menu-gamma', position=gamma_position) # Where should beta go? It should go right where gamma is now... 
beta_position = admin_menu.get_alphabetical_insert_position('menu-beta', SubMenu) self.assertEqual(beta_position, gamma_position) def test_out_of_order(self): page = create_page("toolbar-page", "nav_playground.html", "en", published=True) request = self.get_page_request(page, self.get_staff(), '/') toolbar = CMSToolbar(request) menu1 = toolbar.get_or_create_menu("test") menu2 = toolbar.get_or_create_menu("test", "Test", side=toolbar.RIGHT, position=2) self.assertEqual(menu1, menu2) self.assertEqual(menu1.name, 'Test') self.assertEqual(len(toolbar.get_right_items()), 1) def test_page_create_redirect(self): superuser = self.get_superuser() create_page("home", "nav_playground.html", "en", published=True) resolve_url = admin_reverse('cms_page_resolve') with self.login_user_context(superuser): response = self.client.post(resolve_url, {'pk': '', 'model': 'cms.page'}) self.assertEqual(response.content.decode('utf-8'), '') page_data = self.get_new_page_data() self.client.post(URL_CMS_PAGE_ADD, page_data) response = self.client.post(resolve_url, {'pk': Page.objects.all()[2].pk, 'model': 'cms.page'}) self.assertEqual(response.content.decode('utf-8'), '/en/test-page-1/') def test_page_edit_redirect(self): page1 = create_page("home", "nav_playground.html", "en", published=True) page2 = create_page("test", "nav_playground.html", "en", published=True) superuser = self.get_superuser() with self.login_user_context(superuser): page_data = self.get_new_page_data() self.client.post(URL_CMS_PAGE_CHANGE % page2.pk, page_data) url = admin_reverse('cms_page_resolve') response = self.client.post(url, {'pk': page1.pk, 'model': 'cms.page'}) self.assertEqual(response.content.decode('utf-8'), '/en/test-page-1/') response = self.client.post(url, {'pk': page1.pk, 'model': 'cms.page'}) self.assertEqual(response.content.decode('utf-8'), '/en/') response = self.client.post(url, {'pk': page1.pk, 'model': 'cms.page'}) self.assertEqual(response.content.decode('utf-8'), '') def get_username(self, 
user=None, default=''): user = user or self.request.user try: name = user.get_full_name() if name: return name elif DJANGO_1_4: return user.username else: return user.get_username() except (AttributeError, NotImplementedError): return default def test_toolbar_logout(self): ''' Tests that the Logout menu item includes the user's full name, if the relevant fields were populated in auth.User, else the user's username. ''' superuser = self.get_superuser() # Ensure that some other test hasn't set the name fields if superuser.get_full_name(): # Looks like it has been set, clear them superuser.first_name = '' superuser.last_name = '' superuser.save() page = create_page("home", "nav_playground.html", "en", published=True) page.publish('en') self.get_page_request(page, superuser, '/') # # Test that the logout shows the username of the logged-in user if # first_name and last_name haven't been provided. # with self.login_user_context(superuser): response = self.client.get(page.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar = response.context['request'].toolbar admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertTrue(admin_menu.find_first(AjaxItem, name=_(u'Logout %s') % self.get_username(superuser))) # # Test that the logout shows the logged-in user's name, if it was # populated in auth.User. # superuser.first_name = 'Super' superuser.last_name = 'User' superuser.save() # Sanity check... 
self.get_page_request(page, superuser, '/') with self.login_user_context(superuser): response = self.client.get(page.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar = response.context['request'].toolbar admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertTrue(admin_menu.find_first(AjaxItem, name=_(u'Logout %s') % self.get_username(superuser))) def test_toolbar_logout_redirect(self): """ Tests the logount AjaxItem on_success parameter in four different conditions: * published page: no redirect * unpublished page: redirect to the home page * published page with login_required: redirect to the home page * published page with view permissions: redirect to the home page """ superuser = self.get_superuser() page0 = create_page("home", "nav_playground.html", "en", published=True) page1 = create_page("internal", "nav_playground.html", "en", published=True, parent=page0) page2 = create_page("unpublished", "nav_playground.html", "en", published=False, parent=page0) page3 = create_page("login_restricted", "nav_playground.html", "en", published=True, parent=page0, login_required=True) page4 = create_page("view_restricted", "nav_playground.html", "en", published=True, parent=page0) PagePermission.objects.create(page=page4, can_view=True, user=superuser) page4.publish('en') page4 = page4.get_public_object() self.get_page_request(page4, superuser, '/') with self.login_user_context(superuser): # Published page, no redirect response = self.client.get(page1.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar = response.context['request'].toolbar menu_name = _(u'Logout %s') % self.get_username(superuser) admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertTrue(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success) # Unpublished page, redirect response = self.client.get(page2.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar 
= response.context['request'].toolbar admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertEquals(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success, '/') # Published page with login restrictions, redirect response = self.client.get(page3.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar = response.context['request'].toolbar admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertEquals(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success, '/') # Published page with view permissions, redirect response = self.client.get(page4.get_absolute_url('en') + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) toolbar = response.context['request'].toolbar admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) self.assertEquals(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success, '/') class EditModelTemplateTagTest(ToolbarTestBase): urls = 'cms.test_utils.project.placeholderapp_urls' edit_fields_rx = "(\?|&amp;)edit_fields=%s" def tearDown(self): Example1.objects.all().delete() MultilingualExample1.objects.all().delete() super(EditModelTemplateTagTest, self).tearDown() def test_anon(self): user = self.get_anon() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() request = self.get_page_request(page, user, edit=False) response = detail_view(request, ex1.pk) self.assertContains(response, "<h1>char_1</h1>") self.assertNotContains(response, "CMS.API") def test_noedit(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() request = self.get_page_request(page, user, edit=False) response = detail_view(request, ex1.pk) self.assertContains(response, "<h1>char_1</h1>") self.assertContains(response, "CMS.API") def test_edit(self): 
user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'char_1', ex1.pk)) def test_invalid_item(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model fake "char_1" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div class="cms_plugin cms_plugin-%s cms_render_model"></div>' % ex1.pk) def test_as_varname(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "char_1" as tempvar %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertNotContains( response, '<div class="cms_plugin cms_plugin-%s cms_render_model"></div>' % ex1.pk) def test_edit_render_placeholder(self): """ Tests the {% render_placeholder %} templatetag. 
""" user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() render_placeholder_body = "I'm the render placeholder body" plugin = add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_placeholder_body) template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_placeholder instance.placeholder %}</h1> <h2>{% render_placeholder instance.placeholder as tempvar %}</h2> <h3>{{ tempvar }}</h3> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_placeholder cms_placeholder-{0}"></div>\n' '<div class="cms_plugin cms_plugin-{1}">{2}</div></h1>'.format(ex1.placeholder.pk, plugin.pk, render_placeholder_body) ) self.assertContains( response, '<h2></h2>', ) # # NOTE: Using the render_placeholder "as" form should /not/ render # frontend placeholder editing support. 
# self.assertContains( response, '<h3>{0}</h3>'.format(render_placeholder_body) ) self.assertContains( response, 'CMS.Plugin(\'cms_plugin-{0}\''.format(plugin.pk) ) self.assertContains( response, 'CMS.Plugin(\'cms_placeholder-{0}\''.format(ex1.placeholder.pk) ) def test_filters(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1, <p>hello</p>, <p>hello</p>, <p>hello</p>, <p>hello</p>", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "char_1" "" "" 'truncatewords:2' %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">%s</div></h1>' % ( 'placeholderapp', 'example1', 'char_1', ex1.pk, truncatewords(ex1.char_1, 2))) def test_filters_date(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1, <p>hello</p>, <p>hello</p>, <p>hello</p>, <p>hello</p>", char_2="char_2", char_3="char_3", char_4="char_4", date_field=datetime.date(2012, 1, 1)) ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "date_field" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">%s</div></h1>' % ( 'placeholderapp', 'example1', 'date_field', ex1.pk, ex1.date_field.strftime("%Y-%m-%d"))) template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "date_field" "" "" 'date:"Y m d"' %}</h1> {% 
endblock content %} ''' response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">%s</div></h1>' % ( 'placeholderapp', 'example1', 'date_field', ex1.pk, ex1.date_field.strftime("%Y %m %d"))) def test_filters_notoolbar(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1, <p>hello</p>, <p>hello</p>, <p>hello</p>, <p>hello</p>", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "char_1" "" "" 'truncatewords:2' %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=False) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains(response, '<h1>%s</h1>' % truncatewords(ex1.char_1, 2)) def test_no_cms(self): user = self.get_staff() ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} {% render_model_icon instance %} {% endblock content %} ''' request = self.get_page_request('', user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s cms_render_model_icon"><img src="/static/cms/img/toolbar/render_model_placeholder.png"></div>' % ( 'placeholderapp', 'example1', ex1.pk)) self.assertContains(response, "'onClose': 'REFRESH_PAGE',") def test_icon_tag(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} {% render_model_icon instance %} {% endblock content %} ''' 
request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s cms_render_model_icon"><img src="/static/cms/img/toolbar/render_model_placeholder.png"></div>' % ( 'placeholderapp', 'example1', ex1.pk)) def test_icon_followed_by_render_model_block_tag(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4", date_field=datetime.date(2012, 1, 1)) ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %}{% load url from future %} {% block content %} {% render_model_icon instance "char_1" %} {% render_model_block instance "char_2" %} {{ instance }} <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1> <span class="date">{{ instance.date_field|date:"Y" }}</span> {% if instance.char_1 %} <a href="{% url 'detail' instance.pk %}">successful if</a> {% endif %} {% endrender_model_block %} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, "new CMS.Plugin('cms_plugin-{0}-{1}-{2}-{3}'".format('placeholderapp', 'example1', 'char_1', ex1.pk)) self.assertContains( response, "new CMS.Plugin('cms_plugin-{0}-{1}-{2}-{3}'".format('placeholderapp', 'example1', 'char_2', ex1.pk)) def test_add_tag(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} {% render_model_add instance %} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div 
class="cms_plugin cms_plugin-%s-%s-add-%s cms_render_model_add"><img src="/static/cms/img/toolbar/render_model_placeholder.png"></div>' % ( 'placeholderapp', 'example1', ex1.pk)) def test_block_tag(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4", date_field=datetime.date(2012, 1, 1)) ex1.save() # This template does not render anything as content is saved in a # variable and never inserted in the page template_text = '''{% extends "base.html" %} {% load cms_tags %}{% load url from future %} {% block content %} {% render_model_block instance as rendered_model %} {{ instance }} <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1> {{ instance.date_field|date:"Y" }} {% if instance.char_1 %} <a href="{% url 'detail' instance.pk %}">successful if</a> {% endif %} {% endrender_model_block %} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertNotContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s cms_render_model_icon"><img src="/static/cms/img/toolbar/render_model_icon.png"></div>' % ( 'placeholderapp', 'example1', ex1.pk)) # This template does not render anything as content is saved in a # variable and inserted in the page afterwards template_text = '''{% extends "base.html" %} {% load cms_tags %}{% load url from future %} {% block content %} {% render_model_block instance as rendered_model %} {{ instance }} <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1> <span class="date">{{ instance.date_field|date:"Y" }}</span> {% if instance.char_1 %} <a href="{% url 'detail' instance.pk %}">successful if</a> {% endif %} {% endrender_model_block %} {{ rendered_model }} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) # 
Assertions on the content of the block tag self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s cms_render_model cms_render_model_block">' % ( 'placeholderapp', 'example1', ex1.pk)) self.assertContains(response, '<h1>%s - %s</h1>' % (ex1.char_1, ex1.char_2)) self.assertContains(response, '<span class="date">%s</span>' % (ex1.date_field.strftime("%Y"))) self.assertContains(response, '<a href="%s">successful if</a></div>' % (reverse('detail', args=(ex1.pk,)))) # This template is rendered directly template_text = '''{% extends "base.html" %} {% load cms_tags %}{% load url from future %} {% block content %} {% render_model_block instance %} {{ instance }} <h1>{{ instance.char_1 }} - {{ instance.char_2 }}</h1> <span class="date">{{ instance.date_field|date:"Y" }}</span> {% if instance.char_1 %} <a href="{% url 'detail' instance.pk %}">successful if</a> {% endif %} {% endrender_model_block %} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) # Assertions on the content of the block tag self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s cms_render_model cms_render_model_block">' % ( 'placeholderapp', 'example1', ex1.pk)) self.assertContains(response, '<h1>%s - %s</h1>' % (ex1.char_1, ex1.char_2)) self.assertContains(response, '<span class="date">%s</span>' % (ex1.date_field.strftime("%Y"))) self.assertContains(response, '<a href="%s">successful if</a></div>' % (reverse('detail', args=(ex1.pk,)))) # Changelist check template_text = '''{% extends "base.html" %} {% load cms_tags %}{% load url from future %} {% block content %} {% render_model_block instance 'changelist' %} {{ instance }} {% endrender_model_block %} {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) # Assertions on the content of the block tag 
self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-changelist-%s cms_render_model cms_render_model_block">' % ( 'placeholderapp', 'example1', ex1.pk)) def test_invalid_attribute(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "fake_field" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model"></div>' % ( 'placeholderapp', 'example1', 'fake_field', ex1.pk)) # no attribute template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<div class="cms_plugin cms_plugin-%s cms_render_model"></div>' % ex1.pk) def test_callable_item(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) def test_view_method(self): user = self.get_staff() page = create_page('Test', 
'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_1,char_2" "en" "" "" "dynamic_url" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response,"'edit_plugin': '/admin/placeholderapp/example1/edit-field/%s/en/" % ex1.pk) def test_view_url(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_1,char_2" "en" "" "admin:placeholderapp_example1_edit_field" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response,"'edit_plugin': '/admin/placeholderapp/example1/edit-field/%s/en/" % ex1.pk) def test_method_attribute(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_1,char_2" "en" "" "" "static_admin_url" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) ex1.set_static_url(request) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', 
ex1.pk)) def test_admin_url(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_1" "en" "" "admin:placeholderapp_example1_edit_field" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains(response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) def test_admin_url_extra_field(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_2" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains(response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) self.assertContains(response, "/admin/placeholderapp/example1/edit-field/%s/en/" % ex1.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_2", response.content.decode('utf8'))) def test_admin_url_multiple_fields(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" "char_1,char_2" 
"en" "" "admin:placeholderapp_example1_edit_field" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) self.assertContains(response, "/admin/placeholderapp/example1/edit-field/%s/en/" % ex1.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_1", response.content.decode('utf8'))) self.assertTrue(re.search(self.edit_fields_rx % "char_1%2Cchar_2", response.content.decode('utf8'))) def test_instance_method(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance "callable_item" %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) def test_item_from_context(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template_text = '''{% extends "base.html" %} {% load cms_tags %} {% block content %} <h1>{% render_model instance item_name %}</h1> {% endblock content %} ''' request = self.get_page_request(page, user, edit=True) response = detail_view(request, ex1.pk, template_string=template_text, item_name="callable_item") self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s 
cms_render_model">char_1</div></h1>' % ( 'placeholderapp', 'example1', 'callable_item', ex1.pk)) def test_edit_field(self): from django.contrib.admin import site exadmin = site._registry[Example1] user = self.get_superuser() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() request = self.get_page_request(page, user, edit=True) request.GET['edit_fields'] = 'char_1' response = exadmin.edit_field(request, ex1.pk, "en") self.assertContains(response, 'id="id_char_1"') self.assertContains(response, 'value="char_1"') def test_edit_field_not_allowed(self): from django.contrib.admin import site exadmin = site._registry[Example1] user = self.get_superuser() page = create_page('Test', 'col_two.html', 'en', published=True) ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() request = self.get_page_request(page, user, edit=True) request.GET['edit_fields'] = 'char_3' response = exadmin.edit_field(request, ex1.pk, "en") self.assertEqual(response.status_code, 200) self.assertContains(response, 'Field char_3 not found') def test_multi_edit(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) title = create_title("fr", "test", page) exm = MultilingualExample1() exm.translate("en") exm.char_1 = 'one' exm.char_2 = 'two' exm.save() exm.translate("fr") exm.char_1 = "un" exm.char_2 = "deux" exm.save() request = self.get_page_request(page, user, edit=True, lang_code="en") response = detail_view_multi(request, exm.pk) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">one</div></h1>' % ( 'placeholderapp', 'multilingualexample1', 'char_1', exm.pk)) self.assertContains(response, "/admin/placeholderapp/multilingualexample1/edit-field/%s/en/" % exm.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_1", response.content.decode('utf8'))) 
self.assertTrue(re.search(self.edit_fields_rx % "char_1%2Cchar_2", response.content.decode('utf8'))) with SettingsOverride(LANGUAGE_CODE="fr"): request = self.get_page_request(title.page, user, edit=True, lang_code="fr") response = detail_view_multi(request, exm.pk) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">un</div></h1>' % ( 'placeholderapp', 'multilingualexample1', 'char_1', exm.pk)) self.assertContains(response, "/admin/placeholderapp/multilingualexample1/edit-field/%s/fr/" % exm.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_1%2Cchar_2", response.content.decode('utf8'))) def test_multi_edit_no500(self): user = self.get_staff() page = create_page('Test', 'col_two.html', 'en', published=True) title = create_title("fr", "test", page) exm = MultilingualExample1() exm.translate("fr") exm.char_1 = "un" exm.char_2 = "deux" exm.save() with SettingsOverride(LANGUAGE_CODE="fr"): request = self.get_page_request(title.page, user, edit=True, lang_code="fr") response = detail_view_multi_unfiltered(request, exm.pk) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">un</div></h1>' % ( 'placeholderapp', 'multilingualexample1', 'char_1', exm.pk)) self.assertContains(response, "/admin/placeholderapp/multilingualexample1/edit-field/%s/fr/" % exm.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_1%2Cchar_2", response.content.decode('utf8'))) with SettingsOverride(LANGUAGE_CODE="de"): request = self.get_page_request(title.page, user, edit=True, lang_code="de") response = detail_view_multi_unfiltered(request, exm.pk) self.assertContains( response, '<h1><div class="cms_plugin cms_plugin-%s-%s-%s-%s cms_render_model">un</div></h1>' % ( 'placeholderapp', 'multilingualexample1', 'char_1', exm.pk)) self.assertContains(response, "/admin/placeholderapp/multilingualexample1/edit-field/%s/de/" % exm.pk) self.assertTrue(re.search(self.edit_fields_rx % "char_1%2Cchar_2", 
response.content.decode('utf8'))) def test_edit_field_multilingual(self): from django.contrib.admin import site exadmin = site._registry[MultilingualExample1] user = self.get_superuser() page = create_page('Test', 'col_two.html', 'en', published=True) title = create_title("fr", "test", page) exm = MultilingualExample1() exm.translate("en") exm.char_1 = 'one' exm.char_2 = 'two' exm.save() exm.translate("fr") exm.char_1 = "un" exm.char_2 = "deux" exm.save() request = self.get_page_request(page, user, edit=True) request.GET['edit_fields'] = 'char_2' response = exadmin.edit_field(request, exm.pk, "en") self.assertContains(response, 'id="id_char_2"') self.assertContains(response, 'value="two"') response = exadmin.edit_field(request, exm.pk, "fr") self.assertContains(response, 'id="id_char_2"') self.assertContains(response, 'value="deux"') with SettingsOverride(LANGUAGE_CODE="fr"): request = self.get_page_request(title.page, user, edit=True, lang_code="fr") request.GET['edit_fields'] = 'char_2' response = exadmin.edit_field(request, exm.pk, "fr") self.assertContains(response, 'id="id_char_2"') self.assertContains(response, 'value="deux"') def test_edit_page(self): language = "en" user = self.get_superuser() page = create_page('Test', 'col_two.html', language, published=True) title = page.get_title_obj(language) title.menu_title = 'Menu Test' title.page_title = 'Page Test' title.title = 'Main Test' title.save() page.publish('en') page.reload() request = self.get_page_request(page, user, edit=True) response = details(request, '') self.assertContains( response, '<div class="cms_plugin cms_plugin-cms-page-get_page_title-%s cms_render_model">%s</div>' % ( page.pk, page.get_page_title(language))) self.assertContains( response, '<div class="cms_plugin cms_plugin-cms-page-get_menu_title-%s cms_render_model">%s</div>' % ( page.pk, page.get_menu_title(language))) self.assertContains( response, '<div class="cms_plugin cms_plugin-cms-page-get_title-%s cms_render_model">%s</div>' % ( 
page.pk, page.get_title(language))) self.assertContains( response, '<div class="cms_plugin cms_plugin-cms-page-changelist-%s cms_render_model cms_render_model_block"><h3>Menu</h3></div>' % page.pk) class CharPkFrontendPlaceholderAdminTest(ToolbarTestBase): def get_admin(self): admin.autodiscover() return admin.site._registry[CharPksExample] def test_url_char_pk(self): """ Tests whether the frontend admin matches the edit_fields url with alphanumeric pks """ ex = CharPksExample( char_1='one', slug='some-Special_slug_123', ) ex.save() superuser = self.get_superuser() with UserLoginContext(self, superuser): response = self.client.get(admin_reverse('placeholderapp_charpksexample_edit_field', args=(ex.pk, 'en')), data={'edit_fields': 'char_1'}) # if we get a response pattern matches self.assertEqual(response.status_code, 200) def test_url_numeric_pk(self): """ Tests whether the frontend admin matches the edit_fields url with numeric pks """ ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() superuser = self.get_superuser() with UserLoginContext(self, superuser): response = self.client.get(admin_reverse('placeholderapp_example1_edit_field', args=(ex.pk, 'en')), data={'edit_fields': 'char_1'}) # if we get a response pattern matches self.assertEqual(response.status_code, 200) def test_view_char_pk(self): """ Tests whether the admin urls triggered when the toolbar is active works (i.e.: no NoReverseMatch is raised) with alphanumeric pks """ page = create_page('Test', 'col_two.html', 'en', published=True) ex = CharPksExample( char_1='one', slug='some-Special_slug_123', ) ex.save() superuser = self.get_superuser() request = self.get_page_request(page, superuser, edit=True) response = detail_view_char(request, ex.pk) # if we get a response pattern matches self.assertEqual(response.status_code, 200) def test_view_numeric_pk(self): """ Tests whether the admin urls triggered when the toolbar is active works (i.e.: no NoReverseMatch is raised) with 
numeric pks """ page = create_page('Test', 'col_two.html', 'en', published=True) ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() superuser = self.get_superuser() request = self.get_page_request(page, superuser, edit=True) response = detail_view(request, ex.pk) # if we get a response pattern matches self.assertEqual(response.status_code, 200) class ToolbarAPITests(TestCase): def test_find_item(self): api = ToolbarAPIMixin() first = api.add_link_item('First', 'http://www.example.org') second = api.add_link_item('Second', 'http://www.example.org') all_links = api.find_items(LinkItem) self.assertEqual(len(all_links), 2) result = api.find_first(LinkItem, name='First') self.assertNotEqual(result, None) self.assertEqual(result.index, 0) self.assertEqual(result.item, first) result = api.find_first(LinkItem, name='Second') self.assertNotEqual(result, None) self.assertEqual(result.index, 1) self.assertEqual(result.item, second) no_result = api.find_first(LinkItem, name='Third') self.assertEqual(no_result, None) def test_find_item_lazy(self): lazy_attribute = lazy(lambda x: x, str)('Test') api = ToolbarAPIMixin() api.add_link_item(lazy_attribute, None) result = api.find_first(LinkItem, name='Test') self.assertNotEqual(result, None) self.assertEqual(result.index, 0) def test_not_is_staff(self): request = RequestFactory().get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) request.session = {} request.LANGUAGE_CODE = 'en' request.user = AnonymousUser() toolbar = CMSToolbar(request) self.assertEqual(len(toolbar.get_left_items()), 0) self.assertEqual(len(toolbar.get_right_items()), 0) def test_item_search_result(self): item = object() result = ItemSearchResult(item, 2) self.assertEqual(result.item, item) self.assertEqual(int(result), 2) result += 2 self.assertEqual(result.item, item) self.assertEqual(result.index, 4)
bsd-3-clause
biocommons/hgvs
hgvs/alignmentmapper.py
1
13454
# -*- coding: utf-8 -*- """Mapping positions between pairs of sequence alignments The AlignmentMapper class is at the heart of mapping between aligned sequences. """ # Implementation note re: "no-zero correction": HGVS doesn't have a # 0. Counting is -3, -2, -1, 1, 2, 3 :-/ Coordinate calculations must # take this discontinuity in c. positions into account. The # implementation of imaginary transcript positions creates a second # discontinuity. (By analogy with c., n.0 is declared to not exist.) # The strategy used in this code is to use internal c0 and n0 # coordinates, which include 0, for coordinate calculations and to # translate these to c. and n. positions as needed. # # imag. imag. # upstream 5' UTR CDS 3' UTR downstr # |> # - - - - - - ———————————— ||||||||||||||||| ——————————— - - - - - - # a b C D E f g h i # c. -4 -3 -2 -1 ! 1 2 3 ! *1 *2 *3 *4 # c0 -4 -3 -2 -1 0 1 2 3 4 5 6 # n0 -2 -1 0 1 2 3 4 5 6 7 8 # n. -2 -1 ! 1 2 3 4 5 6 7 8 9 # g. ... 123 124 125 126 127 128 129 130 131 132 133 ... # from __future__ import absolute_import, division, print_function, unicode_literals from six.moves import range from bioutils.coordinates import strand_int_to_pm import hgvs.location from hgvs import global_config from hgvs.exceptions import HGVSError, HGVSUsageError, HGVSDataNotAvailableError, HGVSInvalidIntervalError from hgvs.utils import build_tx_cigar from hgvs.utils.cigarmapper import CIGARMapper from hgvs.enums import Datum def _zbc_to_hgvs(i): """Convert zero-based coordinate to hgvs (1 based, missing zero)""" if i >= 0: i += 1 return i def _hgvs_to_zbc(i): """Convert hgvs (1 based, missing zero) """ if i >= 1: i -= 1 return i class AlignmentMapper(object): """Maps hgvs location objects between genomic (g), non-coding (n) and cds (c) coordinates according to a CIGAR string. 
:param hdp: HGVS Data Provider Interface-compliant instance (see :class:`hgvs.dataproviders.interface.Interface`) :param str tx_ac: string representing transcript accession (e.g., NM_000551.2) :param str alt_ac: string representing the reference sequence accession (e.g., NC_000019.10) :param str alt_aln_method: string representing the alignment method; valid values depend on data source """ __slots__ = ("tx_ac", "alt_ac", "alt_aln_method", "strand", "gc_offset", "cds_start_i", "cds_end_i", "tgt_len", "cigarmapper", "ref_pos", "tgt_pos", "cigar_op") def __init__(self, hdp, tx_ac, alt_ac, alt_aln_method): self.tx_ac = tx_ac self.alt_ac = alt_ac self.alt_aln_method = alt_aln_method if self.alt_aln_method != "transcript": tx_info = hdp.get_tx_info(self.tx_ac, self.alt_ac, self.alt_aln_method) if tx_info is None: raise HGVSDataNotAvailableError( "AlignmentMapper(tx_ac={self.tx_ac}, " "alt_ac={self.alt_ac}, alt_aln_method={self.alt_aln_method}): " "No transcript info".format(self=self)) tx_exons = hdp.get_tx_exons(self.tx_ac, self.alt_ac, self.alt_aln_method) if tx_exons is None: raise HGVSDataNotAvailableError( "AlignmentMapper(tx_ac={self.tx_ac}, " "alt_ac={self.alt_ac}, alt_aln_method={self.alt_aln_method}): " "No transcript exons".format(self=self)) # hgvs-386: An assumption when building the cigar string # is that exons are adjacent. Assert that here. 
sorted_tx_exons = sorted(tx_exons, key=lambda e: e["ord"]) for i in range(1, len(sorted_tx_exons)): if sorted_tx_exons[i - 1]["tx_end_i"] != sorted_tx_exons[i]["tx_start_i"]: raise HGVSDataNotAvailableError( "AlignmentMapper(tx_ac={self.tx_ac}, " "alt_ac={self.alt_ac}, alt_aln_method={self.alt_aln_method}): " "Exons {a} and {b} are not adjacent".format(self=self, a=i, b=i + 1)) self.strand = tx_exons[0]["alt_strand"] self.gc_offset = tx_exons[0]["alt_start_i"] self.cds_start_i = tx_info["cds_start_i"] self.cds_end_i = tx_info["cds_end_i"] cigar = build_tx_cigar(tx_exons, self.strand) self.cigarmapper = CIGARMapper(cigar) self.tgt_len = self.cigarmapper.tgt_len else: # this covers the identity cases n <-> c tx_identity_info = hdp.get_tx_identity_info(self.tx_ac) if tx_identity_info is None: raise HGVSDataNotAvailableError( "AlignmentMapper(tx_ac={self.tx_ac}, " "alt_ac={self.alt_ac}, alt_aln_method={self.alt_aln_method}): " "No transcript info".format(self=self)) self.cds_start_i = tx_identity_info["cds_start_i"] self.cds_end_i = tx_identity_info["cds_end_i"] self.tgt_len = sum(tx_identity_info["lengths"]) self.cigarmapper = None assert not ( (self.cds_start_i is None) ^ (self.cds_end_i is None)), "CDS start and end must both be defined or neither defined" def __str__(self): return "{self.__class__.__name__}: {self.tx_ac} ~ {self.alt_ac} ~ {self.alt_aln_method}; " \ "{strand_pm} strand; offset={self.gc_offset}".format( self=self, strand_pm=strand_int_to_pm(self.strand)) def g_to_n(self, g_interval, strict_bounds=None): """convert a genomic (g.) interval to a transcript cDNA (n.) interval""" if strict_bounds is None: strict_bounds = global_config.mapping.strict_bounds grs, gre = g_interval.start.base - 1 - self.gc_offset, g_interval.end.base - 1 - self.gc_offset # frs, fre = (f)orward (r)na (s)tart & (e)nd; forward w.r.t. 
genome frs, frs_offset, frs_cigar = self.cigarmapper.map_ref_to_tgt(pos=grs, end="start", strict_bounds=strict_bounds) fre, fre_offset, fre_cigar = self.cigarmapper.map_ref_to_tgt(pos=gre, end="end", strict_bounds=strict_bounds) if self.strand == -1: frs, fre = self.tgt_len - 1 - fre, self.tgt_len - 1 - frs frs_offset, fre_offset = -fre_offset, -frs_offset # The returned interval would be uncertain when locating at alignment gaps return hgvs.location.BaseOffsetInterval( start=hgvs.location.BaseOffsetPosition( base=_zbc_to_hgvs(frs), offset=frs_offset, datum=Datum.SEQ_START), end=hgvs.location.BaseOffsetPosition( base=_zbc_to_hgvs(fre), offset=fre_offset, datum=Datum.SEQ_START), uncertain=frs_cigar in 'DI' or fre_cigar in 'DI') def n_to_g(self, n_interval, strict_bounds=None): """convert a transcript (n.) interval to a genomic (g.) interval""" if strict_bounds is None: strict_bounds = global_config.mapping.strict_bounds frs = _hgvs_to_zbc(n_interval.start.base) start_offset = n_interval.start.offset fre = _hgvs_to_zbc(n_interval.end.base) end_offset = n_interval.end.offset if self.strand == -1: fre, frs = self.tgt_len - 1 - frs, self.tgt_len - 1 - fre start_offset, end_offset = -end_offset, -start_offset # returns the genomic range start (grs) and end (gre) grs, _, grs_cigar = self.cigarmapper.map_tgt_to_ref(pos=frs, end="start", strict_bounds=strict_bounds) gre, _, gre_cigar = self.cigarmapper.map_tgt_to_ref(pos=fre, end="end", strict_bounds=strict_bounds) grs, gre = grs + self.gc_offset + 1, gre + self.gc_offset + 1 gs, ge = grs + start_offset, gre + end_offset # The returned interval would be uncertain when locating at alignment gaps return hgvs.location.Interval( start=hgvs.location.SimplePosition(gs, uncertain=n_interval.start.uncertain), end=hgvs.location.SimplePosition(ge, uncertain=n_interval.end.uncertain), uncertain=grs_cigar in 'DI' or gre_cigar in 'DI') def n_to_c(self, n_interval, strict_bounds=None): """convert a transcript cDNA (n.) 
interval to a transcript CDS (c.) interval""" if strict_bounds is None: strict_bounds = global_config.mapping.strict_bounds if self.cds_start_i is None: # cds_start_i defined iff cds_end_i defined; see assertion above raise HGVSUsageError( "CDS is undefined for {self.tx_ac}; cannot map to c. coordinate (non-coding transcript?)" .format(self=self)) if strict_bounds and (n_interval.start.base <= 0 or n_interval.end.base > self.tgt_len): raise HGVSInvalidIntervalError( "The given coordinate is outside the bounds of the reference sequence.") def pos_n_to_c(pos): if pos.base <= self.cds_start_i: c = pos.base - self.cds_start_i - (1 if pos.base > 0 else 0) c_datum = Datum.CDS_START elif pos.base > self.cds_start_i and pos.base <= self.cds_end_i: c = pos.base - self.cds_start_i c_datum = Datum.CDS_START else: c = pos.base - self.cds_end_i c_datum = Datum.CDS_END return hgvs.location.BaseOffsetPosition(base=c, offset=pos.offset, datum=c_datum) c_interval = hgvs.location.BaseOffsetInterval(start=pos_n_to_c(n_interval.start), end=pos_n_to_c(n_interval.end), uncertain=n_interval.uncertain) return c_interval def c_to_n(self, c_interval, strict_bounds=None): """convert a transcript CDS (c.) interval to a transcript cDNA (n.) 
interval""" if strict_bounds is None: strict_bounds = global_config.mapping.strict_bounds if self.cds_start_i is None: raise HGVSUsageError( "CDS is undefined for {self.tx_ac}; this accession appears to be for a non-coding transcript" .format(self=self)) def pos_c_to_n(pos): if pos.datum == Datum.CDS_START: n = pos.base + self.cds_start_i if pos.base < 0: # correct for lack of c.0 coordinate n += 1 elif pos.datum == Datum.CDS_END: n = pos.base + self.cds_end_i if n <= 0: # correct for lack of n.0 coordinate n -= 1 if (n <= 0 or n > self.tgt_len): if strict_bounds: raise HGVSInvalidIntervalError(f"c.{pos} coordinate is out of bounds") return hgvs.location.BaseOffsetPosition(base=n, offset=pos.offset, datum=Datum.SEQ_START) n_start = pos_c_to_n(c_interval.start) n_end = pos_c_to_n(c_interval.end) n_interval = hgvs.location.BaseOffsetInterval(start=pos_c_to_n(c_interval.start), end=pos_c_to_n(c_interval.end), uncertain=c_interval.uncertain) return n_interval def g_to_c(self, g_interval, strict_bounds=None): """convert a genomic (g.) interval to a transcript CDS (c.) interval""" return self.n_to_c(self.g_to_n(g_interval), strict_bounds=strict_bounds) def c_to_g(self, c_interval, strict_bounds=None): """convert a transcript CDS (c.) interval to a genomic (g.) 
interval""" return self.n_to_g(self.c_to_n(c_interval), strict_bounds=strict_bounds) @property def is_coding_transcript(self): if ((self.cds_start_i is not None) ^ (self.cds_end_i is not None)): raise HGVSError("{self.tx_ac}: CDS start_i and end_i" " must be both defined or both undefined".format(self=self)) return self.cds_start_i is not None def g_interval_is_inbounds(self, ival): grs = ival.start.base - 1 - self.gc_offset gre = ival.end.base - 1 - self.gc_offset return grs >= 0 and gre <= self.cigarmapper.ref_len # <LICENSE> # Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # </LICENSE>
apache-2.0
elationfoundation/rss_keyword_collector
rss_keyword_collector/feed.py
1
6292
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of rss_keyword_parser, a simple term extractor from rss feeds.
# Copyright © 2015 seamus tuohy, <stuohy@internews.org>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.

from datetime import datetime

import feedparser
from twisted.application import service
from twisted.enterprise import adbapi
from twisted.internet import task, protocol
from twisted.internet.defer import inlineCallbacks
from twisted.web.client import getPage


class FeedService(service.Service):
    """Twisted service that periodically polls RSS feeds into a database."""

    def __init__(self, feed_db, interval=30):
        """
        Args:
            feed_db (named_tuple):
                dbmodule: an import string used to obtain a DB-API compatible
                    module (e.g. 'pyPgSQL.PgSQL')
                name: the name of the database to connect to within the module
                user: the username to log in to the database with
                password: the user's password to the database
                host: the host where the database can be reached
                port: the port used to access the database
            interval (int): Number of minutes between feed queries
                (minimum one minute).
        """
        # BUG FIX: the interval is documented in minutes but LoopingCall
        # takes seconds; the old code divided by 60 instead of multiplying,
        # which collapsed every configuration down to a 60-second poll.
        self.interval = max(int(interval), 1) * 60  # seconds between polls
        self.dbpool = adbapi.ConnectionPool(feed_db.dbmodule,
                                            host=feed_db.host,
                                            port=feed_db.port,
                                            database=feed_db.name,
                                            user=feed_db.user,
                                            password=feed_db.password,
                                            cp_noisy=True)
        # Create a feed collector and a (not yet started) polling loop.
        self.feed_collector = FeedCollector(self.dbpool)
        # BUG FIX: LoopingCall.start() returns a Deferred, not the
        # LoopingCall itself, so the old ``self.call.cancel()`` in
        # stopService() could never stop the loop.  Keep the LoopingCall
        # object and drive it from the service lifecycle hooks instead.
        self.call = task.LoopingCall(self.feed_collector.run)

    def startService(self):
        """Run the collector immediately, then once every interval."""
        print("Starting Feed Collector")
        service.Service.startService(self)
        self.call.start(self.interval)

    def stopService(self):
        """Stop the polling loop if it is running."""
        if self.call.running:
            self.call.stop()
        service.Service.stopService(self)


class FeedCollector(protocol.ClientFactory):
    """Fetches every feed registered in the database and stores its entries."""

    def __init__(self, dbconn):
        self.dbpool = dbconn
        self.feeds = set()

    @inlineCallbacks
    def run(self):
        """Download each registered feed and upsert its entries."""
        feeds = yield self.get_feed_list()
        for url in feeds:
            # ``url`` is a one-column database row; the URL string is url[0].
            page = yield getPage(url[0])
            # BUG FIX: pass the URL string, not the whole row tuple, so
            # parse_entries() records a real URL for each entry.
            entry_feeds = self.parse_entries(page, url[0])
            # BUG FIX: dict.iteritems() does not exist on Python 3.
            for _entry_name, entry_items in entry_feeds.items():
                yield self.update_entries(entry_items)

    def query_feeds(self):
        """Yield (Deferred page, feed url) pairs for every tracked feed."""
        for feed in self.feeds:
            page = getPage(feed)
            page.addCallback(self.update_feed, url=feed)
            yield page, feed

    def update_feed(self, page, url):
        """Refresh the channel-level metadata stored for one feed.

        Returns the Deferred from the database operation.
        """
        feed = feedparser.parse(page)
        channel_items = ["title", "description", "language", "lastBuildDate",
                         "ttl", "pubDate", "copyright", "webMaster",
                         "managingEditor"]
        channel_info = {item: feed.feed.get(item, "") for item in channel_items}
        return self.dbpool.runOperation("UPDATE feeds "
                                        "SET title = %s, "
                                        "language = %s, "
                                        "description = %s "
                                        "WHERE url = %s",
                                        (channel_info['title'],
                                         channel_info['language'],
                                         channel_info['description'],
                                         url))

    def get_feed_list(self):
        """Return a Deferred firing with all feed URLs in the database."""
        return self.dbpool.runQuery("SELECT url FROM feeds")

    def parse_entries(self, page, feed_url):
        """Parse a fetched feed page into a {title: entry-dict} mapping.

        Each entry dict carries the entry fields plus pubDate/scraped
        timestamps, the feed URL, and selected channel fields, with string
        values truncated to the database column limits.
        """
        feed = feedparser.parse(page)
        channel_items = ["language"]
        entry_items = ["title", "link", "description", "author",
                       "category", "guid", "comments"]
        entries = {}
        for entry in feed.entries:
            record = {item: entry.get(item, "") for item in entry_items}
            record["pubDate"] = entry.get("pubDate", datetime.now())
            record["scraped"] = datetime.now()
            record["url"] = feed_url
            # Enforcing the database string limits
            record["description"] = record["description"][0:1000]
            record["url"] = record["url"][0:512]
            record["title"] = record["title"][0:500]
            for item in channel_items:
                record[item] = feed.feed.get(item, "")
            entries[entry["title"]] = record
        return entries

    def update_entries(self, entry):
        """Insert one entry row, ignoring duplicates on the url column.

        Returns the Deferred from the database operation.
        """
        return self.dbpool.runOperation("INSERT INTO entries "
                                        "(url, language, description, "
                                        "title, published, scraped, feed) "
                                        "VALUES "
                                        "(%s, %s, %s, %s, %s, 'false', %s) "
                                        "ON CONFLICT (url) DO NOTHING ",
                                        (entry.get('link', ""),
                                         entry.get('language', ""),
                                         entry.get('description', ""),
                                         entry.get('title', ""),
                                         entry.get('pubDate', datetime.now()),
                                         entry.get('url', ""),))
lgpl-3.0
enthought/pyside
tests/QtDeclarative/qdeclarativeview_test.py
6
1466
'''Test cases for QDeclarativeView'''

import unittest

from PySide.QtCore import QUrl, QObject, Property, Slot
from PySide.QtDeclarative import QDeclarativeView

from helper import adjust_filename, TimedQApplication


class MyObject(QObject):
    """Minimal QObject exposing a read-only ``title`` property for QML."""

    def __init__(self, text, parent=None):
        QObject.__init__(self, parent)
        self._text = text

    def getText(self):
        return self._text

    @Slot(str)
    def qmlText(self, text):
        self._qmlText = text

    title = Property(str, getText)


class TestQDeclarativeView(TimedQApplication):
    """Exercise QDeclarativeView with Python data exported as a QML model."""

    def _load_with_model(self, qml_file, model):
        # Shared helper: publish ``model`` as "myModel" and load ``qml_file``.
        declarative_view = QDeclarativeView()
        declarative_view.rootContext().setContextProperty("myModel", model)
        qml_url = QUrl.fromLocalFile(adjust_filename(qml_file, __file__))
        declarative_view.setSource(qml_url)
        declarative_view.show()
        return declarative_view

    def testQDeclarativeViewList(self):
        items = ["Item 1", "Item 2", "Item 3", "Item 4"]
        viewer = self._load_with_model('view.qml', items)
        self.assertEqual(viewer.status(), QDeclarativeView.Ready)

    def testModelExport(self):
        items = [MyObject("Item 1"), MyObject("Item 2"),
                 MyObject("Item 3"), MyObject("Item 4")]
        viewer = self._load_with_model('viewmodel.qml', items)
        self.assertEqual(viewer.status(), QDeclarativeView.Ready)


if __name__ == '__main__':
    unittest.main()
lgpl-2.1
pelya/commandergenius
project/jni/python/src/Lib/test/test_MimeWriter.py
56
7669
"""Test program for MimeWriter module.

The test program was too big to comfortably fit in the MimeWriter
class, so it's here in its own file.

This should generate Barry's example, modulo some quotes and newlines.
"""

# NOTE: this is Python 2 code (StringIO module, test.test_support, and the
# long-deprecated MimeWriter module); it is kept that way deliberately.

import unittest, StringIO
from test.test_support import run_unittest

import warnings
# MimeWriter has been deprecated since 2.3; silence the warning so the
# test suite stays clean while the module is still shipped.
warnings.filterwarnings("ignore", "the MimeWriter module is deprecated.*",
                        DeprecationWarning)

from MimeWriter import MimeWriter

# Payload constants: an ISL interface, a knowbot program, and its initial
# state.  Their exact byte content (including blank lines) matters because
# the test compares the generated message against OUTPUT verbatim.
SELLER = '''\
INTERFACE Seller-1;

TYPE Seller = OBJECT
    DOCUMENTATION "A simple Seller interface to test ILU"
    METHODS
        price():INTEGER,
    END;
'''

BUYER = '''\
class Buyer:
    def __setup__(self, maxprice):
        self._maxprice = maxprice

    def __main__(self, kos):
        """Entry point upon arrival at a new KOS."""
        broker = kos.broker()
        # B4 == Barry's Big Bass Business :-)
        seller = broker.lookup('Seller_1.Seller', 'B4')
        if seller:
            price = seller.price()
            print 'Seller wants $', price, '... '
            if price > self._maxprice:
                print 'too much!'
            else:
                print "I'll take it!"
        else:
            print 'no seller found here'
'''                                 # Don't ask why this comment is here

STATE = '''\
# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)
'''

SIMPLE_METADATA = [
    ("Interpreter", "python"),
    ("Interpreter-Version", "1.3"),
    ("Owner-Name", "Barry Warsaw"),
    ("Owner-Rendezvous", "bwarsaw@cnri.reston.va.us"),
    ("Home-KSS", "kss.cnri.reston.va.us"),
    ("Identifier", "hdl://cnri.kss/my_first_knowbot"),
    ("Launch-Date", "Mon Feb 12 16:39:03 EST 1996"),
    ]

COMPLEX_METADATA = [
    ("Metadata-Type", "complex"),
    ("Metadata-Key", "connection"),
    ("Access", "read-only"),
    ("Connection-Description", "Barry's Big Bass Business"),
    ("Connection-Id", "B4"),
    ("Connection-Direction", "client"),
    ]

EXTERNAL_METADATA = [
    ("Metadata-Type", "complex"),
    ("Metadata-Key", "generic-interface"),
    ("Access", "read-only"),
    ("Connection-Description", "Generic Interface for All Knowbots"),
    ("Connection-Id", "generic-kp"),
    ("Connection-Direction", "client"),
    ]

# The expected MIME message, byte for byte.
OUTPUT = '''\
From: bwarsaw@cnri.reston.va.us
Date: Mon Feb 12 17:21:48 EST 1996
To: kss-submit@cnri.reston.va.us
MIME-Version: 1.0
Content-Type: multipart/knowbot;
    boundary="801spam999";
    version="0.1"

This is a multi-part message in MIME format.

--801spam999
Content-Type: multipart/knowbot-metadata;
    boundary="802spam999"


--802spam999
Content-Type: message/rfc822
KP-Metadata-Type: simple
KP-Access: read-only

KPMD-Interpreter: python
KPMD-Interpreter-Version: 1.3
KPMD-Owner-Name: Barry Warsaw
KPMD-Owner-Rendezvous: bwarsaw@cnri.reston.va.us
KPMD-Home-KSS: kss.cnri.reston.va.us
KPMD-Identifier: hdl://cnri.kss/my_first_knowbot
KPMD-Launch-Date: Mon Feb 12 16:39:03 EST 1996

--802spam999
Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: connection
KP-Access: read-only
KP-Connection-Description: Barry's Big Bass Business
KP-Connection-Id: B4
KP-Connection-Direction: client

INTERFACE Seller-1;

TYPE Seller = OBJECT
    DOCUMENTATION "A simple Seller interface to test ILU"
    METHODS
        price():INTEGER,
    END;

--802spam999
Content-Type: message/external-body;
    access-type="URL";
    URL="hdl://cnri.kss/generic-knowbot"

Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: generic-interface
KP-Access: read-only
KP-Connection-Description: Generic Interface for All Knowbots
KP-Connection-Id: generic-kp
KP-Connection-Direction: client


--802spam999--

--801spam999
Content-Type: multipart/knowbot-code;
    boundary="803spam999"


--803spam999
Content-Type: text/plain
KP-Module-Name: BuyerKP

class Buyer:
    def __setup__(self, maxprice):
        self._maxprice = maxprice

    def __main__(self, kos):
        """Entry point upon arrival at a new KOS."""
        broker = kos.broker()
        # B4 == Barry's Big Bass Business :-)
        seller = broker.lookup('Seller_1.Seller', 'B4')
        if seller:
            price = seller.price()
            print 'Seller wants $', price, '... '
            if price > self._maxprice:
                print 'too much!'
            else:
                print "I'll take it!"
        else:
            print 'no seller found here'

--803spam999--

--801spam999
Content-Type: multipart/knowbot-state;
    boundary="804spam999"
KP-Main-Module: main


--804spam999
Content-Type: text/plain
KP-Module-Name: main

# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)

--804spam999--

--801spam999--
'''

class MimewriterTest(unittest.TestCase):

    def test(self):
        """Generate Barry's example message and compare it to OUTPUT."""
        buf = StringIO.StringIO()

        # Toplevel headers

        toplevel = MimeWriter(buf)
        toplevel.addheader("From", "bwarsaw@cnri.reston.va.us")
        toplevel.addheader("Date", "Mon Feb 12 17:21:48 EST 1996")
        toplevel.addheader("To", "kss-submit@cnri.reston.va.us")
        toplevel.addheader("MIME-Version", "1.0")

        # Toplevel body parts

        f = toplevel.startmultipartbody("knowbot", "801spam999",
                                        [("version", "0.1")], prefix=0)
        f.write("This is a multi-part message in MIME format.\n")

        # First toplevel body part: metadata

        md = toplevel.nextpart()
        md.startmultipartbody("knowbot-metadata", "802spam999")

        # Metadata part 1

        md1 = md.nextpart()
        md1.addheader("KP-Metadata-Type", "simple")
        md1.addheader("KP-Access", "read-only")
        m = MimeWriter(md1.startbody("message/rfc822"))
        for key, value in SIMPLE_METADATA:
            m.addheader("KPMD-" + key, value)
        m.flushheaders()
        del md1

        # Metadata part 2

        md2 = md.nextpart()
        for key, value in COMPLEX_METADATA:
            md2.addheader("KP-" + key, value)
        f = md2.startbody("text/isl")
        f.write(SELLER)
        del md2

        # Metadata part 3

        md3 = md.nextpart()
        f = md3.startbody("message/external-body",
                          [("access-type", "URL"),
                           ("URL", "hdl://cnri.kss/generic-knowbot")])
        m = MimeWriter(f)
        for key, value in EXTERNAL_METADATA:
            md3.addheader("KP-" + key, value)
        md3.startbody("text/isl")
        # Phantom body doesn't need to be written

        md.lastpart()

        # Second toplevel body part: code

        code = toplevel.nextpart()
        code.startmultipartbody("knowbot-code", "803spam999")

        # Code: buyer program source

        buyer = code.nextpart()
        buyer.addheader("KP-Module-Name", "BuyerKP")
        f = buyer.startbody("text/plain")
        f.write(BUYER)

        code.lastpart()

        # Third toplevel body part: state

        state = toplevel.nextpart()
        state.addheader("KP-Main-Module", "main")
        state.startmultipartbody("knowbot-state", "804spam999")

        # State: a bunch of assignments

        st = state.nextpart()
        st.addheader("KP-Module-Name", "main")
        f = st.startbody("text/plain")
        f.write(STATE)

        state.lastpart()

        # End toplevel body parts

        toplevel.lastpart()

        self.assertEqual(buf.getvalue(), OUTPUT)

def test_main():
    run_unittest(MimewriterTest)

if __name__ == '__main__':
    test_main()
lgpl-2.1
fbradyirl/home-assistant
homeassistant/components/sytadin/sensor.py
1
4655
"""Support for Sytadin Traffic, French Traffic Supervision."""
import logging
import re
from datetime import timedelta

import requests
import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
    LENGTH_KILOMETERS,
    CONF_MONITORED_CONDITIONS,
    CONF_NAME,
    ATTR_ATTRIBUTION,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle

_LOGGER = logging.getLogger(__name__)

URL = "http://www.sytadin.fr/sys/barometres_de_la_circulation.jsp.html"

ATTRIBUTION = "Data provided by Direction des routes Île-de-France (DiRIF)"

DEFAULT_NAME = "Sytadin"

# Matches the first decimal or integer number in the scraped cell text.
REGEX = r"(\d*\.\d+|\d+)"

OPTION_TRAFFIC_JAM = "traffic_jam"
OPTION_MEAN_VELOCITY = "mean_velocity"
OPTION_CONGESTION = "congestion"

# option key -> [human-readable label, unit of measurement]
SENSOR_TYPES = {
    OPTION_CONGESTION: ["Congestion", ""],
    OPTION_MEAN_VELOCITY: ["Mean Velocity", LENGTH_KILOMETERS + "/h"],
    OPTION_TRAFFIC_JAM: ["Traffic Jam", LENGTH_KILOMETERS],
}

MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_MONITORED_CONDITIONS, default=[OPTION_TRAFFIC_JAM]): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up of the Sytadin Traffic sensor platform."""
    _LOGGER.warning(
        "The sytadin integration is deprecated and will be removed "
        "in Home Assistant 0.100.0. For more information see ADR-0004:"
        "https://github.com/home-assistant/architecture/blob/master/adr/0004-webscraping.md"
    )
    name = config.get(CONF_NAME)
    # One shared scraper instance feeds every configured sensor.
    sytadin = SytadinData(URL)

    dev = []
    for option in config.get(CONF_MONITORED_CONDITIONS):
        _LOGGER.debug("Sensor device - %s", option)
        dev.append(
            SytadinSensor(
                sytadin, name, option, SENSOR_TYPES[option][0], SENSOR_TYPES[option][1]
            )
        )
    add_entities(dev, True)


class SytadinSensor(Entity):
    """Representation of a Sytadin Sensor."""

    def __init__(self, data, name, sensor_type, option, unit):
        """Initialize the sensor.

        Note: the call site passes the option *key* as ``sensor_type`` and
        the human-readable label as ``option``; the attributes keep that
        mapping (``_type`` = key used for dispatch, ``_option`` = label
        shown in the entity name).
        """
        self.data = data
        self._state = None
        self._name = name
        self._option = option
        self._type = sensor_type
        self._unit = unit

    @property
    def name(self):
        """Return the name of the sensor."""
        return "{} {}".format(self._name, self._option)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    def update(self):
        """Fetch new state data for the sensor."""
        self.data.update()

        # BUG FIX: the old guard tested ``self.data is None``, which can
        # never be true because ``self.data`` is the SytadinData helper
        # itself.  Test the helper's ``data`` attribute instead, which is
        # None whenever the last fetch failed (or never succeeded).
        if self.data.data is None:
            return

        if self._type == OPTION_TRAFFIC_JAM:
            self._state = self.data.traffic_jam
        elif self._type == OPTION_MEAN_VELOCITY:
            self._state = self.data.mean_velocity
        elif self._type == OPTION_CONGESTION:
            self._state = self.data.congestion


class SytadinData:
    """The class for handling the data retrieval."""

    def __init__(self, resource):
        """Initialize the data object."""
        self._resource = resource
        # ``data`` doubles as a success flag: None until a fetch succeeds,
        # and reset to None whenever a fetch fails.
        self.data = None
        self.traffic_jam = self.mean_velocity = self.congestion = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the Sytadin."""
        from bs4 import BeautifulSoup

        try:
            raw_html = requests.get(self._resource, timeout=10).text
            data = BeautifulSoup(raw_html, "html.parser")

            values = data.select(".barometre_valeur")
            parse_traffic_jam = re.search(REGEX, values[0].text)
            if parse_traffic_jam:
                self.traffic_jam = parse_traffic_jam.group()
            parse_mean_velocity = re.search(REGEX, values[1].text)
            if parse_mean_velocity:
                self.mean_velocity = parse_mean_velocity.group()
            parse_congestion = re.search(REGEX, values[2].text)
            if parse_congestion:
                self.congestion = parse_congestion.group()
            # BUG FIX: record the successful fetch; the old code never
            # assigned ``self.data`` on success, so consumers could not
            # distinguish fresh values from a failed update.
            self.data = data
        except requests.exceptions.ConnectionError:
            _LOGGER.error("Connection error")
            self.data = None
apache-2.0
Rudloff/youtube-dl
youtube_dl/extractor/hornbunny.py
169
1813
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class HornBunnyIE(InfoExtractor): _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html' _TEST = { 'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html', 'md5': '95e40865aedd08eff60272b704852ad7', 'info_dict': { 'id': '5227', 'ext': 'flv', 'title': 'panty slut jerk off instruction', 'duration': 550, 'age_limit': 18, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage( url, video_id, note='Downloading initial webpage') title = self._html_search_regex( r'class="title">(.*?)</h2>', webpage, 'title') redirect_url = self._html_search_regex( r'pg&settings=(.*?)\|0"\);', webpage, 'title') webpage2 = self._download_webpage(redirect_url, video_id) video_url = self._html_search_regex( r'flvMask:(.*?);', webpage2, 'video_url') duration = parse_duration(self._search_regex( r'<strong>Runtime:</strong>\s*([0-9:]+)</div>', webpage, 'duration', fatal=False)) view_count = int_or_none(self._search_regex( r'<strong>Views:</strong>\s*(\d+)</div>', webpage, 'view count', fatal=False)) return { 'id': video_id, 'url': video_url, 'title': title, 'ext': 'flv', 'duration': duration, 'view_count': view_count, 'age_limit': 18, }
unlicense
krishnab-datakind/mining-data-acquisition
travis_pypi_setup.py
1
4092
#!/usr/bin/env python # -*- coding: utf-8 -*- """Update encrypted deploy password in Travis config file.""" from __future__ import print_function import base64 import json import os from getpass import getpass import yaml from cryptography.hazmat.primitives.serialization import load_pem_public_key from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 try: from urllib import urlopen except ImportError: from urllib.request import urlopen GITHUB_REPO = '00krishna/mining_data_acquisition' TRAVIS_CONFIG_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), '.travis.yml') def load_key(pubkey): """Load public RSA key. Work around keys with incorrect header/footer format. Read more about RSA encryption with cryptography: https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/ """ try: return load_pem_public_key(pubkey.encode(), default_backend()) except ValueError: # workaround for https://github.com/travis-ci/travis-api/issues/196 pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END') return load_pem_public_key(pubkey.encode(), default_backend()) def encrypt(pubkey, password): """Encrypt password using given RSA public key and encode it with base64. The encrypted password can only be decrypted by someone with the private key (in this case, only Travis). """ key = load_key(pubkey) encrypted_password = key.encrypt(password, PKCS1v15()) return base64.b64encode(encrypted_password) def fetch_public_key(repo): """Download RSA public key Travis will use for this repo. Travis API docs: http://docs.travis-ci.com/api/#repository-keys """ keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo) data = json.loads(urlopen(keyurl).read().decode()) if 'key' not in data: errmsg = "Could not find public key for repo: {}.\n".format(repo) errmsg += "Have you already added your GitHub repo to Travis?" 
raise ValueError(errmsg) return data['key'] def prepend_line(filepath, line): """Rewrite a file adding a line to its beginning.""" with open(filepath) as f: lines = f.readlines() lines.insert(0, line) with open(filepath, 'w') as f: f.writelines(lines) def load_yaml_config(filepath): """Load yaml config file at the given path.""" with open(filepath) as f: return yaml.load(f) def save_yaml_config(filepath, config): """Save yaml config file at the given path.""" with open(filepath, 'w') as f: yaml.dump(config, f, default_flow_style=False) def update_travis_deploy_password(encrypted_password): """Put `encrypted_password` into the deploy section of .travis.yml.""" config = load_yaml_config(TRAVIS_CONFIG_FILE) config['deploy']['password'] = dict(secure=encrypted_password) save_yaml_config(TRAVIS_CONFIG_FILE, config) line = ('# This file was autogenerated and will overwrite' ' each time you run travis_pypi_setup.py\n') prepend_line(TRAVIS_CONFIG_FILE, line) def main(args): """Add a PyPI password to .travis.yml so that Travis can deploy to PyPI. Fetch the Travis public key for the repo, and encrypt the PyPI password with it before adding, so that only Travis can decrypt and use the PyPI password. """ public_key = fetch_public_key(args.repo) password = args.password or getpass('PyPI password: ') update_travis_deploy_password(encrypt(public_key, password.encode())) print("Wrote encrypted password to .travis.yml -- you're ready to deploy") if '__main__' == __name__: import argparse parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--repo', default=GITHUB_REPO, help='GitHub repo (default: %s)' % GITHUB_REPO) parser.add_argument('--password', help='PyPI password (will prompt if not provided)') args = parser.parse_args() main(args)
mit
cgstudiomap/cgstudiomap
main/eggs/Django-1.9-py2.7.egg/django/forms/boundfield.py
135
8680
from __future__ import unicode_literals import datetime from django.forms.utils import flatatt, pretty_name from django.forms.widgets import Textarea, TextInput from django.utils import six from django.utils.encoding import ( force_text, python_2_unicode_compatible, smart_text, ) from django.utils.html import conditional_escape, format_html, html_safe from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ __all__ = ('BoundField',) UNSET = object() @html_safe @python_2_unicode_compatible class BoundField(object): "A Field plus data" def __init__(self, form, field, name): self.form = form self.field = field self.name = name self.html_name = form.add_prefix(name) self.html_initial_name = form.add_initial_prefix(name) self.html_initial_id = form.add_initial_prefix(self.auto_id) if self.field.label is None: self.label = pretty_name(name) else: self.label = self.field.label self.help_text = field.help_text or '' self._initial_value = UNSET def __str__(self): """Renders this field as an HTML widget.""" if self.field.show_hidden_initial: return self.as_widget() + self.as_hidden(only_initial=True) return self.as_widget() def __iter__(self): """ Yields rendered strings that comprise all widgets in this BoundField. This really is only useful for RadioSelect widgets, so that you can iterate over individual radio buttons in a template. """ id_ = self.field.widget.attrs.get('id') or self.auto_id attrs = {'id': id_} if id_ else {} for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs): yield subwidget def __len__(self): return len(list(self.__iter__())) def __getitem__(self, idx): # Prevent unnecessary reevaluation when accessing BoundField's attrs # from templates. if not isinstance(idx, six.integer_types): raise TypeError return list(self.__iter__())[idx] @property def errors(self): """ Returns an ErrorList for this field. Returns an empty ErrorList if there are none. 
""" return self.form.errors.get(self.name, self.form.error_class()) def as_widget(self, widget=None, attrs=None, only_initial=False): """ Renders the field by rendering the passed widget, adding any HTML attributes passed as attrs. If no widget is specified, then the field's default widget will be used. """ if not widget: widget = self.field.widget if self.field.localize: widget.is_localized = True attrs = attrs or {} if self.field.disabled: attrs['disabled'] = True auto_id = self.auto_id if auto_id and 'id' not in attrs and 'id' not in widget.attrs: if not only_initial: attrs['id'] = auto_id else: attrs['id'] = self.html_initial_id if not only_initial: name = self.html_name else: name = self.html_initial_name return force_text(widget.render(name, self.value(), attrs=attrs)) def as_text(self, attrs=None, **kwargs): """ Returns a string of HTML for representing this as an <input type="text">. """ return self.as_widget(TextInput(), attrs, **kwargs) def as_textarea(self, attrs=None, **kwargs): "Returns a string of HTML for representing this as a <textarea>." return self.as_widget(Textarea(), attrs, **kwargs) def as_hidden(self, attrs=None, **kwargs): """ Returns a string of HTML for representing this as an <input type="hidden">. """ return self.as_widget(self.field.hidden_widget(), attrs, **kwargs) @property def data(self): """ Returns the data for this BoundField, or None if it wasn't given. """ return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name) def value(self): """ Returns the value for this BoundField, using the initial value if the form is not bound or the data otherwise. """ if not self.form.is_bound: data = self.form.initial.get(self.name, self.field.initial) if callable(data): if self._initial_value is not UNSET: data = self._initial_value else: data = data() # If this is an auto-generated default date, nix the # microseconds for standardized handling. See #22502. 
if (isinstance(data, (datetime.datetime, datetime.time)) and not self.field.widget.supports_microseconds): data = data.replace(microsecond=0) self._initial_value = data else: data = self.field.bound_data( self.data, self.form.initial.get(self.name, self.field.initial) ) return self.field.prepare_value(data) def label_tag(self, contents=None, attrs=None, label_suffix=None): """ Wraps the given contents in a <label>, if the field has an ID attribute. contents should be 'mark_safe'd to avoid HTML escaping. If contents aren't given, uses the field's HTML-escaped label. If attrs are given, they're used as HTML attributes on the <label> tag. label_suffix allows overriding the form's label_suffix. """ contents = contents or self.label if label_suffix is None: label_suffix = (self.field.label_suffix if self.field.label_suffix is not None else self.form.label_suffix) # Only add the suffix if the label does not end in punctuation. # Translators: If found as last label character, these punctuation # characters will prevent the default label_suffix to be appended to the label if label_suffix and contents and contents[-1] not in _(':?.!'): contents = format_html('{}{}', contents, label_suffix) widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id if id_: id_for_label = widget.id_for_label(id_) if id_for_label: attrs = dict(attrs or {}, **{'for': id_for_label}) if self.field.required and hasattr(self.form, 'required_css_class'): attrs = attrs or {} if 'class' in attrs: attrs['class'] += ' ' + self.form.required_css_class else: attrs['class'] = self.form.required_css_class attrs = flatatt(attrs) if attrs else '' contents = format_html('<label{}>{}</label>', attrs, contents) else: contents = conditional_escape(contents) return mark_safe(contents) def css_classes(self, extra_classes=None): """ Returns a string of space-separated CSS classes for this field. 
""" if hasattr(extra_classes, 'split'): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, 'error_css_class'): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, 'required_css_class'): extra_classes.add(self.form.required_css_class) return ' '.join(extra_classes) @property def is_hidden(self): "Returns True if this BoundField's widget is hidden." return self.field.widget.is_hidden @property def auto_id(self): """ Calculates and returns the ID attribute for this BoundField, if the associated Form has specified auto_id. Returns an empty string otherwise. """ auto_id = self.form.auto_id if auto_id and '%s' in smart_text(auto_id): return smart_text(auto_id) % self.html_name elif auto_id: return self.html_name return '' @property def id_for_label(self): """ Wrapper around the field widget's `id_for_label` method. Useful, for example, for focusing on this field regardless of whether it has a single widget or a MultiWidget. """ widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id return widget.id_for_label(id_)
agpl-3.0
hryamzik/ansible
lib/ansible/modules/network/avi/avi_backup.py
41
4095
#!/usr/bin/python # # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 17.1.1 # # Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_backup author: Gaurav Rastogi (grastogi@avinetworks.com) short_description: Module for setup of Backup Avi RESTful Object description: - This module is used to configure Backup object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] backup_config_ref: description: - Backupconfiguration information. - It is a reference to an object of type backupconfiguration. file_name: description: - The file name of backup. required: true local_file_url: description: - Url to download the backup file. remote_file_url: description: - Url to download the backup file. scheduler_ref: description: - Scheduler information. - It is a reference to an object of type scheduler. tenant_ref: description: - It is a reference to an object of type tenant. timestamp: description: - Unix timestamp of when the backup file is created. url: description: - Avi controller URL of the object. uuid: description: - Unique object identifier of the object. 
extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create Backup object avi_backup: controller: 10.10.25.42 username: admin password: something state: present name: sample_backup """ RETURN = ''' obj: description: Backup (api/backup) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), backup_config_ref=dict(type='str',), file_name=dict(type='str', required=True), local_file_url=dict(type='str',), remote_file_url=dict(type='str',), scheduler_ref=dict(type='str',), tenant_ref=dict(type='str',), timestamp=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'backup', set([])) if __name__ == '__main__': main()
gpl-3.0
longjon/numpy
numpy/f2py/common_rules.py
75
4752
#!/usr/bin/env python """ Build common block mechanism for f2py2e. Copyright 2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/05/06 10:57:33 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function __version__ = "$Revision: 1.19 $"[10:-1] from . import __version__ f2py_version = __version__.version import pprint import sys errmess=sys.stderr.write outmess=sys.stdout.write show=pprint.pprint from .auxfuncs import * from . import capi_maps from . import func2subr from .crackfortran import rmbadname ############## def findcommonblocks(block,top=1): ret = [] if hascommon(block): for n in block['common'].keys(): vars={} for v in block['common'][n]: vars[v]=block['vars'][v] ret.append((n, block['common'][n], vars)) elif hasbody(block): for b in block['body']: ret=ret+findcommonblocks(b, 0) if top: tret=[] names=[] for t in ret: if t[0] not in names: names.append(t[0]) tret.append(t) return tret return ret def buildhooks(m): ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} fwrap = [''] def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0], line) chooks = [''] def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) ihooks = [''] def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) doc = [''] def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) for (name, vnames, vars) in findcommonblocks(m): lower_name = name.lower() hnames, inames = [], [] for n in vnames: if isintent_hide(vars[n]): hnames.append(n) else: inames.append(n) if hnames: outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name, ','.join(inames), ','.join(hnames))) else: outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name, ','.join(inames))) fadd('subroutine f2pyinit%s(setupfunc)'%name) fadd('external setupfunc') 
for n in vnames: fadd(func2subr.var2fixfortran(vars, n)) if name=='_BLNK_': fadd('common %s'%(','.join(vnames))) else: fadd('common /%s/ %s'%(name, ','.join(vnames))) fadd('call setupfunc(%s)'%(','.join(inames))) fadd('end\n') cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) idims=[] for n in inames: ct = capi_maps.getctype(vars[n]) at = capi_maps.c2capi_map[ct] dm = capi_maps.getarrdims(n, vars[n]) if dm['dims']: idims.append('(%s)'%(dm['dims'])) else: idims.append('') dms=dm['dims'].strip() if not dms: dms='-1' cadd('\t{\"%s\",%s,{{%s}},%s},'%(n, dm['rank'], dms, at)) cadd('\t{NULL}\n};') inames1 = rmbadname(inames) inames1_tps = ','.join(['char *'+s for s in inames1]) cadd('static void f2py_setup_%s(%s) {'%(name, inames1_tps)) cadd('\tint i_f2py=0;') for n in inames1: cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name, n)) cadd('}') if '_' in lower_name: F_FUNC='F_FUNC_US' else: F_FUNC='F_FUNC' cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ %(F_FUNC, lower_name, name.upper(), ','.join(['char*']*len(inames1)))) cadd('static void f2py_init_%s(void) {'%name) cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ %(F_FUNC, lower_name, name.upper(), name)) cadd('}\n') iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name, name, name)) tname = name.replace('_', '\\_') dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) dadd('\\begin{description}') for n in inames: dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, vars[n]))) if hasnote(vars[n]): note = vars[n]['note'] if isinstance(note, list): note='\n'.join(note) dadd('--- %s'%(note)) dadd('\\end{description}') ret['docs'].append('"\t/%s/ %s\\n"'%(name, ','.join(map(lambda v, d:v+d, inames, idims)))) ret['commonhooks']=chooks ret['initcommonhooks']=ihooks ret['latexdoc']=doc[0] if len(ret['docs'])<=1: ret['docs']='' return ret, fwrap[0]
bsd-3-clause
baamenabar/MIUp-sencha-app-
sdk/command/vendor/nodejs/node_modules/jasmine-node/node_modules/jasmine-reporters/ext/phantomjs/python/webpage.py
14
2275
''' This file is part of the PyPhantomJS project. Copyright (C) 2011 James Roe <roejames12@hotmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from PyQt4.QtCore import QUrl, QEventLoop, qDebug from PyQt4.QtGui import QApplication from PyQt4.QtWebKit import QWebPage from plugincontroller import Bunch, do_action class WebPage(QWebPage): def __init__(self, parent=None): QWebPage.__init__(self, parent) self.parent = parent self.m_nextFileTag = '' self.m_userAgent = QWebPage.userAgentForUrl(self, QUrl()) if self.parent.m_verbose: self.currentFrame().urlChanged.connect(self.handleFrameUrlChanged) self.linkClicked.connect(self.handleLinkClicked) do_action('WebPageInit', Bunch(locals())) def handleFrameUrlChanged(self, url): qDebug('URL Changed: %s' % url.toString()) def handleLinkClicked(self, url): qDebug('URL Clicked: %s' % url.toString()) def javaScriptAlert(self, webframe, msg): print 'JavaScript alert: %s' % msg def javaScriptConsoleMessage(self, message, lineNumber, sourceID): if sourceID: print '%s:%d %s' % (sourceID, lineNumber, message) else: print message def shouldInterruptJavaScript(self): QApplication.processEvents(QEventLoop.AllEvents, 42) return False def userAgentForUrl(self, url): return self.m_userAgent def chooseFile(self, webframe, suggestedFile): if self.m_nextFileTag in self.parent.m_upload_file: return self.parent.m_upload_file[self.m_nextFileTag] return '' do_action('WebPage', 
Bunch(locals()))
mit
DEVELByte/incubator-airflow
dags/test_dag.py
6
1298
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from airflow import DAG from airflow.operators.dummy_operator import DummyOperator from datetime import datetime, timedelta now = datetime.now() now_to_the_hour = (now - timedelta(0, 0, 0, 0, 0, 3)).replace(minute=0, second=0, microsecond=0) START_DATE = now_to_the_hour DAG_NAME = 'test_dag_v1' default_args = { 'owner': 'airflow', 'depends_on_past': True, 'start_date': START_DATE, } dag = DAG(DAG_NAME, schedule_interval='*/10 * * * *', default_args=default_args) run_this_1 = DummyOperator(task_id='run_this_1', dag=dag) run_this_2 = DummyOperator(task_id='run_this_2', dag=dag) run_this_2.set_upstream(run_this_1) run_this_3 = DummyOperator(task_id='run_this_3', dag=dag) run_this_3.set_upstream(run_this_2)
apache-2.0
peastman/deepchem
deepchem/data/tests/test_merge.py
3
1530
""" Testing singletask/multitask dataset merging """ import os import deepchem as dc import numpy as np def test_merge(): """Test that datasets can be merged.""" current_dir = os.path.dirname(os.path.realpath(__file__)) dataset_file = os.path.join(current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) tasks = ["log-solubility"] loader = dc.data.CSVLoader( tasks=tasks, feature_field="smiles", featurizer=featurizer) first_dataset = loader.create_dataset(dataset_file) second_dataset = loader.create_dataset(dataset_file) merged_dataset = dc.data.DiskDataset.merge([first_dataset, second_dataset]) assert len(merged_dataset) == len(first_dataset) + len(second_dataset) def test_subset(): """Tests that subsetting of datasets works.""" current_dir = os.path.dirname(os.path.realpath(__file__)) dataset_file = os.path.join(current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) tasks = ["log-solubility"] loader = dc.data.CSVLoader( tasks=tasks, feature_field="smiles", featurizer=featurizer) dataset = loader.create_dataset(dataset_file, shard_size=2) shard_nums = [1, 2] orig_ids = dataset.ids _, _, _, ids_1 = dataset.get_shard(1) _, _, _, ids_2 = dataset.get_shard(2) subset = dataset.subset(shard_nums) after_ids = dataset.ids assert len(subset) == 4 assert sorted(subset.ids) == sorted(np.concatenate([ids_1, ids_2])) assert list(orig_ids) == list(after_ids)
mit
jhuttner/flake8-import-order
flake8_import_order/stdlib_list.py
1
5068
STDLIB_NAMES = set(( "AL", "BaseHTTPServer", "Bastion", "Binary", "Boolean", "CGIHTTPServer", "ColorPicker", "ConfigParser", "Cookie", "DEVICE", "DocXMLRPCServer", "EasyDialogs", "FL", "FrameWork", "GL", "HTMLParser", "MacOS", "Mapping", "MimeWriter", "MiniAEFrame", "Numeric", "Queue", "SUNAUDIODEV", "ScrolledText", "Sequence", "Set", "SimpleHTTPServer", "SimpleXMLRPCServer", "SocketServer", "StringIO", "Text", "Tix", "Tkinter", "UserDict", "UserList", "UserString", "__builtin__", "__future__", "__main__", "_dummy_thread", "_thread", "abc", "aepack", "aetools", "aetypes", "aifc", "al", "anydbm", "argparse", "array", "ast", "asynchat", "asyncio", "asyncore", "atexit", "audioop", "autoGIL", "base64", "bdb", "binascii", "binhex", "bisect", "bsddb", "builtins", "bz2", "cPickle", "cProfile", "cStringIO", "calendar", "cd", "cgi", "cgitb", "chunk", "cmath", "cmd", "code", "codecs", "codeop", "collections", "collections.abc", "colorsys", "commands", "compileall", "concurrent", "concurrent.futures", "configparser", "contextlib", "cookielib", "copy", "copy_reg", "copyreg", "crypt", "csv", "ctypes", "curses", "curses.ascii", "curses.panel", "curses.textpad", "curses.wrapper", "datetime", "dbhash", "dbm", "decimal", "difflib", "dircache", "dis", "distutils", "dl", "doctest", "dumbdbm", "dummy_thread", "dummy_threading", "email", "ensurepip", "enum", "errno", "faulthandler", "fcntl", "filecmp", "fileinput", "findertools", "fl", "flp", "fm", "fnmatch", "formatter", "fpectl", "fpformat", "fractions", "ftplib", "functools", "future_builtins", "gc", "gdbm", "gensuitemodule", "getopt", "getpass", "gettext", "gl", "glob", "grp", "gzip", "hashlib", "heapq", "hmac", "hotshot", "html", "html.entities", "html.parser", "htmlentitydefs", "htmllib", "http", "http.client", "http.cookiejar", "http.cookies", "http.server", "httplib", "ic", "imageop", "imaplib", "imgfile", "imghdr", "imp", "importlib", "imputil", "inspect", "io", "ipaddress", "itertools", "jpeg", "json", "keyword", "linecache", 
"locale", "logging", "logging.config", "logging.handlers", "lzma", "macostools", "macpath", "macurl2path", "mailbox", "mailcap", "marshal", "math", "md5", "mhlib", "mimetools", "mimetypes", "mimify", "mmap", "modulefinder", "msilib", "multifile", "multiprocessing", "mutex", "netrc", "new", "nis", "nntplib", "nturl2path", "numbers", "operator", "optparse", "os", "os.path", "ossaudiodev", "parser", "pathlib", "pdb", "pickle", "pickletools", "pipes", "pkgutil", "platform", "plistlib", "popen2", "poplib", "posix", "posixfile", "posixpath", "pprint", "profile", "pstats", "pty", "pwd", "py_compile", "pyclbr", "pydoc", "queue", "quopri", "random", "re", "readline", "repr", "reprlib", "resource", "rexec", "rfc822", "rlcompleter", "robotparser", "runpy", "sched", "select", "sets", "sgmllib", "sha", "shelve", "shlex", "shutil", "signal", "site", "smtpd", "smtplib", "sndhdr", "socket", "socketserver", "spwd", "sqlite3", "ssl", "stat", "statistics", "statvfs", "string", "stringprep", "struct", "subprocess", "sunau", "sunaudiodev", "symbol", "symtable", "sys", "sysconfig", "syslog", "tabnanny", "tarfile", "telnetlib", "tempfile", "termios", "test", "test.support", "test.test_support", "textwrap", "thread", "threading", "time", "timeit", "tkinter", "tkinter.scrolledtext", "tkinter.tix", "tkinter.ttk", "token", "tokenize", "trace", "traceback", "tracemalloc", "ttk", "tty", "turtle", "types", "typing", "unicodedata", "unittest", "unittest.mock", "urllib", "urllib.error", "urllib.parse", "urllib.request", "urllib.response", "urllib.robotparser", "urllib2", "urlparse", "user", "uu", "uuid", "venv", "warnings", "wave", "weakref", "webbrowser", "whichdb", "winsound", "wsgiref", "xdrlib", "xml", "xmlrpclib", "zipfile", "zipimport", "zlib", ))
lgpl-3.0
leon-adams/datascience
algorithms/hobfield.py
1
5247
# # Leon Adams # # Python Module for running a hopfield network to relocate the memory from a perturbed image. # The raw data set is represented in png image format. This code takes the three color channels (rgb) # Converts to a single channel gray scaled image and then transforms the output to a [-1,1] vector # for use in calculation of a hobfield neural network. # # Dependencies: numpy; matplotlib # # Usage # Can use as normal python module or can be used as a python script. # When calling from command line as script supply corruption percent at end of call # # Example: python hopfield.py 2 3 4 # This will produced 2, 3, and 4 percent perturbation on the image file and then # attempt to locate closest memorized pattern using hopfield network with hebb learning rule. # If called without perturbation parameters default to [1, 5, 10, 15, 20, 25] corruption percentages. # Output: output of the execution is a series of images showing first the perturbed # image with the corrupted percentages in the title. Then we show the closest memorized # image found from the hobfield network. # begin import needed libraries import sys import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg # end import libraries def rgb_to_gray_array(rgb): ''' Helper function to convert from rgb tensor to matrix gray-scaled image representation. Input: rgb tensor matrix of the three rgb color channels. output: numpy array of gray-scaled numeric values. ''' return np.dot(rgb[...,:3], np.array([0.299, 0.587, 0.114])) def read_images(filenames): ''' Read images to set to memory. Convert from rgb tensor to gray scale representation. Takes a list of filenames in directory containing pixel images. Returns a list of numpy arrays converted to gray-scale. ''' data = [( mpimg.imread(number) ) for number in filenames] return data, data[0].shape def create_vector_image(data_array): ''' Converts a gray-scaled image to [-1, +1] vector representation for hopfield networks. 
''' data_array = np.where(data_array < 0.99, -1, 1) return data_array.flatten() def print_unique_cnts(array): print( np.unique(array, return_counts=True ) ) def train(memories): ''' Training function for hobfield neural network. Trained with Hebb update rule. ''' rate, c = memories.shape Weight = np.zeros((c, c)) for p in memories: Weight = Weight + np.outer(p,p) Weight[np.diag_indices(c)] = 0 return Weight/rate def look_up(Weight_matrix, candidate_pattern, shape, percent_corrupted, steps=5): ''' Given a candidate pattern, lookup closet memorized stable state. Return the stable memorized state. ''' sgn = np.vectorize(lambda x: -1 if x<0 else 1) img = None for i in range(steps): im = show_pattern(candidate_pattern, shape) candidate_pattern = sgn(np.dot(candidate_pattern, Weight_matrix)) if img is None: img = plt.imshow(im, cmap=plt.cm.binary, interpolation='nearest') plt.title(str(percent_corrupted) + ' percent corrupted pixels') else: img.set_data(im) plt.pause(.2) plt.draw() return candidate_pattern def hopfield_energy(Weight, patterns): ''' Calculates the current energy value for a given pattern and weight matrix. ''' return np.array([-0.5*np.dot(np.dot(p.T, Weight), p) for p in patterns]) def show_img(image, shape): ''' Helper function to produce visualization of an image. ''' plt.imshow(image.reshape(shape), cmap=plt.cm.binary, interpolation='nearest') plt.show() def show_pattern(pattern, shape): return np.where(pattern < 0, 0, 1).reshape(shape) def corrupts(pattern, percentage): ''' Helper function for deriving corrupted pattern images. Specify stable memory pattern and the percentage of pixels to switch. 
''' counts = int( 2*np.ceil( len(pattern) * percentage / 200 ) ) neg_mask = np.where(pattern <= 0)[0] pos_mask = np.where(pattern > 0)[0] neg_corrupt_indices = np.random.choice(neg_mask, counts/2, replace = False) pos_corrupt_indices = np.random.choice(pos_mask, counts/2, replace = False) corrupt_pattern = np.copy(pattern) corrupt_pattern[neg_corrupt_indices] = 1 corrupt_pattern[pos_corrupt_indices] = -1 return corrupt_pattern data, shape = read_images(['datasets/C.png', 'datasets/D.png', 'datasets/J.png']) stable_memories = np.array([create_vector_image(rgb_to_gray_array(array)) for array in data ]) norm_weight_matrix = train(stable_memories) def test_stable_memories(stable_memory_patterns, corrupt_perentages): for memory in stable_memory_patterns: for percent in corrupt_perentages: crpt_memory = corrupts(memory, percent) look_up(norm_weight_matrix, crpt_memory, shape[0:2], percent_corrupted = percent, steps=5) if __name__ == "__main__": user_input = sys.argv if len(user_input) > 1: test_stable_memories(stable_memories, [float(i) for i in user_input[1:] ]) else: test_stable_memories(stable_memories, [1, 5, 10, 15, 20, 25])
mpl-2.0
AkA84/edx-platform
openedx/core/djangoapps/credit/migrations/0013_add_provider_status_url.py
78
12497
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'CreditProvider.provider_status_url' db.add_column('credit_creditprovider', 'provider_status_url', self.gf('django.db.models.fields.URLField')(default='', max_length=200), keep_default=False) def backwards(self, orm): # Deleting field 'CreditProvider.provider_status_url' db.delete_column('credit_creditprovider', 'provider_status_url') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'credit.creditcourse': { 'Meta': {'object_name': 'CreditCourse'}, 'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'credit.crediteligibility': { 'Meta': {'unique_together': "(('username', 'course'),)", 'object_name': 'CreditEligibility'}, 'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditCourse']"}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'deadline': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 6, 26, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, 'credit.creditprovider': { 'Meta': {'object_name': 'CreditProvider'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'enable_integration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'provider_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'provider_status_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}), 'provider_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}) }, 'credit.creditrequest': { 'Meta': {'unique_together': "(('username', 'course', 'provider'),)", 'object_name': 'CreditRequest'}, 'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditCourse']"}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'parameters': ('jsonfield.fields.JSONField', [], {}), 'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditProvider']"}), 
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}) }, 'credit.creditrequirement': { 'Meta': {'unique_together': "(('namespace', 'name', 'course'),)", 'object_name': 'CreditRequirement'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requirements'", 'to': "orm['credit.CreditCourse']"}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'criteria': ('jsonfield.fields.JSONField', [], {}), 'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'namespace': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'credit.creditrequirementstatus': { 'Meta': {'unique_together': "(('username', 'requirement'),)", 'object_name': 'CreditRequirementStatus'}, 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'reason': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'requirement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['credit.CreditRequirement']"}), 'status': ('django.db.models.fields.CharField', [], {'max_length': 
'32'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, 'credit.historicalcreditrequest': { 'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCreditRequest'}, 'course': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditCourse']"}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), u'history_date': ('django.db.models.fields.DateTimeField', [], {}), u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'parameters': ('jsonfield.fields.JSONField', [], {}), 'provider': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditProvider']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}) }, 'credit.historicalcreditrequirementstatus': { 'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCreditRequirementStatus'}, 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), u'history_date': ('django.db.models.fields.DateTimeField', [], {}), u'history_id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'reason': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'requirement': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditRequirement']"}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) } } complete_apps = ['credit']
agpl-3.0
rhertzog/django
tests/middleware_exceptions/middleware.py
4
1718
from __future__ import unicode_literals from django.http import Http404, HttpResponse from django.template import engines log = [] class BaseMiddleware(object): def __init__(self, get_response): self.get_response = get_response def __call__(self, request): return self.get_response(request) class ProcessExceptionMiddleware(BaseMiddleware): def process_exception(self, request, exception): return HttpResponse('Exception caught') class ProcessExceptionLogMiddleware(BaseMiddleware): def process_exception(self, request, exception): log.append('process-exception') class ProcessExceptionExcMiddleware(BaseMiddleware): def process_exception(self, request, exception): raise Exception('from process-exception') class ProcessViewMiddleware(BaseMiddleware): def process_view(self, request, view_func, view_args, view_kwargs): return HttpResponse('Processed view %s' % view_func.__name__) class ProcessViewNoneMiddleware(BaseMiddleware): def process_view(self, request, view_func, view_args, view_kwargs): log.append('processed view %s' % view_func.__name__) return None class TemplateResponseMiddleware(BaseMiddleware): def process_template_response(self, request, response): response.template_name = engines['django'].from_string('template-response middleware') return response class LogMiddleware(BaseMiddleware): def __call__(self, request): response = self.get_response(request) log.append((response.status_code, response.content)) return response class NotFoundMiddleware(BaseMiddleware): def __call__(self, request): raise Http404('not found')
bsd-3-clause