repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
sachinrjoglekar/MapGeist
Scripts/twitter_mapgeist.py
1
1999
#Extracts the top tweets given a search phrase, #and generates a Mind-Map from the data. #A handy tool to get an overall view of whats being talked about #a particular topic. ##Requires the 'TwitterSearch' Python library. ##Get it here: https://github.com/ckoepp/TwitterSearch from TwitterSearch import * from mapgeist.api import text_mind_map from mapgeist.visualization import visualize_tree_2D #Text phrase to be searched on Twitter search_text = 'artificial intelligence' #Number of nodes required in MindMap N = 50 #Filepath (png format) to store the MindMap to mappath = 'MindMap.png' try: #Create a TwitterSearchOrder object tso = TwitterSearchOrder() tso.set_keywords([search_text]) #Set language here tso.set_language('en') #You will need to have your own Twitter App tokens #More info here: ts = TwitterSearch( consumer_key = '', consumer_secret = '', access_token = '', access_token_secret = '' ) i = 0 f = open('tweets.txt', 'w') for tweet in ts.search_tweets_iterable(tso): line = str(tweet['text'].encode('utf-8')) terms = line.split(' ') termlist = [] for x in terms: try: #Do some rudimentary preprocessing if x == 'RT': continue elif x[0] == '@': continue elif x[0] == '#': termlist.append(x[1:]) elif '/' in x: continue else: if len(x) > 3: termlist.append(x) except: pass line = ' '.join(termlist) + '.' if len(line) > 3: f.write(line+'\n') i += 1 if i > 1000: break f.close() except TwitterSearchException as e: print(e) #Use MapGeist now mindmap, root = text_mind_map('tweets.txt', N) visualize_tree_2D(mindmap, root, mappath)
mit
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.7.2/Lib/encodings/utf_16_be.py
860
1037
""" Python 'utf-16-be' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs ### Codec APIs encode = codecs.utf_16_be_encode def decode(input, errors='strict'): return codecs.utf_16_be_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.utf_16_be_encode(input, self.errors)[0] class IncrementalDecoder(codecs.BufferedIncrementalDecoder): _buffer_decode = codecs.utf_16_be_decode class StreamWriter(codecs.StreamWriter): encode = codecs.utf_16_be_encode class StreamReader(codecs.StreamReader): decode = codecs.utf_16_be_decode ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-16-be', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
mit
mihirwagle/crypto-steganography-img
zip-steg.py
1
10244
# The MIT License (MIT) # # Copyright (c) 2017 Mihir Wagle # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from PIL import Image import getopt, sys, math, os, struct, timeit, zipfile, subprocess, shutil from sys import platform # Number of least significant bits containing/to contain data in image num_lsb = 2 def prepare_hide(): # Prepare files for reading and writing for hiding data. global image, input_file, input_file_path try: image = Image.open(input_image_path) #input_file = open(input_file_path, "rb") #rc = subprocess.call(['7z', 'a', "-p"+key, '-y', 'myzipfile.zip'] + [ input_file_path ]) if platform == "linux" or platform == "linux2" or platform == "darwin": # linux or macOS rc = subprocess.call(['7z', 'a', "-p"+key, '-y', 'myzipfile.zip'] + [ input_file_path ]) elif platform == "win32": # Windows... 
rc = subprocess.call(['7z', 'a', "-p"+key, '-y', 'myzipfile.zip'] + [ input_file_path ], shell=True) input_file_path = "myzipfile.zip" input_file = open(input_file_path, "rb") except FileNotFoundError: print("Input image or file not found, will not be able to hide data.") def prepare_recover(): # Prepare files for reading and writing for recovering data. global steg_image, output_file try: steg_image = Image.open(steg_image_path) #output_file = open(output_file_path, "wb+") except FileNotFoundError: print("Steg image not found, will not be able to recover data.") def reset_buffer(): global buffer, buffer_length buffer = 0 buffer_length = 0 def and_mask(index, n): # Returns an int used to set n bits to 0 from the index:th bit when using # bitwise AND on an integer of 8 bits or less. # Ex: and_mask(3,2) --> 0b11100111 = 231. return 255 - ((1 << n) - 1 << index) def get_filesize(path): # Returns the filesize in bytes of the file at path return os.stat(path).st_size def max_bits_to_hide(image): # Returns the number of bits we're able to hide in the image # using num_lsb least significant bits. # 3 color channels per pixel, num_lsb bits per color channel. return int(3 * image.size[0] * image.size[1] * num_lsb) def bits_in_max_filesize(image): # Returns the number of bits needed to store the size of the file. return max_bits_to_hide(image).bit_length() def read_bits_from_buffer(n): # Removes the first n bits from the buffer and returns them. global buffer, buffer_length bits = buffer % (1 << n) buffer >>= n buffer_length -= n return bits def hide_data(): # Hides the data from the input file in the input image. global buffer, buffer_length, image start = timeit.default_timer() prepare_hide() reset_buffer() data = iter(memoryview(input_file.read())) color_data = list(image.getdata()) color_data_index = 0 # We add the size of the input file to the beginning of the buffer. 
buffer += get_filesize(input_file_path) buffer_length += bits_in_max_filesize(image) print("Hiding", buffer, "bytes") if (buffer * 8 + buffer_length > max_bits_to_hide(image)): print("Only able to hide", max_bits_to_hide(image) // 8, "B in image. PROCESS WILL FAIL!") mask = and_mask(0, num_lsb) done = False while (not done): rgb = list(color_data[color_data_index]) for i in range(3): if(buffer_length < num_lsb): # If we need more data in the buffer, add a byte from the file to it. try: buffer += next(data) << buffer_length buffer_length += 8 except StopIteration: # If we've reached the end of our data, we're done done = True # Replace the num_lsb least significant bits of each color # channel with the first num_lsb bits from the buffer. rgb[i] &= mask rgb[i] |= read_bits_from_buffer(num_lsb) color_data[color_data_index] = tuple(rgb) color_data_index += 1 image.putdata(color_data) image.save(steg_image_path, compress_level=compression) stop = timeit.default_timer() input_file.close() os.remove(input_file_path) print("Runtime: {0:.2f} s".format(stop - start)) def recover_data(): # Writes the data from the steganographed image to the output file global buffer, buffer_length, steg_image start = timeit.default_timer() prepare_recover() reset_buffer() data = bytearray() color_data = list(steg_image.getdata()) color_data_index = 0 pixels_used_for_filesize = math.ceil(bits_in_max_filesize(steg_image) / (3 * num_lsb)) for i in range(pixels_used_for_filesize): rgb = list(color_data[color_data_index]) color_data_index += 1 for i in range(3): # Add the num_lsb least significant bits # of each color channel to the buffer. buffer += (rgb[i] % (1 << num_lsb) << buffer_length) buffer_length += num_lsb # Get the size of the file we need to recover. 
bytes_to_recover = read_bits_from_buffer(bits_in_max_filesize(steg_image)) print("Looking to recover", bytes_to_recover, "bytes") while (bytes_to_recover > 0): rgb = list(color_data[color_data_index]) color_data_index += 1 for i in range(3): # Add the num_lsb least significant bits # of each color channel to the buffer. buffer += (rgb[i] % (1 << num_lsb)) << buffer_length buffer_length += num_lsb while (buffer_length >= 8 and bytes_to_recover > 0): # If we have more than a byte in the buffer, add it to data # and decrement the number of bytes left to recover. bits = read_bits_from_buffer(8) data += struct.pack('1B', bits) bytes_to_recover -= 1 a = open('myzipfile.zip', 'wb+') a.write(bytes(data)) a.close() src = "steg-output" with zipfile.ZipFile("myzipfile.zip", "r") as zf: zf.setpassword(key.encode('utf-8')) zf.extractall(path=src) files = os.listdir(src) for f in files: shutil.move(src+"/"+f, output_file_path) shutil.rmtree(src) os.remove('myzipfile.zip') stop = timeit.default_timer() print("Runtime: {0:.2f} s".format(stop - start)) def analysis(): # Find how much data we can hide and the size of the data to be hidden prepare_hide() print("Image resolution: (", image.size[0], ",", image.size[1], ")") print("Using", num_lsb, "LSBs: we can hide\t", max_bits_to_hide(image) // 8, "B") print("Size of input file: \t\t", get_filesize(input_file_path), "B") print("Filesize tag: \t\t\t", math.ceil(bits_in_max_filesize(image) / 8), "B") def usage(): print("\nCommand Line Arguments:\n", "-h, --hide To hide data in a image file\n", "-r, --recover To recover data from a image file\n", "-i, --image= Path to a .png file\n", "-f, --file= Path to a txt file to hide in the image file\n", "-o, --output= Path to an output folder\n", "-k, --key= How many LSBs to use\n", "-c, --compression= How many bytes to recover from the image file\n", "--help Display this message\n") try: opts, args = getopt.getopt(sys.argv[1:], 'hri:f:o:k:c:', ['hide', 'recover', 'image=', 'file=', 'output=', 
'key=', 'compression=', 'help']) except getopt.GetoptError: usage() sys.exit(1) hiding_data = False recovering_data = False for opt, arg in opts: if opt in ("-h", "--hide"): hiding_data = True elif opt in ("-r", "--recover"): recovering_data = True elif opt in ("-i", "--image"): input_image_path = arg elif opt in ("-f", "--file"): input_file_path = arg elif opt in ("-o", "--output"): output_file_path = arg elif opt in ("-k", "--key="): key = arg elif opt in ("-c", "--compression="): compression = int(arg) elif opt in ("--help"): usage() sys.exit(1) else: print("Invalid argument {}".format(opt)) try: if (hiding_data): input_image_path = input_image_path input_file_path = input_file_path steg_image_path = output_file_path key = key compression = compression hide_data() if (recovering_data): steg_image_path = input_image_path output_file_path = output_file_path key = key compression = compression recover_data() except Exception as e: print("Ran into an error during execution. Check input and try again.\n") print(e) usage() sys.exit(1) # Initial paths, variables used. #input_image_path = "pic.png" #steg_image_path = "steg_image.png" #input_file_path = "a.txt" #output_file_path = "b.txt" #key = "MihirWagle" #compression = 1
mit
mpuig/TileStache
TileStache/Goodies/Providers/SolrGeoJSON.py
4
6850
""" Provider that returns GeoJSON data responses from Solr spatial queries. This is an example of a provider that does not return an image, but rather queries a Solr instance for raw data and replies with a string of GeoJSON. Read more about the GeoJSON spec at: http://geojson.org/geojson-spec.html Caveats: Example TileStache provider configuration: "solr": { "provider": {"class": "TileStache.Goodies.Providers.SolrGeoJSON.Provider", "kwargs": { "solr_endpoint": "http://localhost:8983/solr/example", "solr_query": "*:*", }} } The following optional parameters are also supported: latitude_field: The name of the latitude field associated with your query parser; the default is 'latitude' longitude_field: The name of the longitude field associated with your query parser, default is 'longitude response_fields: A comma-separated list of fields with which to filter the Solr response; the default is '' (or: include all fields) id_field: The name name of your Solr instance's unique ID field; the default is ''. By default queries are scoped to the bounding box of a given tile. Radial queries are also supported if you supply a 'radius' kwarg to your provider and have installed the JTeam spatial plugin: http://www.jteam.nl/news/spatialsolr.html. For example: "solr": { "provider": {"class": "TileStache.Goodies.Providers.SolrGeoJSON.Provider", "kwargs": { "solr_endpoint": "http://localhost:8983/solr/example", "solr_query": 'foo:bar', "radius": "1", }} } Radial queries are begin at the center of the tile being rendered and distances are measured in kilometers. The following optional parameters are also supported for radial queries: query_parser: The name of the Solr query parser associated with your spatial plugin; the default is 'spatial'. 
""" from math import log, tan, pi, atan, pow, e from re import compile from json import JSONEncoder from TileStache.Core import KnownUnknown from TileStache.Geography import getProjectionByName try: import pysolr except ImportError: # well it won't work but we can still make the documentation. pass class SaveableResponse: """ Wrapper class for JSON response that makes it behave like a PIL.Image object. TileStache.getTile() expects to be able to save one of these to a buffer. """ def __init__(self, content): self.content = content def save(self, out, format): if format != 'JSON': raise KnownUnknown('SolrGeoJSON only saves .json tiles, not "%s"' % format) encoded = JSONEncoder(indent=2).iterencode(self.content) float_pat = compile(r'^-?\d+\.\d+$') for atom in encoded: if float_pat.match(atom): out.write('%.6f' % float(atom)) else: out.write(atom) class Provider: """ """ def __init__(self, layer, solr_endpoint, solr_query, **kwargs): self.projection = getProjectionByName('spherical mercator') self.layer = layer self.endpoint = str(solr_endpoint) self.query = solr_query self.solr = pysolr.Solr(self.endpoint) self.query_parser = kwargs.get('query_parser', 'spatial') self.lat_field = kwargs.get('latitude_column', 'latitude') self.lon_field = kwargs.get('longitude_column', 'longitude') self.id_field = kwargs.get('id_column', '') self.solr_radius = kwargs.get('radius', None) self.solr_fields = kwargs.get('response_fields', None) def getTypeByExtension(self, extension): """ Get mime-type and format by file extension. This only accepts "json". 
""" if extension.lower() != 'json': raise KnownUnknown('PostGeoJSON only makes .json tiles, not "%s"' % extension) return 'text/json', 'JSON' def unproject(self, x, y): x, y = x / 6378137, y / 6378137 # dimensions of the earth lat, lon = 2 * atan(pow(e, y)) - .5 * pi, x # basic spherical mercator lat, lon = lat * 180/pi, lon * 180/pi # radians to degrees return lat, lon def renderTile(self, width, height, srs, coord): """ Render a single tile, return a SaveableResponse instance. """ minx, miny, maxx, maxy = self.layer.envelope(coord) y = miny + ((maxy - miny) / 2) x = minx + ((maxx - minx) / 2) sw_lat, sw_lon = self.unproject(minx, miny) ne_lat, ne_lon = self.unproject(maxx, maxy) center_lat, center_lon = self.unproject(x, y) bbox = "%s:[%s TO %s] AND %s:[%s TO %s]" % (self.lon_field, sw_lon, ne_lon, self.lat_field, sw_lat, ne_lat) query = bbox # for example: # {!spatial lat=51.500152 long=-0.126236 radius=10 calc=arc unit=km}*:* if self.solr_radius: query = "{!%s lat=%s long=%s radius=%s calc=arc unit=km}%s" % (self.query_parser, center_lat, center_lon, self.solr_radius, bbox) kwargs = {} if self.query != '*:*': kwargs['fq'] = self.query kwargs['omitHeader'] = 'true' rsp_fields = [] if self.solr_fields: rsp_fields = self.solr_fields.split(',') if not self.lat_field in rsp_fields: rsp_fields.append(self.lat_field) if not self.lon_field in rsp_fields: rsp_fields.append(self.lon_field) kwargs['fl'] = ','.join(rsp_fields) response = {'type': 'FeatureCollection', 'features': []} total = None start = 0 rows = 1000 while not total or start < total: kwargs['start'] = start kwargs['rows'] = rows rsp = self.solr.search(query, **kwargs) if not total: total = rsp.hits if total == 0: break for row in rsp: # hack until I figure out why passing &fl in a JSON # context does not actually limit the fields returned if len(rsp_fields): for key, ignore in row.items(): if not key in rsp_fields: del(row[key]) row['geometry'] = { 'type': 'Point', 'coordinates': (row[ self.lon_field ], 
row[ self.lat_field ]) } del(row[ self.lat_field ]) del(row[ self.lon_field ]) if self.id_field != '': row['id'] = row[ self.id_field ] response['features'].append(row) start += rows return SaveableResponse(response) # -*- indent-tabs-mode:nil tab-width:4 -*-
bsd-3-clause
davidpvilaca/TEP
aula1/aula1.py
1
1609
# -*- coding: utf-8 -*- """ Spyder Editor Este é um arquivo de script temporário. """ import matplotlib.pyplot as plt import numpy as np f = plt.imread('field.png') plt.imshow(f) # red r = f.copy() plt.imshow(r) r[:,:,1] = 0 r[:,:,2] = 0 plt.imshow(r) # green g = f.copy() g[:,:,0] = 0 g[:,:,2] = 0 plt.imshow(g) # blue b = f.copy() b[:,:,0] = 0 b[:,:,1] = 0 plt.imshow(b) plt.imshow(f) # grayscale gs1 = (f[:,:,0] + f[:,:,1] + f[:,:,2]) / 3 plt.imshow(gs1) plt.imshow(gs1, cmap=plt.cm.Greys_r) plt.imshow(f) gs2 = 0.299*f[:,:,0] + 0.587*f[:,:,1] + 0.114*f[:,:,2] plt.imshow(gs2, cmap=plt.cm.Greys_r) plt.imshow(f) h = f.copy() plt.imshow(h) idx = h[:,:,1] > 0.5 idx.shape h[idx,1] = 0 plt.imshow(h) h = f.copy() idx = h[:,:,1] > h[:,:,0] h[idx,1] = 0 plt.imshow(h) h.shape plt.imshow(f) # histograma plt.imshow(b) plt.hist(b.ravel(), 256, [0, 1]) plt.hist(f[:,:,2].ravel(), 256, [0, 1]) plt.hist(f[:,:,0].ravel(), 256, [0, 1]) plt.hist(f[:,:,1].ravel(), 256, [0, 1]) plt.hist(f[:,:,2].ravel(), 256, [0, 1]) plt.hist(f[:,:,0].ravel(), 256, [0, 1], color='r') plt.hist(f[:,:,1].ravel(), 256, [0, 1], color='g') plt.hist(f[:,:,2].ravel(), 256, [0, 1], color='b') plt.hist(gs2.ravel(), 256, [0, 1]) hs,bins = np.histogram(gs2, bins=256) plt.plot(hs) plt.hist(gs2.ravel(), 256, [0, 1]) hr,bins = np.histogram(f[:,:,0], bins=256) hg,bins = np.histogram(f[:,:,1], bins=256) hb,bins = np.histogram(f[:,:,2], bins=256) plt.plot(hr, color='r') plt.plot(hg, color='g') plt.plot(hb, color='b') nf = 1 - f plt.imshow(nf)
mit
scrollback/kuma
vendor/lib/python/pytz/tzfile.py
7
4736
#!/usr/bin/env python ''' $Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $ ''' try: from cStringIO import StringIO except ImportError: from io import StringIO from datetime import datetime, timedelta from struct import unpack, calcsize from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo from pytz.tzinfo import memorized_datetime, memorized_timedelta def _byte_string(s): """Cast a string or byte string to an ASCII byte string.""" return s.encode('US-ASCII') _NULL = _byte_string('\0') def _std_string(s): """Cast a string or byte string to an ASCII string.""" return str(s.decode('US-ASCII')) def build_tzinfo(zone, fp): head_fmt = '>4s c 15x 6l' head_size = calcsize(head_fmt) (magic, format, ttisgmtcnt, ttisstdcnt,leapcnt, timecnt, typecnt, charcnt) = unpack(head_fmt, fp.read(head_size)) # Make sure it is a tzfile(5) file assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic) # Read out the transition times, localtime indices and ttinfo structures. data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict( timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt) data_size = calcsize(data_fmt) data = unpack(data_fmt, fp.read(data_size)) # make sure we unpacked the right number of values assert len(data) == 2 * timecnt + 3 * typecnt + 1 transitions = [memorized_datetime(trans) for trans in data[:timecnt]] lindexes = list(data[timecnt:2 * timecnt]) ttinfo_raw = data[2 * timecnt:-1] tznames_raw = data[-1] del data # Process ttinfo into separate structs ttinfo = [] tznames = {} i = 0 while i < len(ttinfo_raw): # have we looked up this timezone name yet? 
tzname_offset = ttinfo_raw[i+2] if tzname_offset not in tznames: nul = tznames_raw.find(_NULL, tzname_offset) if nul < 0: nul = len(tznames_raw) tznames[tzname_offset] = _std_string( tznames_raw[tzname_offset:nul]) ttinfo.append((ttinfo_raw[i], bool(ttinfo_raw[i+1]), tznames[tzname_offset])) i += 3 # Now build the timezone object if len(transitions) == 0: ttinfo[0][0], ttinfo[0][2] cls = type(zone, (StaticTzInfo,), dict( zone=zone, _utcoffset=memorized_timedelta(ttinfo[0][0]), _tzname=ttinfo[0][2])) else: # Early dates use the first standard time ttinfo i = 0 while ttinfo[i][1]: i += 1 if ttinfo[i] == ttinfo[lindexes[0]]: transitions[0] = datetime.min else: transitions.insert(0, datetime.min) lindexes.insert(0, i) # calculate transition info transition_info = [] for i in range(len(transitions)): inf = ttinfo[lindexes[i]] utcoffset = inf[0] if not inf[1]: dst = 0 else: for j in range(i-1, -1, -1): prev_inf = ttinfo[lindexes[j]] if not prev_inf[1]: break dst = inf[0] - prev_inf[0] # dst offset if dst <= 0: # Bad dst? Look further. for j in range(i+1, len(transitions)): stdinf = ttinfo[lindexes[j]] if not stdinf[1]: dst = inf[0] - stdinf[0] if dst > 0: break # Found a useful std time. tzname = inf[2] # Round utcoffset and dst to the nearest minute or the # datetime library will complain. Conversions to these timezones # might be up to plus or minus 30 seconds out, but it is # the best we can do. 
utcoffset = int((utcoffset + 30) // 60) * 60 dst = int((dst + 30) // 60) * 60 transition_info.append(memorized_ttinfo(utcoffset, dst, tzname)) cls = type(zone, (DstTzInfo,), dict( zone=zone, _utc_transition_times=transitions, _transition_info=transition_info)) return cls() if __name__ == '__main__': import os.path from pprint import pprint base = os.path.join(os.path.dirname(__file__), 'zoneinfo') tz = build_tzinfo('Australia/Melbourne', open(os.path.join(base,'Australia','Melbourne'), 'rb')) tz = build_tzinfo('US/Eastern', open(os.path.join(base,'US','Eastern'), 'rb')) pprint(tz._utc_transition_times) #print tz.asPython(4) #print tz.transitions_mapping
mpl-2.0
ogenstad/ansible
lib/ansible/modules/cloud/amazon/aws_waf_rule.py
42
12179
#!/usr/bin/python # Copyright (c) 2017 Will Thames # Copyright (c) 2015 Mike Mochan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: aws_waf_rule short_description: create and delete WAF Rules description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/) version_added: "2.5" author: - Mike Mochan (@mmochan) - Will Thames (@willthames) extends_documentation_fragment: - aws - ec2 options: name: description: Name of the Web Application Firewall rule required: yes metric_name: description: - A friendly name or description for the metrics for the rule - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. - You can't change metric_name after you create the rule - Defaults to the same as name with disallowed characters removed state: description: whether the rule should be present or absent choices: - present - absent default: present conditions: description: > list of conditions used in the rule. Each condition should contain I(type): which is one of [C(byte), C(geo), C(ip), C(size), C(sql) or C(xss)] I(negated): whether the condition should be negated, and C(condition), the name of the existing condition. M(aws_waf_condition) can be used to create new conditions purge_conditions: description: - Whether or not to remove conditions that are not passed when updating `conditions`. Defaults to false. 
''' EXAMPLES = ''' - name: create WAF rule aws_waf_rule: name: my_waf_rule conditions: - name: my_regex_condition type: regex negated: no - name: my_geo_condition type: geo negated: no - name: my_byte_condition type: byte negated: yes - name: remove WAF rule aws_waf_rule: name: "my_waf_rule" state: absent ''' RETURN = ''' rule: description: WAF rule contents returned: always type: complex contains: metric_name: description: Metric name for the rule returned: always type: string sample: ansibletest1234rule name: description: Friendly name for the rule returned: always type: string sample: ansible-test-1234_rule predicates: description: List of conditions used in the rule returned: always type: complex contains: data_id: description: ID of the condition returned: always type: string sample: 8251acdb-526c-42a8-92bc-d3d13e584166 negated: description: Whether the sense of the condition is negated returned: always type: bool sample: false type: description: type of the condition returned: always type: string sample: ByteMatch rule_id: description: ID of the WAF rule returned: always type: string sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261 ''' import re try: import botocore except ImportError: pass # handled by AnsibleAWSModule from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec from ansible.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, MATCH_LOOKUP from ansible.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff def get_rule_by_name(client, module, name): rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name] if rules: return rules[0] def get_rule(client, module, rule_id): try: return client.get_rule(RuleId=rule_id)['Rule'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 
module.fail_json_aws(e, msg='Could not get WAF rule') def list_rules(client, module): try: return list_rules_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Could not list WAF rules') def find_and_update_rule(client, module, rule_id): rule = get_rule(client, module, rule_id) rule_id = rule['RuleId'] existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP) all_conditions = dict() for condition_type in MATCH_LOOKUP: method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's' all_conditions[condition_type] = dict() try: paginator = client.get_paginator(method) func = paginator.paginate().build_full_result except (KeyError, botocore.exceptions.OperationNotPageableError): # list_geo_match_sets and list_regex_match_sets do not have a paginator # and throw different exceptions func = getattr(client, method) try: pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type) for pred in pred_results: pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id'] all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred) all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred) for condition in module.params['conditions']: desired_conditions[condition['type']][condition['name']] = condition reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items()) for condition in rule['Predicates']: existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition) insertions = list() deletions = list() for condition_type in desired_conditions: for (condition_name, condition) in 
def find_and_update_rule(client, module, rule_id):
    """Reconcile the conditions attached to a WAF rule with the desired set.

    Returns (changed, rule) where rule is the refreshed rule description.

    NOTE(review): this chunk opens mid-way through the function.  The code
    that fetches the rule and builds ``all_conditions``,
    ``existing_conditions`` and ``desired_conditions`` lies outside the
    visible text; the loop headers and the ``insertions``/``deletions``
    initialisers below are inferred from the visible body and the call
    sites -- confirm against the upstream module before relying on them.
    """
    insertions = []  # inferred initialisation: appended to below
    deletions = []   # inferred initialisation: extended below
    for condition_type in desired_conditions:  # dicts built in truncated head
        for condition_name, condition in desired_conditions[condition_type].items():
            if condition_name not in all_conditions[condition_type]:
                module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
            condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
            if condition['data_id'] not in existing_conditions[condition_type]:
                insertions.append(format_for_insertion(condition))
    if module.params['purge_conditions']:
        # Queue a DELETE for every attached condition no longer desired.
        for condition_type in existing_conditions:
            deletions.extend([format_for_deletion(condition)
                              for condition in existing_conditions[condition_type].values()
                              if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
    changed = bool(insertions or deletions)
    update = {
        'RuleId': rule_id,
        'Updates': insertions + deletions
    }
    if changed:
        try:
            run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update rule conditions')
    return changed, get_rule(client, module, rule_id)


def format_for_insertion(condition):
    """Turn a snake_case condition dict into an INSERT entry for update_rule."""
    return {
        'Action': 'INSERT',
        'Predicate': {
            'Negated': condition['negated'],
            'Type': MATCH_LOOKUP[condition['type']]['type'],
            'DataId': condition['data_id'],
        },
    }


def format_for_deletion(condition):
    """Turn a snake_case condition dict into a DELETE entry for update_rule."""
    return {
        'Action': 'DELETE',
        'Predicate': {
            'Negated': condition['negated'],
            'Type': condition['type'],
            'DataId': condition['data_id'],
        },
    }


def remove_rule_conditions(client, module, rule_id):
    """Detach every condition currently attached to the rule."""
    conditions = get_rule(client, module, rule_id)['Predicates']
    removals = [format_for_deletion(camel_dict_to_snake_dict(condition))
                for condition in conditions]
    try:
        run_func_with_change_token_backoff(client, module,
                                           {'RuleId': rule_id, 'Updates': removals},
                                           client.update_rule)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not remove rule conditions')


def ensure_rule_present(client, module):
    """Create the rule if it does not exist, then reconcile its conditions.

    Returns (changed, rule) as produced by find_and_update_rule().
    """
    name = module.params['name']
    rule_id = get_rule_by_name(client, module, name)
    if rule_id:
        return find_and_update_rule(client, module, rule_id)

    # MetricName must be alphanumeric; derive one from the rule name when the
    # user did not supply a metric_name.
    metric_name = module.params['metric_name'] or re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
    params = {'Name': module.params['name'], 'MetricName': metric_name}
    try:
        new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not create rule')
    return find_and_update_rule(client, module, new_rule['RuleId'])


def find_rule_in_web_acls(client, module, rule_id):
    """Return the names of every Web ACL that references rule_id."""
    try:
        all_web_acls = list_web_acls_with_backoff(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not list Web ACLs')
    acl_names = []
    for web_acl in all_web_acls:
        try:
            details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not get Web ACL details')
        if any(rule['RuleId'] == rule_id for rule in details['Rules']):
            acl_names.append(details['Name'])
    return acl_names


def ensure_rule_absent(client, module):
    """Delete the rule, refusing if any Web ACL still references it."""
    rule_id = get_rule_by_name(client, module, module.params['name'])
    in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
    if in_use_web_acls:
        module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
                         (module.params['name'], ', '.join(in_use_web_acls)))
    if rule_id:
        # Conditions must be detached before the rule itself can be deleted.
        remove_rule_conditions(client, module, rule_id)
        try:
            return True, run_func_with_change_token_backoff(client, module,
                                                            {'RuleId': rule_id},
                                                            client.delete_rule,
                                                            wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not delete rule')
    return False, {}


def main():
    """Module entry point: parse arguments and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            metric_name=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            conditions=dict(type='list'),
            purge_conditions=dict(type='bool', default=False)
        ),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)
    state = module.params.get('state')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='waf',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    if state == 'present':
        changed, results = ensure_rule_present(client, module)
    else:
        changed, results = ensure_rule_absent(client, module)

    module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
gpl-3.0
indianajohn/ycmd
ycmd/tests/typescript/get_completions_test.py
1
3158
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division

from future import standard_library
standard_library.install_aliases()
from builtins import *  # noqa

from hamcrest import assert_that, contains_inanyorder, has_entries

from .typescript_handlers_test import Typescript_Handlers_test
from ycmd.utils import ReadFile
from mock import patch


class TypeScript_GetCompletions_test(Typescript_Handlers_test):
    """Semantic completion tests for the TypeScript completer."""

    def _RunTest(self, test):
        """Load test.ts into the server, request forced-semantic completions
        at line 12, column 6, and match the response against
        test['expect']['data']."""
        filepath = self._PathToTestFile('test.ts')
        contents = ReadFile(filepath)

        # The server must have visited the buffer before completions are
        # meaningful.
        self._app.post_json('/event_notification',
                            self._BuildRequest(filepath=filepath,
                                               filetype='typescript',
                                               contents=contents,
                                               event_name='BufferVisit'))

        completion_request = self._BuildRequest(filepath=filepath,
                                                filetype='typescript',
                                                contents=contents,
                                                force_semantic=True,
                                                line_num=12,
                                                column_num=6)
        response = self._app.post_json('/completions', completion_request)
        assert_that(response.json, test['expect']['data'])

    def Basic_test(self):
        """Every method of Foo is offered, each with its detailed signature."""
        matcher = has_entries({
            'completions': contains_inanyorder(
                self.CompletionEntryMatcher(
                    'methodA', 'methodA (method) Foo.methodA(): void'),
                self.CompletionEntryMatcher(
                    'methodB', 'methodB (method) Foo.methodB(): void'),
                self.CompletionEntryMatcher(
                    'methodC', 'methodC (method) Foo.methodC(): void'),
            )
        })
        self._RunTest({'expect': {'data': matcher}})

    @patch('ycmd.completers.typescript.typescript_completer.'
           'MAX_DETAILED_COMPLETIONS', 2)
    def MaxDetailedCompletion_test(self):
        """With MAX_DETAILED_COMPLETIONS patched below the candidate count,
        only the bare insertion texts are matched (no detail strings)."""
        matcher = has_entries({
            'completions': contains_inanyorder(
                self.CompletionEntryMatcher('methodA'),
                self.CompletionEntryMatcher('methodB'),
                self.CompletionEntryMatcher('methodC'),
            )
        })
        self._RunTest({'expect': {'data': matcher}})
gpl-3.0
dabiboo/youtube-dl
youtube_dl/extractor/__init__.py
1
22070
from __future__ import unicode_literals from .abc import ABCIE from .abc7news import Abc7NewsIE from .academicearth import AcademicEarthCourseIE from .addanime import AddAnimeIE from .adobetv import ( AdobeTVIE, AdobeTVVideoIE, ) from .adultswim import AdultSwimIE from .aftenposten import AftenpostenIE from .aftonbladet import AftonbladetIE from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE from .alphaporno import AlphaPornoIE from .anitube import AnitubeIE from .anysex import AnySexIE from .aol import AolIE from .allocine import AllocineIE from .aparat import AparatIE from .appleconnect import AppleConnectIE from .appletrailers import AppleTrailersIE from .archiveorg import ArchiveOrgIE from .ard import ( ARDIE, ARDMediathekIE, SportschauIE, ) from .arte import ( ArteTvIE, ArteTVPlus7IE, ArteTVCreativeIE, ArteTVConcertIE, ArteTVFutureIE, ArteTVDDCIE, ArteTVEmbedIE, ) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE from .audiomack import AudiomackIE, AudiomackAlbumIE from .azubu import AzubuIE from .baidu import BaiduVideoIE from .bambuser import BambuserIE, BambuserChannelIE from .bandcamp import BandcampIE, BandcampAlbumIE from .bbc import ( BBCCoUkIE, BBCIE, ) from .beeg import BeegIE from .behindkink import BehindKinkIE from .beatportpro import BeatportProIE from .bet import BetIE from .bild import BildIE from .bilibili import BiliBiliIE from .blinkx import BlinkxIE from .bliptv import BlipTVIE, BlipTVUserIE from .bloomberg import BloombergIE from .bpb import BpbIE from .br import BRIE from .breakcom import BreakIE from .brightcove import BrightcoveIE from .buzzfeed import BuzzFeedIE from .byutv import BYUtvIE from .c56 import C56IE from .camdemy import ( CamdemyIE, CamdemyFolderIE ) from .canal13cl import Canal13clIE from .canalplus import CanalplusIE from .canalc2 import Canalc2IE from .cbs import CBSIE from .cbsnews import CBSNewsIE from .cbssports import CBSSportsIE from .ccc import CCCIE from 
.ceskatelevize import CeskaTelevizeIE from .channel9 import Channel9IE from .chaturbate import ChaturbateIE from .chilloutzone import ChilloutzoneIE from .chirbit import ( ChirbitIE, ChirbitProfileIE, ) from .cinchcast import CinchcastIE from .cinemassacre import CinemassacreIE from .clipfish import ClipfishIE from .cliphunter import CliphunterIE from .clipsyndicate import ClipsyndicateIE from .cloudy import CloudyIE from .clubic import ClubicIE from .cmt import CMTIE from .cnet import CNETIE from .cnn import ( CNNIE, CNNBlogsIE, CNNArticleIE, ) from .collegehumor import CollegeHumorIE from .collegerama import CollegeRamaIE from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE from .comcarcoff import ComCarCoffIE from .commonmistakes import CommonMistakesIE, UnicodeBOMIE from .condenast import CondeNastIE from .cracked import CrackedIE from .criterion import CriterionIE from .crooksandliars import CrooksAndLiarsIE from .crunchyroll import ( CrunchyrollIE, CrunchyrollShowPlaylistIE ) from .cspan import CSpanIE from .ctsnews import CtsNewsIE from .dailymotion import ( DailymotionIE, DailymotionPlaylistIE, DailymotionUserIE, DailymotionCloudIE, ) from .daum import DaumIE from .dbtv import DBTVIE from .dcn import DCNIE from .dctp import DctpTvIE from .deezer import DeezerPlaylistIE from .dfb import DFBIE from .dhm import DHMIE from .dotsub import DotsubIE from .douyutv import DouyuTVIE from .dramafever import ( DramaFeverIE, DramaFeverSeriesIE, ) from .dreisat import DreiSatIE from .drbonanza import DRBonanzaIE from .drtuber import DrTuberIE from .drtv import DRTVIE from .dvtv import DVTVIE from .dump import DumpIE from .dumpert import DumpertIE from .defense import DefenseGouvFrIE from .discovery import DiscoveryIE from .dropbox import DropboxIE from .eagleplatform import EaglePlatformIE from .ebaumsworld import EbaumsWorldIE from .echomsk import EchoMskIE from .ehow import EHowIE from .eighttracks import EightTracksIE from .einthusan import EinthusanIE 
from .eitb import EitbIE from .ellentv import ( EllenTVIE, EllenTVClipsIE, ) from .elpais import ElPaisIE from .embedly import EmbedlyIE from .engadget import EngadgetIE from .eporner import EpornerIE from .eroprofile import EroProfileIE from .escapist import EscapistIE from .espn import ESPNIE from .esri import EsriVideoIE from .europa import EuropaIE from .everyonesmixtape import EveryonesMixtapeIE from .exfm import ExfmIE from .expotv import ExpoTVIE from .extremetube import ExtremeTubeIE from .facebook import FacebookIE from .faz import FazIE from .fc2 import FC2IE from .firstpost import FirstpostIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE from .fivetv import FiveTVIE from .fktv import FKTVIE from .flickr import FlickrIE from .folketinget import FolketingetIE from .footyroom import FootyRoomIE from .fourtube import FourTubeIE from .foxgay import FoxgayIE from .foxnews import FoxNewsIE from .foxsports import FoxSportsIE from .franceculture import FranceCultureIE from .franceinter import FranceInterIE from .francetv import ( PluzzIE, FranceTvInfoIE, FranceTVIE, GenerationQuoiIE, CultureboxIE, ) from .freesound import FreesoundIE from .freespeech import FreespeechIE from .freevideo import FreeVideoIE from .funnyordie import FunnyOrDieIE from .gamekings import GamekingsIE from .gameone import ( GameOneIE, GameOnePlaylistIE, ) from .gamersyde import GamersydeIE from .gamespot import GameSpotIE from .gamestar import GameStarIE from .gametrailers import GametrailersIE from .gazeta import GazetaIE from .gdcvault import GDCVaultIE from .generic import GenericIE from .gfycat import GfycatIE from .giantbomb import GiantBombIE from .giga import GigaIE from .glide import GlideIE from .globo import GloboIE from .godtube import GodTubeIE from .goldenmoustache import GoldenMoustacheIE from .golem import GolemIE from .googleplus import GooglePlusIE from .googlesearch import GoogleSearchIE from .gorillavid import GorillaVidIE from .goshgay import GoshgayIE 
from .groupon import GrouponIE from .hark import HarkIE from .hearthisat import HearThisAtIE from .heise import HeiseIE from .hellporno import HellPornoIE from .helsinki import HelsinkiIE from .hentaistigma import HentaiStigmaIE from .historicfilms import HistoricFilmsIE from .history import HistoryIE from .hitbox import HitboxIE, HitboxLiveIE from .hornbunny import HornBunnyIE from .hotnewhiphop import HotNewHipHopIE from .howcast import HowcastIE from .howstuffworks import HowStuffWorksIE from .huffpost import HuffPostIE from .hypem import HypemIE from .iconosquare import IconosquareIE from .ign import IGNIE, OneUPIE from .imdb import ( ImdbIE, ImdbListIE ) from .imgur import ( ImgurIE, ImgurAlbumIE, ) from .ina import InaIE from .indavideo import ( IndavideoIE, IndavideoEmbedIE, ) from .infoq import InfoQIE from .instagram import InstagramIE, InstagramUserIE from .internetvideoarchive import InternetVideoArchiveIE from .iprima import IPrimaIE from .iqiyi import IqiyiIE from .ir90tv import Ir90TvIE from .ivi import ( IviIE, IviCompilationIE ) from .izlesene import IzleseneIE from .jadorecettepub import JadoreCettePubIE from .jeuxvideo import JeuxVideoIE from .jove import JoveIE from .jukebox import JukeboxIE from .jpopsukitv import JpopsukiIE from .kaltura import KalturaIE from .kanalplay import KanalPlayIE from .kankan import KankanIE from .karaoketv import KaraoketvIE from .karrierevideos import KarriereVideosIE from .keezmovies import KeezMoviesIE from .khanacademy import KhanAcademyIE from .kickstarter import KickStarterIE from .keek import KeekIE from .kontrtube import KontrTubeIE from .krasview import KrasViewIE from .ku6 import Ku6IE from .kuwo import ( KuwoIE, KuwoAlbumIE, KuwoChartIE, KuwoSingerIE, KuwoCategoryIE, KuwoMvIE, ) from .la7 import LA7IE from .laola1tv import Laola1TvIE from .lecture2go import Lecture2GoIE from .letv import ( LetvIE, LetvTvIE, LetvPlaylistIE ) from .libsyn import LibsynIE from .lifenews import ( LifeNewsIE, LifeEmbedIE, ) from 
.limelight import ( LimelightMediaIE, LimelightChannelIE, LimelightChannelListIE, ) from .liveleak import LiveLeakIE from .livestream import ( LivestreamIE, LivestreamOriginalIE, LivestreamShortenerIE, ) from .lnkgo import LnkGoIE from .lrt import LRTIE from .lynda import ( LyndaIE, LyndaCourseIE ) from .m6 import M6IE from .macgamestore import MacGameStoreIE from .mailru import MailRuIE from .malemotion import MalemotionIE from .mdr import MDRIE from .megavideoz import MegaVideozIE from .metacafe import MetacafeIE from .metacritic import MetacriticIE from .mgoon import MgoonIE from .minhateca import MinhatecaIE from .ministrygrid import MinistryGridIE from .miomio import MioMioIE from .mit import TechTVMITIE, MITIE, OCWMITIE from .mitele import MiTeleIE from .mixcloud import MixcloudIE from .mlb import MLBIE from .mpora import MporaIE from .moevideo import MoeVideoIE from .mofosex import MofosexIE from .mojvideo import MojvideoIE from .moniker import MonikerIE from .mooshare import MooshareIE from .morningstar import MorningstarIE from .motherless import MotherlessIE from .motorsport import MotorsportIE from .movieclips import MovieClipsIE from .moviezine import MoviezineIE from .movshare import MovShareIE from .mtv import ( MTVIE, MTVServicesEmbeddedIE, MTVIggyIE, MTVDEIE, ) from .muenchentv import MuenchenTVIE from .musicplayon import MusicPlayOnIE from .muzu import MuzuTVIE from .mwave import MwaveIE from .myspace import MySpaceIE, MySpaceAlbumIE from .myspass import MySpassIE from .myvi import MyviIE from .myvideo import MyVideoIE from .myvidster import MyVidsterIE from .nationalgeographic import NationalGeographicIE from .naver import NaverIE from .nba import NBAIE from .nbc import ( NBCIE, NBCNewsIE, NBCSportsIE, NBCSportsVPlayerIE, MSNBCIE, ) from .ndr import ( NDRIE, NJoyIE, NDREmbedBaseIE, NDREmbedIE, NJoyEmbedIE, ) from .ndtv import NDTVIE from .netzkino import NetzkinoIE from .nerdcubed import NerdCubedFeedIE from .nerdist import NerdistIE from 
.neteasemusic import ( NetEaseMusicIE, NetEaseMusicAlbumIE, NetEaseMusicSingerIE, NetEaseMusicListIE, NetEaseMusicMvIE, NetEaseMusicProgramIE, NetEaseMusicDjRadioIE, ) from .newgrounds import NewgroundsIE from .newstube import NewstubeIE from .nextmedia import ( NextMediaIE, NextMediaActionNewsIE, AppleDailyIE, ) from .nfb import NFBIE from .nfl import NFLIE from .nhl import ( NHLIE, NHLNewsIE, NHLVideocenterIE, ) from .niconico import NiconicoIE, NiconicoPlaylistIE from .ninegag import NineGagIE from .noco import NocoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE from .nova import NovaIE from .novamov import NovaMovIE from .nowness import ( NownessIE, NownessPlaylistIE, NownessSeriesIE, ) from .nowtv import NowTVIE from .nowvideo import NowVideoIE from .npo import ( NPOIE, NPOLiveIE, NPORadioIE, NPORadioFragmentIE, VPROIE, WNLIE ) from .nrk import ( NRKIE, NRKPlaylistIE, NRKTVIE, ) from .ntvde import NTVDeIE from .ntvru import NTVRuIE from .nytimes import ( NYTimesIE, NYTimesArticleIE, ) from .nuvid import NuvidIE from .odnoklassniki import OdnoklassnikiIE from .oktoberfesttv import OktoberfestTVIE from .onionstudios import OnionStudiosIE from .ooyala import ( OoyalaIE, OoyalaExternalIE, ) from .orf import ( ORFTVthekIE, ORFOE1IE, ORFFM4IE, ORFIPTVIE, ) from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE from .periscope import ( PeriscopeIE, QuickscopeIE, ) from .philharmoniedeparis import PhilharmonieDeParisIE from .phoenix import PhoenixIE from .photobucket import PhotobucketIE from .pinkbike import PinkbikeIE from .planetaplay import PlanetaPlayIE from .pladform import PladformIE from .played import PlayedIE from .playfm import PlayFMIE from .playtvak import PlaytvakIE from .playvid import PlayvidIE from .playwire import PlaywireIE from .pluralsight import ( PluralsightIE, PluralsightCourseIE, ) from .podomatic import PodomaticIE from .porn91 import Porn91IE from .pornhd import 
PornHdIE from .pornhub import ( PornHubIE, PornHubPlaylistIE, ) from .pornotube import PornotubeIE from .pornovoisines import PornoVoisinesIE from .pornoxo import PornoXOIE from .primesharetv import PrimeShareTVIE from .promptfile import PromptFileIE from .prosiebensat1 import ProSiebenSat1IE from .puls4 import Puls4IE from .pyvideo import PyvideoIE from .qqmusic import ( QQMusicIE, QQMusicSingerIE, QQMusicAlbumIE, QQMusicToplistIE, QQMusicPlaylistIE, ) from .quickvid import QuickVidIE from .r7 import R7IE from .radiode import RadioDeIE from .radiojavan import RadioJavanIE from .radiobremen import RadioBremenIE from .radiofrance import RadioFranceIE from .rai import RaiIE from .rbmaradio import RBMARadioIE from .rds import RDSIE from .redtube import RedTubeIE from .restudy import RestudyIE from .reverbnation import ReverbNationIE from .ringtv import RingTVIE from .ro220 import Ro220IE from .rottentomatoes import RottenTomatoesIE from .roxwel import RoxwelIE from .rtbf import RTBFIE from .rte import RteIE from .rtlnl import RtlNlIE from .rtl2 import RTL2IE from .rtp import RTPIE from .rts import RTSIE from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE from .rtvnh import RTVNHIE from .ruhd import RUHDIE from .rutube import ( RutubeIE, RutubeChannelIE, RutubeEmbedIE, RutubeMovieIE, RutubePersonIE, ) from .rutv import RUTVIE from .ruutu import RuutuIE from .sandia import SandiaIE from .safari import ( SafariIE, SafariCourseIE, ) from .sapo import SapoIE from .savefrom import SaveFromIE from .sbs import SBSIE from .scivee import SciVeeIE from .screencast import ScreencastIE from .screencastomatic import ScreencastOMaticIE from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE from .senateisvp import SenateISVPIE from .servingsys import ServingSysIE from .sexu import SexuIE from .sexykarma import SexyKarmaIE from .shahid import ShahidIE from .shared import SharedIE from .sharesix import ShareSixIE from .sina import SinaIE from .slideshare import 
SlideshareIE from .slutload import SlutloadIE from .smotri import ( SmotriIE, SmotriCommunityIE, SmotriUserIE, SmotriBroadcastIE, ) from .snagfilms import ( SnagFilmsIE, SnagFilmsEmbedIE, ) from .snotr import SnotrIE from .sohu import SohuIE from .soompi import ( SoompiIE, SoompiShowIE, ) from .soundcloud import ( SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE, SoundcloudPlaylistIE ) from .soundgasm import ( SoundgasmIE, SoundgasmProfileIE ) from .southpark import ( SouthParkIE, SouthParkDeIE, SouthParkDkIE, SouthParkEsIE, SouthParkNlIE ) from .space import SpaceIE from .spankbang import SpankBangIE from .spankwire import SpankwireIE from .spiegel import SpiegelIE, SpiegelArticleIE from .spiegeltv import SpiegeltvIE from .spike import SpikeIE from .sport5 import Sport5IE from .sportbox import ( SportBoxIE, SportBoxEmbedIE, ) from .sportdeutschland import SportDeutschlandIE from .srf import SrfIE from .srmediathek import SRMediathekIE from .ssa import SSAIE from .stanfordoc import StanfordOpenClassroomIE from .steam import SteamIE from .streamcloud import StreamcloudIE from .streamcz import StreamCZIE from .streetvoice import StreetVoiceIE from .sunporno import SunPornoIE from .svt import ( SVTIE, SVTPlayIE, ) from .swrmediathek import SWRMediathekIE from .syfy import SyfyIE from .sztvhu import SztvHuIE from .tagesschau import TagesschauIE from .tapely import TapelyIE from .tass import TassIE from .teachertube import ( TeacherTubeIE, TeacherTubeUserIE, ) from .teachingchannel import TeachingChannelIE from .teamcoco import TeamcocoIE from .techtalks import TechTalksIE from .ted import TEDIE from .telebruxelles import TeleBruxellesIE from .telecinco import TelecincoIE from .telegraaf import TelegraafIE from .telemb import TeleMBIE from .teletask import TeleTaskIE from .tenplay import TenPlayIE from .testurl import TestURLIE from .testtube import TestTubeIE from .tf1 import TF1IE from .theonion import TheOnionIE from .theplatform import ( ThePlatformIE, 
ThePlatformFeedIE, ) from .thesixtyone import TheSixtyOneIE from .thisamericanlife import ThisAmericanLifeIE from .thisav import ThisAVIE from .tinypic import TinyPicIE from .tlc import TlcIE, TlcDeIE from .tmz import ( TMZIE, TMZArticleIE, ) from .tnaflix import ( TNAFlixIE, EMPFlixIE, MovieFapIE, ) from .thvideo import ( THVideoIE, THVideoPlaylistIE ) from .toutv import TouTvIE from .toypics import ToypicsUserIE, ToypicsIE from .traileraddict import TrailerAddictIE from .trilulilu import TriluliluIE from .trutube import TruTubeIE from .tube8 import Tube8IE from .tubitv import TubiTvIE from .tudou import TudouIE from .tumblr import TumblrIE from .tunein import TuneInIE from .turbo import TurboIE from .tutv import TutvIE from .tv2 import ( TV2IE, TV2ArticleIE, ) from .tv4 import TV4IE from .tvc import ( TVCIE, TVCArticleIE, ) from .tvigle import TvigleIE from .tvp import TvpIE, TvpSeriesIE from .tvplay import TVPlayIE from .tweakers import TweakersIE from .twentyfourvideo import TwentyFourVideoIE from .twentytwotracks import ( TwentyTwoTracksIE, TwentyTwoTracksGenreIE ) from .twitch import ( TwitchVideoIE, TwitchChapterIE, TwitchVodIE, TwitchProfileIE, TwitchPastBroadcastsIE, TwitchBookmarksIE, TwitchStreamIE, ) from .twitter import TwitterCardIE from .ubu import UbuIE from .udemy import ( UdemyIE, UdemyCourseIE ) from .udn import UDNEmbedIE from .ultimedia import UltimediaIE from .universalmusicfrance import UniversalMusicFranceIE from .unistra import UnistraIE from .urort import UrortIE from .ustream import UstreamIE, UstreamChannelIE from .varzesh3 import Varzesh3IE from .vbox7 import Vbox7IE from .veehd import VeeHDIE from .veoh import VeohIE from .vessel import VesselIE from .vesti import VestiIE from .vevo import VevoIE from .vgtv import ( BTArticleIE, BTVestlendingenIE, VGTVIE, ) from .vh1 import VH1IE from .vice import ViceIE from .viddler import ViddlerIE from .videodetective import VideoDetectiveIE from .videolecturesnet import VideoLecturesNetIE from 
.videofyme import VideofyMeIE from .videomega import VideoMegaIE from .videopremium import VideoPremiumIE from .videott import VideoTtIE from .videoweed import VideoWeedIE from .vidme import VidmeIE from .vidzi import VidziIE from .vier import VierIE, VierVideosIE from .viewster import ViewsterIE from .vimeo import ( VimeoIE, VimeoAlbumIE, VimeoChannelIE, VimeoGroupsIE, VimeoLikesIE, VimeoReviewIE, VimeoUserIE, VimeoWatchLaterIE, ) from .vimple import VimpleIE from .vine import ( VineIE, VineUserIE, ) from .viki import ( VikiIE, VikiChannelIE, ) from .vk import ( VKIE, VKUserVideosIE, ) from .vlive import VLiveIE from .vodlocker import VodlockerIE from .voicerepublic import VoiceRepublicIE from .vporn import VpornIE from .vrt import VRTIE from .vube import VubeIE from .vuclip import VuClipIE from .vulture import VultureIE from .walla import WallaIE from .washingtonpost import WashingtonPostIE from .wat import WatIE from .wayofthemaster import WayOfTheMasterIE from .wdr import ( WDRIE, WDRMobileIE, WDRMausIE, ) from .webofstories import ( WebOfStoriesIE, WebOfStoriesPlaylistIE, ) from .weibo import WeiboIE from .wimp import WimpIE from .wistia import WistiaIE from .worldstarhiphop import WorldStarHipHopIE from .wrzuta import WrzutaIE from .wsj import WSJIE from .xbef import XBefIE from .xboxclips import XboxClipsIE from .xhamster import ( XHamsterIE, XHamsterEmbedIE, ) from .xminus import XMinusIE from .xnxx import XNXXIE from .xstream import XstreamIE from .xtube import XTubeUserIE, XTubeIE from .xuite import XuiteIE from .xvideos import XVideosIE from .xxxymovies import XXXYMoviesIE from .yahoo import ( YahooIE, YahooSearchIE, ) from .yam import YamIE from .yandexmusic import ( YandexMusicTrackIE, YandexMusicAlbumIE, YandexMusicPlaylistIE, ) from .yesjapan import YesJapanIE from .yinyuetai import YinYueTaiIE from .ynet import YnetIE from .youjizz import YouJizzIE from .youku import YoukuIE from .youporn import YouPornIE from .yourupload import YourUploadIE from 
.youtube import ( YoutubeIE, YoutubeChannelIE, YoutubeFavouritesIE, YoutubeHistoryIE, YoutubePlaylistIE, YoutubeRecommendedIE, YoutubeSearchDateIE, YoutubeSearchIE, YoutubeSearchURLIE, YoutubeShowIE, YoutubeSubscriptionsIE, YoutubeTruncatedIDIE, YoutubeTruncatedURLIE, YoutubeUserIE, YoutubeWatchLaterIE, ) from .zapiks import ZapiksIE from .zdf import ZDFIE, ZDFChannelIE from .zingmp3 import ( ZingMp3SongIE, ZingMp3AlbumIE, ) _ALL_CLASSES = [ klass for name, klass in globals().items() if name.endswith('IE') and name != 'GenericIE' ] _ALL_CLASSES.append(GenericIE) def gen_extractors(): """ Return a list of an instance of every supported extractor. The order does matter; the first extractor matched is the one handling the URL. """ return [klass() for klass in _ALL_CLASSES] def list_extractors(age_limit): """ Return a list of extractors that are suitable for the given age, sorted by extractor ID. """ return sorted( filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()), key=lambda ie: ie.IE_NAME.lower()) def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" return globals()[ie_name + 'IE']
unlicense
uoaerg/linux-dccp
scripts/gdb/linux/proc.py
189
8604
#
# gdb helper commands and functions for Linux kernel debugging
#
#  Kernel proc information reader
#
# Copyright (c) 2016 Linaro Ltd
#
# Authors:
#  Kieran Bingham <kieran.bingham@linaro.org>
#
# This work is licensed under the terms of the GNU GPL version 2.
#

import gdb

from linux import constants
from linux import utils
from linux import tasks
from linux import lists
# Explicit names instead of 'from struct import *': only these two are used.
from struct import pack, unpack


class LxCmdLine(gdb.Command):
    """ Report the Linux Commandline used in the current kernel.
        Equivalent to cat /proc/cmdline on a running target"""

    def __init__(self):
        super(LxCmdLine, self).__init__("lx-cmdline", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        gdb.write(gdb.parse_and_eval("saved_command_line").string() + "\n")

LxCmdLine()


class LxVersion(gdb.Command):
    """ Report the Linux Version of the current kernel.
        Equivalent to cat /proc/version on a running target"""

    def __init__(self):
        super(LxVersion, self).__init__("lx-version", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # linux_banner should contain a newline
        gdb.write(gdb.parse_and_eval("linux_banner").string())

LxVersion()


# Resource Structure Printers
#  /proc/iomem
#  /proc/ioports

def get_resources(resource, depth):
    """Walk a struct resource tree, yielding (resource, depth) pairs.

    Siblings are chained through 'sibling'; children recurse one level
    deeper through 'child'."""
    while resource:
        yield resource, depth

        child = resource['child']
        if child:
            for res, deep in get_resources(child, depth + 1):
                yield res, deep

        resource = resource['sibling']


def show_lx_resources(resource_str):
    """Print the resource tree named by resource_str in /proc/iomem style."""
    resource = gdb.parse_and_eval(resource_str)
    # Narrow (16-bit) resource ranges print with 4 hex digits, wider ones
    # with 8, mirroring the kernel's own formatting.
    width = 4 if resource['end'] < 0x10000 else 8
    # Iterate straight to the first child
    for res, depth in get_resources(resource['child'], 0):
        start = int(res['start'])
        end = int(res['end'])
        gdb.write(" " * depth * 2 +
                  "{0:0{1}x}-".format(start, width) +
                  "{0:0{1}x} : ".format(end, width) +
                  res['name'].string() +
                  "\n")


class LxIOMem(gdb.Command):
    """Identify the IO memory resource locations defined by the kernel

Equivalent to cat /proc/iomem on a running target"""

    def __init__(self):
        super(LxIOMem, self).__init__("lx-iomem", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        return show_lx_resources("iomem_resource")

LxIOMem()


class LxIOPorts(gdb.Command):
    """Identify the IO port resource locations defined by the kernel

Equivalent to cat /proc/ioports on a running target"""

    def __init__(self):
        super(LxIOPorts, self).__init__("lx-ioports", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        return show_lx_resources("ioport_resource")

LxIOPorts()


# Mount namespace viewer
#  /proc/mounts

def info_opts(lst, opt):
    """Concatenate the option strings from lst whose flag bits are set in opt."""
    opts = ""
    for key, string in lst.items():
        if opt & key:
            opts += string
    return opts


FS_INFO = {constants.LX_MS_SYNCHRONOUS: ",sync",
           constants.LX_MS_MANDLOCK: ",mand",
           constants.LX_MS_DIRSYNC: ",dirsync",
           constants.LX_MS_NOATIME: ",noatime",
           constants.LX_MS_NODIRATIME: ",nodiratime"}

MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
            constants.LX_MNT_NODEV: ",nodev",
            constants.LX_MNT_NOEXEC: ",noexec",
            constants.LX_MNT_NOATIME: ",noatime",
            constants.LX_MNT_NODIRATIME: ",nodiratime",
            constants.LX_MNT_RELATIME: ",relatime"}

mount_type = utils.CachedType("struct mount")
mount_ptr_type = mount_type.get_type().pointer()


class LxMounts(gdb.Command):
    """Report the VFS mounts of the current process namespace.

Equivalent to cat /proc/mounts on a running target
An integer value can be supplied to display the mount
values of that process namespace"""

    def __init__(self):
        super(LxMounts, self).__init__("lx-mounts", gdb.COMMAND_DATA)

    # Equivalent to proc_namespace.c:show_vfsmnt
    # However, that has the ability to call into s_op functions
    # whereas we cannot and must make do with the information we can obtain.
    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if len(argv) >= 1:
            try:
                pid = int(argv[0])
            except ValueError:
                # Was a bare 'except:'; only the int() conversion can fail.
                raise gdb.GdbError("Provide a PID as integer value")
        else:
            pid = 1

        task = tasks.get_task_by_pid(pid)
        if not task:
            raise gdb.GdbError("Couldn't find a process with PID {}"
                               .format(pid))

        namespace = task['nsproxy']['mnt_ns']
        if not namespace:
            raise gdb.GdbError("No namespace for current process")

        for vfs in lists.list_for_each_entry(namespace['list'],
                                             mount_ptr_type, "mnt_list"):
            devname = vfs['mnt_devname'].string()
            devname = devname if devname else "none"

            # Rebuild the mount path by walking mnt_parent up to the root.
            pathname = ""
            parent = vfs
            while True:
                mntpoint = parent['mnt_mountpoint']
                pathname = utils.dentry_name(mntpoint) + pathname
                if (parent == parent['mnt_parent']):
                    break
                parent = parent['mnt_parent']

            if (pathname == ""):
                pathname = "/"

            superblock = vfs['mnt']['mnt_sb']
            fstype = superblock['s_type']['name'].string()
            s_flags = int(superblock['s_flags'])
            m_flags = int(vfs['mnt']['mnt_flags'])
            rd = "ro" if (s_flags & constants.LX_MS_RDONLY) else "rw"

            gdb.write(
                "{} {} {} {}{}{} 0 0\n"
                .format(devname,
                        pathname,
                        fstype,
                        rd,
                        info_opts(FS_INFO, s_flags),
                        info_opts(MNT_INFO, m_flags)))

LxMounts()


class LxFdtDump(gdb.Command):
    """Output Flattened Device Tree header and dump FDT blob to
the filename specified as the command argument. Equivalent to
'cat /proc/fdt > fdtdump.dtb' on a running target"""

    def __init__(self):
        super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA,
                                        gdb.COMPLETE_FILENAME)

    def fdthdr_to_cpu(self, fdt_header):
        """Convert the big-endian FDT header fields to target byte order.

        The header is always packed big-endian ('>'); it is unpacked
        little-endian only when utils.get_target_endianness() reports 1."""
        fdt_header_be = ">IIIIIII"
        fdt_header_le = "<IIIIIII"

        if utils.get_target_endianness() == 1:
            output_fmt = fdt_header_le
        else:
            output_fmt = fdt_header_be

        return unpack(output_fmt, pack(fdt_header_be,
                                       fdt_header['magic'],
                                       fdt_header['totalsize'],
                                       fdt_header['off_dt_struct'],
                                       fdt_header['off_dt_strings'],
                                       fdt_header['off_mem_rsvmap'],
                                       fdt_header['version'],
                                       fdt_header['last_comp_version']))

    def invoke(self, arg, from_tty):
        if not constants.LX_CONFIG_OF:
            raise gdb.GdbError("Kernel not compiled with CONFIG_OF\n")

        if len(arg) == 0:
            filename = "fdtdump.dtb"
        else:
            filename = arg

        py_fdt_header_ptr = gdb.parse_and_eval(
            "(const struct fdt_header *) initial_boot_params")
        py_fdt_header = py_fdt_header_ptr.dereference()

        fdt_header = self.fdthdr_to_cpu(py_fdt_header)

        if fdt_header[0] != constants.LX_OF_DT_HEADER:
            raise gdb.GdbError("No flattened device tree magic found\n")

        gdb.write("fdt_magic: 0x{:02X}\n".format(fdt_header[0]))
        gdb.write("fdt_totalsize: 0x{:02X}\n".format(fdt_header[1]))
        gdb.write("off_dt_struct: 0x{:02X}\n".format(fdt_header[2]))
        gdb.write("off_dt_strings: 0x{:02X}\n".format(fdt_header[3]))
        gdb.write("off_mem_rsvmap: 0x{:02X}\n".format(fdt_header[4]))
        gdb.write("version: {}\n".format(fdt_header[5]))
        gdb.write("last_comp_version: {}\n".format(fdt_header[6]))

        inf = gdb.inferiors()[0]
        # totalsize (header field 1) covers the whole blob, header included.
        fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr,
                                        fdt_header[1]).tobytes()

        try:
            f = open(filename, 'wb')
        except IOError:
            # Was a bare 'except:'; only the open() can reasonably fail here.
            raise gdb.GdbError("Could not open file to dump fdt")

        with f:  # ensures the file is closed even if write() raises
            f.write(fdt_buf)

        gdb.write("Dumped fdt blob to " + filename + "\n")

LxFdtDump()
gpl-2.0
ping/twython
tests/config.py
2
15296
import os import sys if sys.version_info[0] == 2 and sys.version_info[1] == 6: import unittest2 as unittest else: import unittest app_key = os.environ.get('APP_KEY') app_secret = os.environ.get('APP_SECRET') oauth_token = os.environ.get('OAUTH_TOKEN') oauth_token_secret = os.environ.get('OAUTH_TOKEN_SECRET') access_token = os.environ.get('ACCESS_TOKEN') screen_name = os.environ.get('SCREEN_NAME', '__twython__') # Protected Account you ARE following and they ARE following you protected_twitter_1 = os.environ.get('PROTECTED_TWITTER_1', 'TwythonSecure1') # Protected Account you ARE NOT following protected_twitter_2 = os.environ.get('PROTECTED_TWITTER_2', 'TwythonSecure2') # Test Ids test_tweet_id = os.environ.get('TEST_TWEET_ID', '318577428610031617') test_list_slug = os.environ.get('TEST_LIST_SLUG', 'team') test_list_owner_screen_name = os.environ.get('TEST_LIST_OWNER_SCREEN_NAME', 'twitterapi') test_tweet_object = {u'contributors': None, u'truncated': False, u'text': u'http://t.co/FCmXyI6VHd is a #cool site, lol! @mikehelmick shd #checkitout. 
Love, @__twython__ https://t.co/67pwRvY6z9 http://t.co/N6InAO4B71', u'in_reply_to_status_id': None, u'id': 349683012054683648, u'favorite_count': 0, u'source': u'web', u'retweeted': False, u'coordinates': None, u'entities': {u'symbols': [], u'user_mentions': [{u'id': 29251354, u'indices': [45, 57], u'id_str': u'29251354', u'screen_name': u'mikehelmick', u'name': u'Mike Helmick'}, {u'id': 1431865928, u'indices': [81, 93], u'id_str': u'1431865928', u'screen_name': u'__twython__', u'name': u'Twython'}], u'hashtags': [{u'indices': [28, 33], u'text': u'cool'}, {u'indices': [62, 73], u'text': u'checkitout'}], u'urls': [{u'url': u'http://t.co/FCmXyI6VHd', u'indices': [0, 22], u'expanded_url': u'http://google.com', u'display_url': u'google.com'}, {u'url': u'https://t.co/67pwRvY6z9', u'indices': [94, 117], u'expanded_url': u'https://github.com', u'display_url': u'github.com'}], u'media': [{u'id': 537884378513162240, u'id_str': u'537884378513162240', u'indices': [118, 140], u'media_url': u'http://pbs.twimg.com/media/B3by_g-CQAAhrO5.jpg', u'media_url_https': u'https://pbs.twimg.com/media/B3by_g-CQAAhrO5.jpg', u'url': u'http://t.co/N6InAO4B71', u'display_url': u'pic.twitter.com/N6InAO4B71', u'expanded_url': u'http://twitter.com/pingofglitch/status/537884380060844032/photo/1', u'type': u'photo', u'sizes': {u'large': {u'w': 1024, u'h': 640, u'resize': u'fit'}, u'thumb': {u'w': 150, u'h': 150, u'resize': u'crop'}, u'medium': {u'w': 600, u'h': 375, u'resize': u'fit'}, u'small': {u'w': 340, u'h': 212, u'resize': u'fit'}}}]}, u'in_reply_to_screen_name': None, u'id_str': u'349683012054683648', u'retweet_count': 0, u'in_reply_to_user_id': None, u'favorited': False, u'user': {u'follow_request_sent': False, u'profile_use_background_image': True, u'default_profile_image': True, u'id': 1431865928, u'verified': False, u'profile_text_color': u'333333', u'profile_image_url_https': u'https://si0.twimg.com/sticky/default_profile_images/default_profile_3_normal.png', 
u'profile_sidebar_fill_color': u'DDEEF6', u'entities': {u'description': {u'urls': []}}, u'followers_count': 1, u'profile_sidebar_border_color': u'C0DEED', u'id_str': u'1431865928', u'profile_background_color': u'3D3D3D', u'listed_count': 0, u'profile_background_image_url_https': u'https://si0.twimg.com/images/themes/theme1/bg.png', u'utc_offset': None, u'statuses_count': 2, u'description': u'', u'friends_count': 1, u'location': u'', u'profile_link_color': u'0084B4', u'profile_image_url': u'http://a0.twimg.com/sticky/default_profile_images/default_profile_3_normal.png', u'following': False, u'geo_enabled': False, u'profile_background_image_url': u'http://a0.twimg.com/images/themes/theme1/bg.png', u'screen_name': u'__twython__', u'lang': u'en', u'profile_background_tile': False, u'favourites_count': 0, u'name': u'Twython', u'notifications': False, u'url': None, u'created_at': u'Thu May 16 01:11:09 +0000 2013', u'contributors_enabled': False, u'time_zone': None, u'protected': False, u'default_profile': False, u'is_translator': False}, u'geo': None, u'in_reply_to_user_id_str': None, u'possibly_sensitive': False, u'lang': u'en', u'created_at': u'Wed Jun 26 00:18:21 +0000 2013', u'in_reply_to_status_id_str': None, u'place': None} test_tweet_html = '<a href="http://t.co/FCmXyI6VHd" class="twython-url">google.com</a> is a <a href="https://twitter.com/search?q=%23cool" class="twython-hashtag">#cool</a> site, lol! <a href="https://twitter.com/mikehelmick" class="twython-mention">@mikehelmick</a> shd <a href="https://twitter.com/search?q=%23checkitout" class="twython-hashtag">#checkitout</a>. 
Love, <a href="https://twitter.com/__twython__" class="twython-mention">@__twython__</a> <a href="https://t.co/67pwRvY6z9" class="twython-url">github.com</a> <a href="http://t.co/N6InAO4B71" class="twython-media">pic.twitter.com/N6InAO4B71</a>' test_tweet_symbols_object = {u'text': u'Some symbols: $AAPL and $PEP and $ANOTHER and $A.', u'contributors': None, u'geo': None, u'favorited': True, u'in_reply_to_user_id_str': None, u'user': {u'location': u'', u'id_str': u'2030131', u'protected': False, u'profile_background_tile': False, u'friends_count': 18, u'profile_background_image_url_https': u'https://abs.twimg.com/images/themes/theme1/bg.png', u'entities': {u'description': {u'urls': []}}, u'lang': u'en', u'listed_count': 5, u'default_profile_image': True, u'default_profile': False, u'statuses_count': 447, u'notifications': False, u'profile_background_color': u'9AE4E8', u'profile_sidebar_fill_color': u'E0FF92', u'profile_link_color': u'0000FF', u'profile_image_url_https': u'https://abs.twimg.com/sticky/default_profile_images/default_profile_5_normal.png', u'followers_count': 8, u'geo_enabled': True, u'following': True, u'has_extended_profile': False, u'profile_use_background_image': True, u'profile_text_color': u'000000', u'screen_name': u'philgyfordtest', u'contributors_enabled': False, u'verified': False, u'name': u'Phil Gyford Test', u'profile_sidebar_border_color': u'000000', u'utc_offset': 0, u'profile_image_url': u'http://abs.twimg.com/sticky/default_profile_images/default_profile_5_normal.png', u'id': 2030131, u'favourites_count': 0, u'time_zone': u'London', u'url': None, u'is_translation_enabled': False, u'is_translator': False, u'profile_background_image_url': u'http://abs.twimg.com/images/themes/theme1/bg.png', u'description': u'', u'created_at': u'Fri Mar 23 16:56:52 +0000 2007', u'follow_request_sent': False}, u'in_reply_to_user_id': None, u'retweeted': False, u'coordinates': None, u'place': None, u'in_reply_to_status_id': None, u'lang': u'en', 
u'in_reply_to_status_id_str': None, u'truncated': False, u'retweet_count': 0, u'is_quote_status': False, u'id': 662694880657989632, u'id_str': u'662694880657989632', u'in_reply_to_screen_name': None, u'favorite_count': 1, u'entities': {u'hashtags': [], u'user_mentions': [], u'symbols': [{u'indices': [14, 19], u'text': u'AAPL'}, {u'indices': [24, 28], u'text': u'PEP'}, {u'indices': [46, 48], u'text': u'A'}], u'urls': []}, u'created_at': u'Fri Nov 06 18:15:46 +0000 2015', u'source': u'<a href="http://tapbots.com/software/tweetbot/mac" rel="nofollow">Tweetbot for Mac</a>'} test_tweet_compat_object = {u'contributors': None, u'truncated': True, u'text': u"Say more about what's happening! Rolling out now: photos, videos, GIFs, polls, and Quote Tweets no longer count tow\u2026 https://t.co/SRmsuks2ru", u'is_quote_status': False, u'in_reply_to_status_id': None, u'id': 777915304261193728, u'favorite_count': 13856, u'source': u'<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>', u'retweeted': False, u'coordinates': None, u'entities': {u'symbols': [], u'user_mentions': [], u'hashtags': [], u'urls': [{u'url': u'https://t.co/SRmsuks2ru', u'indices': [117, 140], u'expanded_url': u'https://twitter.com/i/web/status/777915304261193728', u'display_url': u'twitter.com/i/web/status/7\u2026'}]}, u'in_reply_to_screen_name': None, u'id_str': u'777915304261193728', u'retweet_count': 14767, u'in_reply_to_user_id': None, u'favorited': False, u'user': {u'follow_request_sent': False, u'has_extended_profile': False, u'profile_use_background_image': True, u'id': 783214, u'verified': True, u'profile_text_color': u'333333', u'profile_image_url_https': u'https://pbs.twimg.com/profile_images/767879603977191425/29zfZY6I_normal.jpg', u'profile_sidebar_fill_color': u'F6F6F6', u'is_translator': False, u'geo_enabled': True, u'entities': {u'url': {u'urls': [{u'url': u'http://t.co/5iRhy7wTgu', u'indices': [0, 22], u'expanded_url': u'http://blog.twitter.com/', u'display_url': 
u'blog.twitter.com'}]}, u'description': {u'urls': [{u'url': u'https://t.co/qq1HEzvnrA', u'indices': [84, 107], u'expanded_url': u'http://support.twitter.com', u'display_url': u'support.twitter.com'}]}}, u'followers_count': 56827498, u'protected': False, u'location': u'San Francisco, CA', u'default_profile_image': False, u'id_str': u'783214', u'lang': u'en', u'utc_offset': -25200, u'statuses_count': 3161, u'description': u'Your official source for news, updates and tips from Twitter, Inc. Need help? Visit https://t.co/qq1HEzvnrA.', u'friends_count': 145, u'profile_link_color': u'226699', u'profile_image_url': u'http://pbs.twimg.com/profile_images/767879603977191425/29zfZY6I_normal.jpg', u'notifications': False, u'profile_background_image_url_https': u'https://pbs.twimg.com/profile_background_images/657090062/l1uqey5sy82r9ijhke1i.png', u'profile_background_color': u'ACDED6', u'profile_banner_url': u'https://pbs.twimg.com/profile_banners/783214/1471929200', u'profile_background_image_url': u'http://pbs.twimg.com/profile_background_images/657090062/l1uqey5sy82r9ijhke1i.png', u'name': u'Twitter', u'is_translation_enabled': False, u'profile_background_tile': True, u'favourites_count': 2332, u'screen_name': u'twitter', u'url': u'http://t.co/5iRhy7wTgu', u'created_at': u'Tue Feb 20 14:35:54 +0000 2007', u'contributors_enabled': False, u'time_zone': u'Pacific Time (US & Canada)', u'profile_sidebar_border_color': u'FFFFFF', u'default_profile': False, u'following': False, u'listed_count': 90445}, u'geo': None, u'in_reply_to_user_id_str': None, u'possibly_sensitive': False, u'possibly_sensitive_appealable': False, u'lang': u'en', u'created_at': u'Mon Sep 19 17:00:36 +0000 2016', u'in_reply_to_status_id_str': None, u'place': None} test_tweet_extended_object = {u'full_text': u"Say more about what's happening! Rolling out now: photos, videos, GIFs, polls, and Quote Tweets no longer count toward your 140 characters. 
https://t.co/I9pUC0NdZC", u'truncated': False, u'is_quote_status': False, u'in_reply_to_status_id': None, u'id': 777915304261193728, u'favorite_count': 13856, u'contributors': None, u'source': u'<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>', u'retweeted': False, u'coordinates': None, u'entities': {u'symbols': [], u'user_mentions': [], u'hashtags': [], u'urls': [], u'media': [{u'expanded_url': u'https://twitter.com/twitter/status/777915304261193728/photo/1', u'sizes': {u'small': {u'h': 340, u'w': 340, u'resize': u'fit'}, u'large': {u'h': 700, u'w': 700, u'resize': u'fit'}, u'medium': {u'h': 600, u'w': 600, u'resize': u'fit'}, u'thumb': {u'h': 150, u'w': 150, u'resize': u'crop'}}, u'url': u'https://t.co/I9pUC0NdZC', u'media_url_https': u'https://pbs.twimg.com/tweet_video_thumb/Csu1TzEVMAAAEv7.jpg', u'id_str': u'777914712382058496', u'indices': [140, 163], u'media_url': u'http://pbs.twimg.com/tweet_video_thumb/Csu1TzEVMAAAEv7.jpg', u'type': u'photo', u'id': 777914712382058496, u'display_url': u'pic.twitter.com/I9pUC0NdZC'}]}, u'in_reply_to_screen_name': None, u'id_str': u'777915304261193728', u'display_text_range': [0, 139], u'retweet_count': 14767, u'in_reply_to_user_id': None, u'favorited': False, u'user': {u'follow_request_sent': False, u'has_extended_profile': False, u'profile_use_background_image': True, u'id': 783214, u'verified': True, u'profile_text_color': u'333333', u'profile_image_url_https': u'https://pbs.twimg.com/profile_images/767879603977191425/29zfZY6I_normal.jpg', u'profile_sidebar_fill_color': u'F6F6F6', u'is_translator': False, u'geo_enabled': True, u'entities': {u'url': {u'urls': [{u'url': u'http://t.co/5iRhy7wTgu', u'indices': [0, 22], u'expanded_url': u'http://blog.twitter.com/', u'display_url': u'blog.twitter.com'}]}, u'description': {u'urls': [{u'url': u'https://t.co/qq1HEzvnrA', u'indices': [84, 107], u'expanded_url': u'http://support.twitter.com', u'display_url': u'support.twitter.com'}]}}, u'followers_count': 56827498, 
u'protected': False, u'location': u'San Francisco, CA', u'default_profile_image': False, u'id_str': u'783214', u'lang': u'en', u'utc_offset': -25200, u'statuses_count': 3161, u'description': u'Your official source for news, updates and tips from Twitter, Inc. Need help? Visit https://t.co/qq1HEzvnrA.', u'friends_count': 145, u'profile_link_color': u'226699', u'profile_image_url': u'http://pbs.twimg.com/profile_images/767879603977191425/29zfZY6I_normal.jpg', u'notifications': False, u'profile_background_image_url_https': u'https://pbs.twimg.com/profile_background_images/657090062/l1uqey5sy82r9ijhke1i.png', u'profile_background_color': u'ACDED6', u'profile_banner_url': u'https://pbs.twimg.com/profile_banners/783214/1471929200', u'profile_background_image_url': u'http://pbs.twimg.com/profile_background_images/657090062/l1uqey5sy82r9ijhke1i.png', u'name': u'Twitter', u'is_translation_enabled': False, u'profile_background_tile': True, u'favourites_count': 2332, u'screen_name': u'twitter', u'url': u'http://t.co/5iRhy7wTgu', u'created_at': u'Tue Feb 20 14:35:54 +0000 2007', u'contributors_enabled': False, u'time_zone': u'Pacific Time (US & Canada)', u'profile_sidebar_border_color': u'FFFFFF', u'default_profile': False, u'following': False, u'listed_count': 90445}, u'geo': None, u'in_reply_to_user_id_str': None, u'possibly_sensitive': False, u'possibly_sensitive_appealable': False, u'lang': u'en', u'created_at': u'Mon Sep 19 17:00:36 +0000 2016', u'in_reply_to_status_id_str': None, u'place': None, u'extended_entities': {u'media': [{u'expanded_url': u'https://twitter.com/twitter/status/777915304261193728/photo/1', u'display_url': u'pic.twitter.com/I9pUC0NdZC', u'url': u'https://t.co/I9pUC0NdZC', u'media_url_https': u'https://pbs.twimg.com/tweet_video_thumb/Csu1TzEVMAAAEv7.jpg', u'video_info': {u'aspect_ratio': [1, 1], u'variants': [{u'url': u'https://pbs.twimg.com/tweet_video/Csu1TzEVMAAAEv7.mp4', u'bitrate': 0, u'content_type': u'video/mp4'}]}, u'id_str': 
u'777914712382058496', u'sizes': {u'small': {u'h': 340, u'w': 340, u'resize': u'fit'}, u'large': {u'h': 700, u'w': 700, u'resize': u'fit'}, u'medium': {u'h': 600, u'w': 600, u'resize': u'fit'}, u'thumb': {u'h': 150, u'w': 150, u'resize': u'crop'}}, u'indices': [140, 163], u'type': u'animated_gif', u'id': 777914712382058496, u'media_url': u'http://pbs.twimg.com/tweet_video_thumb/Csu1TzEVMAAAEv7.jpg'}]}} test_tweet_extended_html = 'Say more about what\'s happening! Rolling out now: photos, videos, GIFs, polls, and Quote Tweets no longer count toward your 140 characters.<span class="twython-tweet-suffix"> <a href="https://t.co/I9pUC0NdZC" class="twython-media">pic.twitter.com/I9pUC0NdZC</a></span>'
mit
jsayles/philometer
serialCapture.py
1
1108
import os import sys import glob import serial import time from datetime import datetime BAUDRATE=9600 DATA_FILE="/tmp/serialData.txt" def getCOM(): # Assume the first /dev/tty.usb* is our COM ports = glob.glob("/dev/tty.usb*") if len(ports) >= 1: return ports[0] return None def monitor(): wait_for_it = False comport = getCOM() if not comport: wait_for_it = True print "Connect the Philometer..." while not comport: time.sleep(1) comport = getCOM() datafile = "data/%s.txt" % datetime.now().isoformat()[:16] print "Capturing %s to %s" % (comport, datafile) ser = serial.Serial(comport, BAUDRATE, timeout=0) if wait_for_it: sys.stdout.write("Waiting for device to register") for i in range(10): sys.stdout.write('.') sys.stdout.flush() time.sleep(1) print "Done! \n" print "Collecting data..." while (1): line = ser.readline() if (line != ""): text_file = open(datafile, "a") text_file.write(line) text_file.close() monitor()
gpl-3.0
luotao1/Paddle
python/paddle/fluid/tests/unittests/test_subtract_op.py
2
4943
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np import paddle import paddle.fluid.core as core class ApiSubtractTest(unittest.TestCase): def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) else: self.place = core.CPUPlace() self.input_x = np.random.rand(10, 15).astype("float32") self.input_y = np.random.rand(10, 15).astype("float32") self.input_z = np.random.rand(15).astype("float32") self.input_a = np.array([0, np.nan, np.nan]).astype('int64') self.input_b = np.array([2, np.inf, -np.inf]).astype('int64') self.input_c = np.array([4, 1, 3]).astype('int64') self.np_expected1 = np.subtract(self.input_x, self.input_y) self.np_expected2 = np.subtract(self.input_x, self.input_z) self.np_expected3 = np.subtract(self.input_a, self.input_c) self.np_expected4 = np.subtract(self.input_b, self.input_c) def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_max = paddle.subtract(data_x, data_y) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, "y": self.input_y}, fetch_list=[result_max]) self.assertTrue(np.allclose(res, self.np_expected1)) with 
paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_max = paddle.subtract(data_x, data_z) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, "z": self.input_z}, fetch_list=[result_max]) self.assertTrue(np.allclose(res, self.np_expected2)) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.subtract(data_a, data_c) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"a": self.input_a, "c": self.input_c}, fetch_list=[result_max]) self.assertTrue(np.allclose(res, self.np_expected3)) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.subtract(data_b, data_c) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"b": self.input_b, "c": self.input_c}, fetch_list=[result_max]) self.assertTrue(np.allclose(res, self.np_expected4)) def test_dynamic_api(self): paddle.disable_static() x = paddle.to_tensor(self.input_x) y = paddle.to_tensor(self.input_y) z = paddle.to_tensor(self.input_z) a = paddle.to_tensor(self.input_a) b = paddle.to_tensor(self.input_b) c = paddle.to_tensor(self.input_c) res = paddle.subtract(x, y) res = res.numpy() self.assertTrue(np.allclose(res, self.np_expected1)) # test broadcast res = paddle.subtract(x, z) res = res.numpy() self.assertTrue(np.allclose(res, self.np_expected2)) res = paddle.subtract(a, c) res = res.numpy() self.assertTrue(np.allclose(res, self.np_expected3)) res = paddle.subtract(b, c) res = res.numpy() self.assertTrue(np.allclose(res, self.np_expected4))
apache-2.0
nbeckman/nolacoaster
KeepPhotoDump/keep_photo_dump.py
1
3817
#! /usr/bin/python # Keep Photo Dump is meant to get your photos out of Google Keep pages # as exported from Google Takeout. # # The photos are base64 encoded. # This script goes through a given html file and finds all the base64 # encoded image tags and writes them to disk as JPG. It should work just # as well for extracting photos out of any web page with base64 encoded images. # May use lots of RAM! # # Usage: # ./keep_photo_dump.py my_keep_file.html # import argparse import base64 import os from HTMLParser import HTMLParser # Writes a base64 encoded string to a file, as binary. # # Args: # base64_img: An image encoded as base 64. # filename: The name of the file to write to. def decode_and_write_file(base64_img, filename): decoded_img = base64.b64decode(base64_img) with open(filename, 'wb') as file: file.write(decoded_img) # The string that corresponds to a base64 encoded jpg. base64_jpg_prefix = "data:image/jpeg;base64" # Is the given stirng a base64 encoded image from the <img src=""> tag # and attribute? def is_base64_encoded_src(src): # I'm assuming there can be other image types that I might want to # decode here. But for now, we only recognize this one string. return src.startswith(base64_jpg_prefix) # For an encoded image src attribute, returns the encoding type and # the data as a part. def encoding_data_pair(src): partition = src.partition(',') return (partition[0], partition[2]) # Returns the value of the src attribute or None. # Args: # attrs: A list of (attribute name, value) pairs. # Returns: # The value corresponding to 'src' or None if src is not in the list. def find_source_attribute(attrs): for (attr_name, attr_value) in attrs: if attr_name == "src": return attr_value return None # This class is an HTML parser that looks for <img> tags containing # base64 images (i.e., those whose source starts with # "data:image/jpeg;base64,"). When the feed() method completes on an # HTML string, the encoding_pairs() method will return those images. 
class Base64ImgGrabber(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.__encoding_pairs = [] def handle_starttag(self, tag, attrs): if tag == "img": src_value = find_source_attribute(attrs) if src_value and is_base64_encoded_src(src_value): encoding_pair = encoding_data_pair(src_value) self.__encoding_pairs.append(encoding_pair) # Returns the images and their encodings seen during parsing. # # Returns: # A list of pairs. The key in each pair is the encoding type. The value # is the data. def encoding_pairs(self): return self.__encoding_pairs # From a path, returns the file name without extension or path. # "/a/b/c.txt" --> "c" def file_base(path): base = os.path.basename(path) return os.path.splitext(base)[0] # Parses the html file, dumps all its images to disk. # # Args: # html_file: The html file to parse. # file_suffix: When writing image files to disk, use this suffix. def parse_html_dump_images(html_file, file_suffix): html_parser = Base64ImgGrabber() html_parser.feed(html_file.read()) file_index = 1 for (encoding, data) in html_parser.encoding_pairs(): filename = file_base(html_file.name) + str(file_index) + file_suffix decode_and_write_file(data, filename) file_index = file_index + 1 # The "main" function. # # Set up flags. parser = argparse.ArgumentParser(description=""" In a Google Keep HTML file, finds all the images encoded as base64 and writes them to disk as JPG images. """) parser.add_argument('keep_html_file', metavar='keep_html_file', type=file, help='the input html file path') args = parser.parse_args() # Parse html and dump. parse_html_dump_images(args.keep_html_file, ".jpg")
bsd-3-clause
wemanuel/smry
server-auth/ls/google-cloud-sdk/.install/.backup/lib/dns/rdtypes/ANY/CNAME.py
248
1092
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import dns.rdtypes.nsbase class CNAME(dns.rdtypes.nsbase.NSBase): """CNAME record Note: although CNAME is officially a singleton type, dnspython allows non-singleton CNAME rdatasets because such sets have been commonly used by BIND and other nameservers for load balancing.""" pass
apache-2.0
Juraci/tempest
tempest/api/compute/test_quotas.py
9
3658
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.compute import base from tempest.common import tempest_fixtures as fixtures from tempest import test class QuotasTestJSON(base.BaseV2ComputeTest): @classmethod def skip_checks(cls): super(QuotasTestJSON, cls).skip_checks() if not test.is_extension_enabled('os-quota-sets', 'compute'): msg = "quotas extension not enabled." raise cls.skipException(msg) def setUp(self): # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests. 
self.useFixture(fixtures.LockFixture('compute_quotas')) super(QuotasTestJSON, self).setUp() @classmethod def setup_clients(cls): super(QuotasTestJSON, cls).setup_clients() cls.client = cls.quotas_client @classmethod def resource_setup(cls): super(QuotasTestJSON, cls).resource_setup() cls.tenant_id = cls.client.tenant_id cls.user_id = cls.client.user_id cls.default_quota_set = set(('injected_file_content_bytes', 'metadata_items', 'injected_files', 'ram', 'floating_ips', 'fixed_ips', 'key_pairs', 'injected_file_path_bytes', 'instances', 'security_group_rules', 'cores', 'security_groups')) @test.idempotent_id('f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107') def test_get_quotas(self): # User can get the quota set for it's tenant expected_quota_set = self.default_quota_set | set(['id']) quota_set = self.client.show_quota_set(self.tenant_id) self.assertEqual(quota_set['id'], self.tenant_id) for quota in expected_quota_set: self.assertIn(quota, quota_set.keys()) # get the quota set using user id quota_set = self.client.show_quota_set(self.tenant_id, self.user_id) self.assertEqual(quota_set['id'], self.tenant_id) for quota in expected_quota_set: self.assertIn(quota, quota_set.keys()) @test.idempotent_id('9bfecac7-b966-4f47-913f-1a9e2c12134a') def test_get_default_quotas(self): # User can get the default quota set for it's tenant expected_quota_set = self.default_quota_set | set(['id']) quota_set = self.client.show_default_quota_set(self.tenant_id) self.assertEqual(quota_set['id'], self.tenant_id) for quota in expected_quota_set: self.assertIn(quota, quota_set.keys()) @test.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc') def test_compare_tenant_quotas_with_default_quotas(self): # Tenants are created with the default quota values defualt_quota_set = \ self.client.show_default_quota_set(self.tenant_id) tenant_quota_set = self.client.show_quota_set(self.tenant_id) self.assertEqual(defualt_quota_set, tenant_quota_set)
apache-2.0
ybellavance/python-for-android
python-modules/twisted/twisted/python/zshcomp.py
61
27579
# -*- test-case-name: twisted.test.test_zshcomp -*- # Copyright (c) 2006 Twisted Matrix Laboratories. # See LICENSE for details. """ Rebuild the completion functions for the currently active version of Twisted:: $ python zshcomp.py -i This module implements a zsh code generator which generates completion code for commands that use twisted.python.usage. This is the stuff that makes pressing Tab at the command line work. Maintainer: Eric Mangold To build completion functions for your own commands, and not Twisted commands, then just do something like this:: o = mymodule.MyOptions() f = file('_mycommand', 'w') Builder("mycommand", o, f).write() Then all you have to do is place the generated file somewhere in your C{$fpath}, and restart zsh. Note the "site-functions" directory in your C{$fpath} where you may install 3rd-party completion functions (like the one you're building). Call C{siteFunctionsPath} to locate this directory programmatically. SPECIAL CLASS VARIABLES. You may set these on your usage.Options subclass:: zsh_altArgDescr zsh_multiUse zsh_mutuallyExclusive zsh_actions zsh_actionDescr zsh_extras Here is what they mean (with examples):: zsh_altArgDescr = {"foo":"use this description for foo instead"} A dict mapping long option names to alternate descriptions. When this variable is present, the descriptions contained here will override those descriptions provided in the optFlags and optParameters variables. zsh_multiUse = ["foo", "bar"] A sequence containing those long option names which may appear on the command line more than once. By default, options will only be completed one time. zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")] A sequence of sequences, with each sub-sequence containing those long option names that are mutually exclusive. That is, those options that cannot appear on the command line together. 
zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)", "colors":"_values -s , 'colors to use' red green blue"} A dict mapping long option names to Zsh "actions". These actions define what will be completed as the argument to the given option. By default, all files/dirs will be completed if no action is given. Callables may instead be given for the values in this dict. The callable should accept no arguments, and return a string that will be used as the zsh "action" in the same way as the literal strings in the examples above. As you can see in the example above. The "foo" option will have files that end in .foo completed when the user presses Tab. The "bar" option will have either of the strings "one", "two", or "three" completed when the user presses Tab. "colors" will allow multiple arguments to be completed, seperated by commas. The possible arguments are red, green, and blue. Examples:: my_command --foo some-file.foo --colors=red,green my_command --colors=green my_command --colors=green,blue Actions may take many forms, and it is beyond the scope of this document to illustrate them all. Please refer to the documention for the Zsh _arguments function. zshcomp is basically a front-end to Zsh's _arguments completion function. That documentation is available on the zsh web site at this URL: U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124} zsh_actionDescr = {"logfile":"log file name", "random":"random seed"} A dict mapping long option names to a description for the corresponding zsh "action". These descriptions are show above the generated matches when the user is doing completions for this option. Normally Zsh does not show these descriptions unless you have "verbose" completion turned on. 
Turn on verbosity with this in your ~/.zshrc:: zstyle ':completion:*' verbose yes zstyle ':completion:*:descriptions' format '%B%d%b' zsh_extras = [":file to read from:action", ":file to write to:action"] A sequence of extra arguments that will be passed verbatim to Zsh's _arguments completion function. The _arguments function does all the hard work of doing command line completions. You can see how zshcomp invokes the _arguments call by looking at the generated completion files that this module creates. *** NOTE *** You will need to use this variable to describe completions for normal command line arguments. That is, those arguments that are not associated with an option. That is, the arguments that are given to the parseArgs method of your usage.Options subclass. In the example above, the 1st non-option argument will be described as "file to read from" and completion options will be generated in accordance with the "action". (See above about zsh "actions") The 2nd non-option argument will be described as "file to write to" and the action will be interpreted likewise. Things you can put here are all documented under the _arguments function here: U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124} Zsh Notes: To enable advanced completion add something like this to your ~/.zshrc:: autoload -U compinit compinit For some extra verbosity, and general niceness add these lines too:: zstyle ':completion:*' verbose yes zstyle ':completion:*:descriptions' format '%B%d%b' zstyle ':completion:*:messages' format '%d' zstyle ':completion:*:warnings' format 'No matches for: %d' Have fun! 
""" import itertools, sys, commands, os.path from twisted.python import reflect, util, usage from twisted.scripts.mktap import IServiceMaker class MyOptions(usage.Options): """ Options for this file """ longdesc = "" synopsis = "Usage: python zshcomp.py [--install | -i] | <output directory>" optFlags = [["install", "i", 'Output files to the "installation" directory ' \ '(twisted/python/zsh in the currently active ' \ 'Twisted package)']] optParameters = [["directory", "d", None, "Output files to this directory"]] def postOptions(self): if self['install'] and self['directory']: raise usage.UsageError, "Can't have --install and " \ "--directory at the same time" if not self['install'] and not self['directory']: raise usage.UsageError, "Not enough arguments" if self['directory'] and not os.path.isdir(self['directory']): raise usage.UsageError, "%s is not a directory" % self['directory'] class Builder: def __init__(self, cmd_name, options, file): """ @type cmd_name: C{str} @param cmd_name: The name of the command @type options: C{twisted.usage.Options} @param options: The C{twisted.usage.Options} instance defined for this command @type file: C{file} @param file: The C{file} to write the completion function to """ self.cmd_name = cmd_name self.options = options self.file = file def write(self): """ Write the completion function to the file given to __init__ @return: C{None} """ # by default, we just write out a single call to _arguments self.file.write('#compdef %s\n' % (self.cmd_name,)) gen = ArgumentsGenerator(self.cmd_name, self.options, self.file) gen.write() class SubcommandBuilder(Builder): """ Use this builder for commands that have sub-commands. twisted.python.usage has the notion of sub-commands that are defined using an entirely seperate Options class. 
""" interface = None subcmdLabel = None def write(self): """ Write the completion function to the file given to __init__ @return: C{None} """ self.file.write('#compdef %s\n' % (self.cmd_name,)) self.file.write('local _zsh_subcmds_array\n_zsh_subcmds_array=(\n') from twisted import plugin as newplugin plugins = newplugin.getPlugins(self.interface) for p in plugins: self.file.write('"%s:%s"\n' % (p.tapname, p.description)) self.file.write(")\n\n") self.options.__class__.zsh_extras = ['*::subcmd:->subcmd'] gen = ArgumentsGenerator(self.cmd_name, self.options, self.file) gen.write() self.file.write("""if (( CURRENT == 1 )); then _describe "%s" _zsh_subcmds_array && ret=0 fi (( ret )) || return 0 service="$words[1]" case $service in\n""" % (self.subcmdLabel,)) plugins = newplugin.getPlugins(self.interface) for p in plugins: self.file.write(p.tapname + ")\n") gen = ArgumentsGenerator(p.tapname, p.options(), self.file) gen.write() self.file.write(";;\n") self.file.write("*) _message \"don't know how to" \ " complete $service\";;\nesac") class MktapBuilder(SubcommandBuilder): """ Builder for the mktap command """ interface = IServiceMaker subcmdLabel = 'tap to build' class TwistdBuilder(SubcommandBuilder): """ Builder for the twistd command """ interface = IServiceMaker subcmdLabel = 'service to run' class ArgumentsGenerator: """ Generate a call to the zsh _arguments completion function based on data in a usage.Options subclass """ def __init__(self, cmd_name, options, file): """ @type cmd_name: C{str} @param cmd_name: The name of the command @type options: C{twisted.usage.Options} @param options: The C{twisted.usage.Options} instance defined for this command @type file: C{file} @param file: The C{file} to write the completion function to """ self.cmd_name = cmd_name self.options = options self.file = file self.altArgDescr = {} self.actionDescr = {} self.multiUse = [] self.mutuallyExclusive = [] self.actions = {} self.extras = [] aCL = reflect.accumulateClassList aCD = 
reflect.accumulateClassDict aCD(options.__class__, 'zsh_altArgDescr', self.altArgDescr) aCD(options.__class__, 'zsh_actionDescr', self.actionDescr) aCL(options.__class__, 'zsh_multiUse', self.multiUse) aCL(options.__class__, 'zsh_mutuallyExclusive', self.mutuallyExclusive) aCD(options.__class__, 'zsh_actions', self.actions) aCL(options.__class__, 'zsh_extras', self.extras) optFlags = [] optParams = [] aCL(options.__class__, 'optFlags', optFlags) aCL(options.__class__, 'optParameters', optParams) for i, optList in enumerate(optFlags): if len(optList) != 3: optFlags[i] = util.padTo(3, optList) for i, optList in enumerate(optParams): if len(optList) != 4: optParams[i] = util.padTo(4, optList) self.optFlags = optFlags self.optParams = optParams optParams_d = {} for optList in optParams: optParams_d[optList[0]] = optList[1:] self.optParams_d = optParams_d optFlags_d = {} for optList in optFlags: optFlags_d[optList[0]] = optList[1:] self.optFlags_d = optFlags_d optAll_d = {} optAll_d.update(optParams_d) optAll_d.update(optFlags_d) self.optAll_d = optAll_d self.addAdditionalOptions() # makes sure none of the zsh_ data structures reference option # names that don't exist. (great for catching typos) self.verifyZshNames() self.excludes = self.makeExcludesDict() def write(self): """ Write the zsh completion code to the file given to __init__ @return: C{None} """ self.writeHeader() self.writeExtras() self.writeOptions() self.writeFooter() def writeHeader(self): """ This is the start of the code that calls _arguments @return: C{None} """ self.file.write('_arguments -s -A "-*" \\\n') def writeOptions(self): """ Write out zsh code for each option in this command @return: C{None} """ optNames = self.optAll_d.keys() optNames.sort() for long in optNames: self.writeOpt(long) def writeExtras(self): """ Write out the "extras" list. 
These are just passed verbatim to the _arguments call @return: C{None} """ for s in self.extras: self.file.write(escape(s)) self.file.write(' \\\n') def writeFooter(self): """ Write the last bit of code that finishes the call to _arguments @return: C{None} """ self.file.write('&& return 0\n') def verifyZshNames(self): """ Ensure that none of the names given in zsh_* variables are typoed @return: C{None} @raise ValueError: Raised if unknown option names have been given in zsh_* variables """ def err(name): raise ValueError, "Unknown option name \"%s\" found while\n" \ "examining zsh_ attributes for the %s command" % ( name, self.cmd_name) for name in itertools.chain(self.altArgDescr, self.actionDescr, self.actions, self.multiUse): if name not in self.optAll_d: err(name) for seq in self.mutuallyExclusive: for name in seq: if name not in self.optAll_d: err(name) def excludeStr(self, long, buildShort=False): """ Generate an "exclusion string" for the given option @type long: C{str} @param long: The long name of the option (i.e. "verbose" instead of "v") @type buildShort: C{bool} @param buildShort: May be True to indicate we're building an excludes string for the short option that correspondes to the given long opt @return: The generated C{str} """ if long in self.excludes: exclusions = self.excludes[long][:] else: exclusions = [] # if long isn't a multiUse option (can't appear on the cmd line more # than once), then we have to exclude the short option if we're # building for the long option, and vice versa. 
if long not in self.multiUse: if buildShort is False: short = self.getShortOption(long) if short is not None: exclusions.append(short) else: exclusions.append(long) if not exclusions: return '' strings = [] for optName in exclusions: if len(optName) == 1: # short option strings.append("-" + optName) else: strings.append("--" + optName) return "(%s)" % " ".join(strings) def makeExcludesDict(self): """ @return: A C{dict} that maps each option name appearing in self.mutuallyExclusive to a list of those option names that is it mutually exclusive with (can't appear on the cmd line with) """ #create a mapping of long option name -> single character name longToShort = {} for optList in itertools.chain(self.optParams, self.optFlags): try: if optList[1] != None: longToShort[optList[0]] = optList[1] except IndexError: pass excludes = {} for lst in self.mutuallyExclusive: for i, long in enumerate(lst): tmp = [] tmp.extend(lst[:i]) tmp.extend(lst[i+1:]) for name in tmp[:]: if name in longToShort: tmp.append(longToShort[name]) if long in excludes: excludes[long].extend(tmp) else: excludes[long] = tmp return excludes def writeOpt(self, long): """ Write out the zsh code for the given argument. This is just part of the one big call to _arguments @type long: C{str} @param long: The long name of the option (i.e. "verbose" instead of "v") @return: C{None} """ if long in self.optFlags_d: # It's a flag option. Not one that takes a parameter. 
long_field = "--%s" % long else: long_field = "--%s=" % long short = self.getShortOption(long) if short != None: short_field = "-" + short else: short_field = '' descr = self.getDescription(long) descr_field = descr.replace("[", "\[") descr_field = descr_field.replace("]", "\]") descr_field = '[%s]' % descr_field if long in self.actionDescr: actionDescr_field = self.actionDescr[long] else: actionDescr_field = descr action_field = self.getAction(long) if long in self.multiUse: multi_field = '*' else: multi_field = '' longExclusions_field = self.excludeStr(long) if short: #we have to write an extra line for the short option if we have one shortExclusions_field = self.excludeStr(long, buildShort=True) self.file.write(escape('%s%s%s%s%s' % (shortExclusions_field, multi_field, short_field, descr_field, action_field))) self.file.write(' \\\n') self.file.write(escape('%s%s%s%s%s' % (longExclusions_field, multi_field, long_field, descr_field, action_field))) self.file.write(' \\\n') def getAction(self, long): """ Return a zsh "action" string for the given argument @return: C{str} """ if long in self.actions: if callable(self.actions[long]): action = self.actions[long]() else: action = self.actions[long] return ":%s:%s" % (self.getActionDescr(long), action) if long in self.optParams_d: return ':%s:_files' % self.getActionDescr(long) return '' def getActionDescr(self, long): """ Return the description to be used when this argument is completed @return: C{str} """ if long in self.actionDescr: return self.actionDescr[long] else: return long def getDescription(self, long): """ Return the description to be used for this argument @return: C{str} """ #check if we have an alternate descr for this arg, and if so use it if long in self.altArgDescr: return self.altArgDescr[long] #otherwise we have to get it from the optFlags or optParams try: descr = self.optFlags_d[long][1] except KeyError: try: descr = self.optParams_d[long][2] except KeyError: descr = None if descr is not None: 
return descr # lets try to get it from the opt_foo method doc string if there is one longMangled = long.replace('-', '_') # this is what t.p.usage does obj = getattr(self.options, 'opt_%s' % longMangled, None) if obj: descr = descrFromDoc(obj) if descr is not None: return descr return long # we really ought to have a good description to use def getShortOption(self, long): """ Return the short option letter or None @return: C{str} or C{None} """ optList = self.optAll_d[long] try: return optList[0] or None except IndexError: pass def addAdditionalOptions(self): """ Add additional options to the optFlags and optParams lists. These will be defined by 'opt_foo' methods of the Options subclass @return: C{None} """ methodsDict = {} reflect.accumulateMethods(self.options, methodsDict, 'opt_') methodToShort = {} for name in methodsDict.copy(): if len(name) == 1: methodToShort[methodsDict[name]] = name del methodsDict[name] for methodName, methodObj in methodsDict.items(): long = methodName.replace('_', '-') # t.p.usage does this # if this option is already defined by the optFlags or # optParameters then we don't want to override that data if long in self.optAll_d: continue descr = self.getDescription(long) short = None if methodObj in methodToShort: short = methodToShort[methodObj] reqArgs = methodObj.im_func.func_code.co_argcount if reqArgs == 2: self.optParams.append([long, short, None, descr]) self.optParams_d[long] = [short, None, descr] self.optAll_d[long] = [short, None, descr] elif reqArgs == 1: self.optFlags.append([long, short, descr]) self.optFlags_d[long] = [short, descr] self.optAll_d[long] = [short, None, descr] else: raise TypeError, '%r has wrong number ' \ 'of arguments' % (methodObj,) def descrFromDoc(obj): """ Generate an appropriate description from docstring of the given object """ if obj.__doc__ is None: return None lines = obj.__doc__.split("\n") descr = None try: if lines[0] != "" and not lines[0].isspace(): descr = lines[0].lstrip() # skip first line 
if it's blank elif lines[1] != "" and not lines[1].isspace(): descr = lines[1].lstrip() except IndexError: pass return descr def firstLine(s): """ Return the first line of the given string """ try: i = s.index('\n') return s[:i] except ValueError: return s def escape(str): """ Shell escape the given string """ return commands.mkarg(str)[1:] def siteFunctionsPath(): """ Return the path to the system-wide site-functions directory or C{None} if it cannot be determined """ try: cmd = "zsh -f -c 'echo ${(M)fpath:#/*/site-functions}'" output = commands.getoutput(cmd) if os.path.isdir(output): return output except: pass generateFor = [('conch', 'twisted.conch.scripts.conch', 'ClientOptions'), ('mktap', 'twisted.scripts.mktap', 'FirstPassOptions'), ('trial', 'twisted.scripts.trial', 'Options'), ('cftp', 'twisted.conch.scripts.cftp', 'ClientOptions'), ('tapconvert', 'twisted.scripts.tapconvert', 'ConvertOptions'), ('twistd', 'twisted.scripts.twistd', 'ServerOptions'), ('ckeygen', 'twisted.conch.scripts.ckeygen', 'GeneralOptions'), ('lore', 'twisted.lore.scripts.lore', 'Options'), ('pyhtmlizer', 'twisted.scripts.htmlizer', 'Options'), ('tap2deb', 'twisted.scripts.tap2deb', 'MyOptions'), ('tkconch', 'twisted.conch.scripts.tkconch', 'GeneralOptions'), ('manhole', 'twisted.scripts.manhole', 'MyOptions'), ('tap2rpm', 'twisted.scripts.tap2rpm', 'MyOptions'), ('websetroot', None, None), ('tkmktap', None, None), ] # NOTE: the commands using None above are no longer included in Twisted. # However due to limitations in zsh's completion system the version of # _twisted_zsh_stub shipped with zsh contains a static list of Twisted's # commands. 
It will display errors if completion functions for these missing # commands are not found :( So we just include dummy (empty) completion # function files specialBuilders = {'mktap' : MktapBuilder, 'twistd' : TwistdBuilder} def makeCompFunctionFiles(out_path, generateFor=generateFor, specialBuilders=specialBuilders): """ Generate completion function files in the given directory for all twisted commands @type out_path: C{str} @param out_path: The path to the directory to generate completion function fils in @param generateFor: Sequence in the form of the 'generateFor' top-level variable as defined in this module. Indicates what commands to build completion files for. @param specialBuilders: Sequence in the form of the 'specialBuilders' top-level variable as defined in this module. Indicates what commands require a special Builder class. @return: C{list} of 2-tuples of the form (cmd_name, error) indicating commands that we skipped building completions for. cmd_name is the name of the skipped command, and error is the Exception that was raised when trying to import the script module. Commands are usually skipped due to a missing dependency, e.g. Tkinter. 
""" skips = [] for cmd_name, module_name, class_name in generateFor: if module_name is None: # create empty file f = _openCmdFile(out_path, cmd_name) f.close() continue try: m = __import__('%s' % (module_name,), None, None, (class_name)) f = _openCmdFile(out_path, cmd_name) o = getattr(m, class_name)() # instantiate Options class if cmd_name in specialBuilders: b = specialBuilders[cmd_name](cmd_name, o, f) b.write() else: b = Builder(cmd_name, o, f) b.write() except Exception, e: skips.append( (cmd_name, e) ) continue return skips def _openCmdFile(out_path, cmd_name): return file(os.path.join(out_path, '_'+cmd_name), 'w') def run(): options = MyOptions() try: options.parseOptions(sys.argv[1:]) except usage.UsageError, e: print e print options.getUsage() sys.exit(2) if options['install']: import twisted dir = os.path.join(os.path.dirname(twisted.__file__), "python", "zsh") skips = makeCompFunctionFiles(dir) else: skips = makeCompFunctionFiles(options['directory']) for cmd_name, error in skips: sys.stderr.write("zshcomp: Skipped building for %s. Script module " \ "could not be imported:\n" % (cmd_name,)) sys.stderr.write(str(error)+'\n') if skips: sys.exit(3) if __name__ == '__main__': run()
apache-2.0
caphrim007/ansible
lib/ansible/modules/windows/win_firewall_rule.py
28
2799
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2014, Timothy Vandenbrande <timothy.vandenbrande@gmail.com> # Copyright: (c) 2017, Artem Zinenko <zinenkoartem@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_firewall_rule version_added: "2.0" author: - Artem Zinenko (@ar7z1) - Timothy Vandenbrande (@TimothyVandenbrande) short_description: Windows firewall automation description: - Allows you to create/remove/update firewall rules. options: enabled: description: - Is this firewall rule enabled or disabled. type: bool default: 'yes' aliases: [ enable ] state: description: - Should this rule be added or removed. choices: [ absent, present ] default: present name: description: - The rules name required: yes direction: description: - Is this rule for inbound or outbound traffic. required: yes choices: [ in, out ] action: description: - What to do with the items this rule is for. required: yes choices: [ allow, block, bypass ] description: description: - Description for the firewall rule. localip: description: - The local ip address this rule applies to. default: any remoteip: description: - The remote ip address/range this rule applies to. default: any localport: description: - The local port this rule applies to. remoteport: description: - The remote port this rule applies to. program: description: - The program this rule applies to. service: description: - The service this rule applies to. protocol: description: - The protocol this rule applies to. default: any profiles: description: - The profile this rule applies to. type: list default: domain,private,public aliases: [ profile ] force: description: - Replace any existing rule by removing it first. - This is no longer required in 2.4 as rules no longer need replacing when being modified. 
- DEPRECATED in 2.4 and will be removed in 2.9. type: bool default: 'no' ''' EXAMPLES = r''' - name: Firewall rule to allow SMTP on TCP port 25 win_firewall_rule: name: SMTP localport: 25 action: allow direction: in protocol: tcp state: present enabled: yes - name: Firewall rule to allow RDP on TCP port 3389 win_firewall_rule: name: Remote Desktop localport: 3389 action: allow direction: in protocol: tcp profiles: private state: present enabled: yes '''
gpl-3.0
qrsforever/workspace
python/test/fangzu/collect.py
1
2931
#!/usr/bin/python3 # -*- coding: utf-8 -*- import re from room import RoomInfo from urllib.request import Request, urlopen from urllib.error import URLError import matplotlib.pyplot as plt WEB_SITE = "http://bj.58.com" AREA = "gaobeidianbj" TYPE = "zufang" SUFFIX = "pn" MAX_PAGES = 20 RE_ROOM = r'<p class="room">(\d{1}.*?)\s+(\d*).*?</p>.*?<div class="money">.*?(\d+).*?</div>' COLLECT_RE = re.compile(RE_ROOM, re.S) all_rooms = [] rooms_30to50 = [] rooms_50to70 = [] rooms_70to90 = [] def get_url(page): if page > MAX_PAGES: return None url = "/".join([WEB_SITE, AREA, TYPE, SUFFIX]) + str(page) print(url) return url def show(): # xdata = [] # ydata = [] # plt.figure() # print("") print("##############面积大小 30 ~ 50 ##########") # cnt = rooms_30to50.count() # plt.xlim(0, cnt); # plt.ylim(2000, 5000) # line, = plt.plot(xdata, ydata, 'r-') for i, x in enumerate(rooms_30to50): print(x) # xdata.append(i) # ydata.append(x[3]) print("") print("##############面积大小 50 ~ 70 ##########") for x in rooms_50to70: print(x) print("") print("##############面积大小 70 ~ 90 ##########") for x in rooms_70to90: print(x) pass for i in range(20): pagei = i + 1 req = Request(get_url(pagei)) try: html = urlopen(req) data = html.read() data = data.decode(encoding='UTF-8', errors='ignore') with open('pages/' + str(pagei) + '.html', 'wt') as f: f.write(data) # with open('pages/' + str(pagei) + '.html', 'rt') as f: # data = f.read() data = data.replace("&nbsp;", "") result = COLLECT_RE.findall(data) for item in result: flg = False for x in item: if x == '': flg = True break if flg == True: continue all_rooms.append(RoomInfo(AREA, *item)) for item in all_rooms: # print(item) area = int(item.getArea()) if area < 29 or area > 91: continue if area > 29 and 51 > area: rooms_30to50.append(item) elif area > 49 and 71 > area: rooms_50to70.append(item) elif area > 69 and 91 > area: rooms_70to90.append(item) else: pass show() except URLError as e: if hasattr(e, 'reason'): print('We failed to reach a server.') 
print('Reason: ', e.reason) elif hasattr(e, 'code'): print("The server couldn't fulfill the request.") print('Error code: ', e.code) else: print("Unknown error!") # data = "<p class=\"room\">主卧(2室) &nbsp;&nbsp;&nbsp;&nbsp;20㎡</p>"
mit
lmazuel/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/connectivity_source_py3.py
7
1352
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ConnectivitySource(Model): """Parameters that define the source of the connection. All required parameters must be populated in order to send to Azure. :param resource_id: Required. The ID of the resource from which a connectivity check will be initiated. :type resource_id: str :param port: The source port from which a connectivity check will be performed. :type port: int """ _validation = { 'resource_id': {'required': True}, } _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'port': {'key': 'port', 'type': 'int'}, } def __init__(self, *, resource_id: str, port: int=None, **kwargs) -> None: super(ConnectivitySource, self).__init__(**kwargs) self.resource_id = resource_id self.port = port
mit
takeshineshiro/wagtail
wagtail/tests/customuser/models.py
24
3933
import sys from django.db import models from django.contrib.auth.models import ( Group, Permission, AbstractBaseUser, PermissionsMixin, BaseUserManager) class CustomUserManager(BaseUserManager): def _create_user(self, username, email, password, is_staff, is_superuser, **extra_fields): """ Creates and saves a User with the given username, email and password. """ if not username: raise ValueError('The given username must be set') email = self.normalize_email(email) user = self.model(username=username, email=email, is_staff=is_staff, is_active=True, is_superuser=is_superuser, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_user(self, username, email=None, password=None, **extra_fields): return self._create_user(username, email, password, False, False, **extra_fields) def create_superuser(self, username, email, password, **extra_fields): return self._create_user(username, email, password, True, True, **extra_fields) class CustomUser(AbstractBaseUser, PermissionsMixin): username = models.CharField(max_length=100, unique=True) email = models.EmailField(max_length=255, blank=True) is_staff = models.BooleanField(default=True) is_active = models.BooleanField(default=True) first_name = models.CharField(max_length=50, blank=True) last_name = models.CharField(max_length=50, blank=True) USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['email'] objects = CustomUserManager() def get_full_name(self): return self.first_name + ' ' + self.last_name def get_short_name(self): return self.first_name class EmailUserManager(BaseUserManager): def _create_user(self, email, password, is_staff, is_superuser, **extra_fields): """ Creates and saves a User with the given email and password. 
""" email = self.normalize_email(email) user = self.model(email=email, is_staff=is_staff, is_active=True, is_superuser=is_superuser, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_user(self, email=None, password=None, **extra_fields): return self._create_user(email, password, False, False, **extra_fields) def create_superuser(self, email, password, **extra_fields): return self._create_user(email, password, True, True, **extra_fields) class EmailUser(AbstractBaseUser): # Cant inherit from PermissionsMixin because of clashes with # groups/user_permissions related_names. email = models.EmailField(max_length=255, unique=True) is_staff = models.BooleanField(default=True) is_active = models.BooleanField(default=True) first_name = models.CharField(max_length=50, blank=True) last_name = models.CharField(max_length=50, blank=True) is_superuser = models.BooleanField(default=False) groups = models.ManyToManyField(Group, related_name='+', blank=True) user_permissions = models.ManyToManyField(Permission, related_name='+', blank=True) USERNAME_FIELD = 'email' objects = EmailUserManager() def get_full_name(self): return self.first_name + ' ' + self.last_name def get_short_name(self): return self.first_name def steal_method(name): func = getattr(PermissionsMixin, name) if sys.version_info < (3,): func = func.__func__ setattr(EmailUser, name, func) methods = ['get_group_permissions', 'get_all_permissions', 'has_perm', 'has_perms', 'has_module_perms'] for method in methods: steal_method(method)
bsd-3-clause
openstack/tempest-lib
tempest_lib/common/utils/data_utils.py
10
6088
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import netaddr import random import string import uuid def rand_uuid(): """Generate a random UUID string :return: a random UUID (e.g. '1dc12c7d-60eb-4b61-a7a2-17cf210155b6') :rtype: string """ return str(uuid.uuid4()) def rand_uuid_hex(): """Generate a random UUID hex string :return: a random UUID (e.g. '0b98cf96d90447bda4b46f31aeb1508c') :rtype: string """ return uuid.uuid4().hex def rand_name(name='', prefix=None): """Generate a random name that inclues a random number :param str name: The name that you want to include :param str prefix: The prefix that you want to include :return: a random name. The format is '<prefix>-<random number>-<name>-<random number>'. (e.g. 'prefixfoo-1308607012-namebar-154876201') :rtype: string """ randbits = str(random.randint(1, 0x7fffffff)) rand_name = randbits if name: rand_name = name + '-' + rand_name if prefix: rand_name = prefix + '-' + rand_name return rand_name def rand_password(length=15): """Generate a random password :param int length: The length of password that you expect to set (If it's smaller than 3, it's same as 3.) :return: a random password. The format is '<random upper letter>-<random number>-<random special character> -<random ascii letters or digit characters or special symbols>' (e.g. 
'G2*ac8&lKFFgh%2') :rtype: string """ upper = random.choice(string.ascii_uppercase) ascii_char = string.ascii_letters digits = string.digits digit = random.choice(string.digits) puncs = '~!@#$%^&*_=+' punc = random.choice(puncs) seed = ascii_char + digits + puncs pre = upper + digit + punc password = pre + ''.join(random.choice(seed) for x in range(length - 3)) return password def rand_url(): """Generate a random url that inclues a random number :return: a random url. The format is 'https://url-<random number>.com'. (e.g. 'https://url-154876201.com') :rtype: string """ randbits = str(random.randint(1, 0x7fffffff)) return 'https://url-' + randbits + '.com' def rand_int_id(start=0, end=0x7fffffff): """Generate a random integer value :param int start: The value that you expect to start here :param int end: The value that you expect to end here :return: a random integer value :rtype: int """ return random.randint(start, end) def rand_mac_address(): """Generate an Ethernet MAC address :return: an random Ethernet MAC address :rtype: string """ # NOTE(vish): We would prefer to use 0xfe here to ensure that linux # bridge mac addresses don't change, but it appears to # conflict with libvirt, so we use the next highest octet # that has the unicast and locally administered bits set # properly: 0xfa. # Discussion: https://bugs.launchpad.net/nova/+bug/921838 mac = [0xfa, 0x16, 0x3e, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(["%02x" % x for x in mac]) def parse_image_id(image_ref): """Return the image id from a given image ref This function just returns the last word of the given image ref string splitting with '/'. 
:param str image_ref: a string that includes the image id :return: the image id string :rtype: string """ return image_ref.rsplit('/')[-1] def arbitrary_string(size=4, base_text=None): """Return size characters from base_text This generates a string with an arbitrary number of characters, generated by looping the base_text string. If the size is smaller than the size of base_text, returning string is shrinked to the size. :param int size: a returning charactors size :param str base_text: a string you want to repeat :return: size string :rtype: string """ if not base_text: base_text = 'test' return ''.join(itertools.islice(itertools.cycle(base_text), size)) def random_bytes(size=1024): """Return size randomly selected bytes as a string :param int size: a returning bytes size :return: size randomly bytes :rtype: string """ return ''.join([chr(random.randint(0, 255)) for i in range(size)]) def get_ipv6_addr_by_EUI64(cidr, mac): """Generate a IPv6 addr by EUI-64 with CIDR and MAC :param str cidr: a IPv6 CIDR :param str mac: a MAC address :return: an IPv6 Address :rtype: netaddr.IPAddress """ # Check if the prefix is IPv4 address is_ipv4 = netaddr.valid_ipv4(cidr) if is_ipv4: msg = "Unable to generate IP address by EUI64 for IPv4 prefix" raise TypeError(msg) try: eui64 = int(netaddr.EUI(mac).eui64()) prefix = netaddr.IPNetwork(cidr) return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57)) except (ValueError, netaddr.AddrFormatError): raise TypeError('Bad prefix or mac format for generating IPv6 ' 'address by EUI-64: %(prefix)s, %(mac)s:' % {'prefix': cidr, 'mac': mac}) except TypeError: raise TypeError('Bad prefix type for generate IPv6 address by ' 'EUI-64: %s' % cidr)
apache-2.0
LividInstruments/LiveRemoteScripts
_Mono_Framework/_deprecated/StepSeqComponent.py
1
10864
from __future__ import with_statement import Live from itertools import imap, chain, starmap from _Framework.ControlSurfaceComponent import ControlSurfaceComponent from _Framework.CompoundComponent import CompoundComponent from _Framework.SubjectSlot import subject_slot, Subject, subject_slot_group from _Framework.Util import forward_property, find_if from DrumGroupComponent import DrumGroupComponent from NoteEditorComponent import NoteEditorComponent from LoopSelectorComponent import LoopSelectorComponent from PlayheadComponent import PlayheadComponent from NoteEditorPaginator import NoteEditorPaginator class DrumGroupFinderComponent(ControlSurfaceComponent, Subject): """ Looks in the hierarchy of devices of the selected track, looking for the first available drum-rack (deep-first), updating as the device list changes. """ __subject_events__ = ('drum_group',) _drum_group = None @property def drum_group(self): """ The latest found drum rack. """ return self._drum_group @property def root(self): """ The currently observed track. 
""" return self.song().view.selected_track @subject_slot_group('devices') def _on_devices_changed(self, chain): self.update() @subject_slot_group('chains') def _on_chains_changed(self, chain): self.update() def on_selected_track_changed(self): self.update() def update(self): super(DrumGroupFinderComponent, self).update() if self.is_enabled(): self._update_listeners() self._update_drum_group() def _update_listeners(self): root = self.root devices = list(find_instrument_devices(root)) chains = list(chain([root], *[ d.chains for d in devices ])) self._on_chains_changed.replace_subjects(devices) self._on_devices_changed.replace_subjects(chains) def _update_drum_group(self): drum_group = find_drum_group_device(self.root) if type(drum_group) != type(self._drum_group) or drum_group != self._drum_group: self._drum_group = drum_group self.notify_drum_group() def find_instrument_devices(track_or_chain): """ Returns a list with all instrument rack descendants from a track or chain. """ instrument = find_if(lambda d: d.type == Live.Device.DeviceType.instrument, track_or_chain.devices) if instrument and not instrument.can_have_drum_pads: if instrument.can_have_chains: return chain([instrument], *imap(find_instrument_devices, instrument.chains)) return [] def find_drum_group_device(track_or_chain): """ Looks up recursively for a drum_group device in the track. 
""" instrument = find_if(lambda d: d.type == Live.Device.DeviceType.instrument, track_or_chain.devices) if instrument: if instrument.can_have_drum_pads: return instrument elif instrument.can_have_chains: return find_if(bool, imap(find_drum_group_device, instrument.chains)) class StepSeqComponent(CompoundComponent): """ Step Sequencer Component """ def __init__(self, clip_creator = None, skin = None, grid_resolution = None, note_editor_settings = None, *a, **k): super(StepSeqComponent, self).__init__(*a, **k) assert(clip_creator) assert(skin) assert(grid_resolution) self._grid_resolution = grid_resolution note_editor_settings and self.register_component(note_editor_settings) self._note_editor, self._loop_selector, self._big_loop_selector, self._drum_group = self.register_components(NoteEditorComponent(settings_mode=note_editor_settings, clip_creator=clip_creator, grid_resolution=self._grid_resolution), LoopSelectorComponent(clip_creator=clip_creator), LoopSelectorComponent(clip_creator=clip_creator, measure_length=2.0), DrumGroupComponent()) self._paginator = NoteEditorPaginator([self._note_editor]) self._big_loop_selector.set_enabled(False) self._big_loop_selector.set_paginator(self._paginator) self._loop_selector.set_paginator(self._paginator) self._shift_button = None self._delete_button = None self._mute_button = None self._solo_button = None self._note_editor_matrix = None self._on_pressed_pads_changed.subject = self._drum_group self._on_detail_clip_changed.subject = self.song().view self._detail_clip = None self._playhead = None self._playhead_component = self.register_component(PlayheadComponent(grid_resolution=grid_resolution, paginator=self._paginator, follower=self._loop_selector, notes=chain(*starmap(range, ((92, 100), (84, 92), (76, 84), (68, 76)))), triplet_notes=chain(*starmap(range, ((92, 98), (84, 90), (76, 82), (68, 74)))))) self._skin = skin self._playhead_color = 'NoteEditor.Playhead' def set_playhead(self, playhead): self._playhead = playhead 
self._playhead_component.set_playhead(playhead) self._update_playhead_color() def _get_playhead_color(self): return self._playhead_color def _set_playhead_color(self, value): self._playhead_color = 'NoteEditor.' + value self._update_playhead_color() playhead_color = property(_get_playhead_color, _set_playhead_color) def _is_triplet_quantization(self): return self._grid_resolution.clip_grid[1] def _update_playhead_color(self): if self.is_enabled() and self._skin and self._playhead: self._playhead.velocity = int(self._skin[self._playhead_color]) def set_drum_group_device(self, drum_group_device): self._drum_group.set_drum_group_device(drum_group_device) self._on_selected_drum_pad_changed.subject = drum_group_device.view if drum_group_device else None self._on_selected_drum_pad_changed() def set_touch_strip(self, touch_strip): self._drum_group.set_page_strip(touch_strip) def set_detail_touch_strip(self, touch_strip): self._drum_group.set_scroll_strip(touch_strip) def set_quantize_button(self, button): self._drum_group.set_quantize_button(button) def set_full_velocity_button(self, button): self._note_editor.set_full_velocity_button(button) def set_select_button(self, button): self._drum_group.set_select_button(button) self._loop_selector.set_select_button(button) def set_mute_button(self, button): self._drum_group.set_mute_button(button) self._note_editor.set_mute_button(button) self._mute_button = button def set_solo_button(self, button): self._drum_group.set_solo_button(button) self._solo_button = button def set_shift_button(self, button): self._big_loop_selector.set_select_button(button) self._shift_button = button self._on_shift_value.subject = button def set_delete_button(self, button): self._delete_button = button self._drum_group.set_delete_button(button) def set_next_loop_page_button(self, button): self._loop_selector.next_page_button.set_control_element(button) def set_prev_loop_page_button(self, button): 
self._loop_selector.prev_page_button.set_control_element(button) def set_loop_selector_matrix(self, matrix): self._loop_selector.set_loop_selector_matrix(matrix) def set_short_loop_selector_matrix(self, matrix): self._loop_selector.set_short_loop_selector_matrix(matrix) def set_follow_button(self, button): self._loop_selector.set_follow_button(button) self._big_loop_selector.set_follow_button(button) def set_drum_matrix(self, matrix): self._drum_group.set_drum_matrix(matrix) def set_drum_bank_up_button(self, button): self._drum_group.set_scroll_page_up_button(button) def set_drum_bank_down_button(self, button): self._drum_group.set_scroll_page_down_button(button) def set_drum_bank_detail_up_button(self, button): self._drum_group.set_scroll_up_button(button) def set_drum_bank_detail_down_button(self, button): self._drum_group.set_scroll_down_button(button) def set_button_matrix(self, matrix): self._note_editor_matrix = matrix self._update_note_editor_matrix() def set_quantization_buttons(self, buttons): self._grid_resolution.set_buttons(buttons) def set_velocity_control(self, control): self._note_editor.set_velocity_control(control) def set_length_control(self, control): self._note_editor.set_length_control(control) def set_nudge_control(self, control): self._note_editor.set_nudge_control(control) @forward_property('_note_editor') def full_velocity(self): pass def update(self): super(StepSeqComponent, self).update() self._on_detail_clip_changed() self._update_playhead_color() @subject_slot('detail_clip') def _on_detail_clip_changed(self): clip = self.song().view.detail_clip clip = clip if self.is_enabled() and clip and clip.is_midi_clip else None self._detail_clip = clip self._note_editor.set_detail_clip(clip) self._loop_selector.set_detail_clip(clip) self._big_loop_selector.set_detail_clip(clip) self._playhead_component.set_clip(self._detail_clip) @subject_slot('value') def _on_shift_value(self, value): if self.is_enabled(): 
self._update_note_editor_matrix(enable_big_loop_selector=value and not self._loop_selector.is_following) @subject_slot('selected_drum_pad') def _on_selected_drum_pad_changed(self): drum_group_view = self._on_selected_drum_pad_changed.subject if drum_group_view: selected_drum_pad = drum_group_view.selected_drum_pad if selected_drum_pad: self._note_editor.editing_note = selected_drum_pad.note @subject_slot('pressed_pads') def _on_pressed_pads_changed(self): self._note_editor.modify_all_notes_enabled = bool(self._drum_group.pressed_pads) def _update_note_editor_matrix(self, enable_big_loop_selector = False): if enable_big_loop_selector: self._note_editor.set_enabled(False) self._note_editor.set_button_matrix(None) self._big_loop_selector.set_enabled(True) self._big_loop_selector.set_loop_selector_matrix(self._note_editor_matrix) else: self._big_loop_selector.set_enabled(False) self._big_loop_selector.set_loop_selector_matrix(None) self._note_editor.set_enabled(True) self._note_editor.set_button_matrix(self._note_editor_matrix)
mit
ZeromusSoftware/RPi3500
apiweb/twitterAPI-Python-RPi/Adafruit_Python_DHT-master/Adafruit_DHT/Test.py
2
1618
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import common
# BUG FIX: the original line ``import .Test_Driver as driver`` is a syntax
# error -- explicit relative imports must be written with the
# ``from . import name`` form (``import .name`` is not valid Python).
from . import Test_Driver as driver


def read(sensor, pin):
    """Take one reading from the DHT *test* driver.

    :param sensor: sensor type constant (passed straight through to the
        driver; presumably one of the DHT11/DHT22 constants -- TODO confirm
        against ``common``).
    :param pin: pin identifier, passed straight through to the driver.
    :return: ``(humidity, temperature)`` tuple, or ``(None, None)`` when a
        transient error occurred and the caller may retry.
    :raises RuntimeError: for any non-transient driver error code.
    """
    # Get a reading from C driver code.
    result, humidity, temp = driver.read(sensor, pin)
    if result in common.TRANSIENT_ERRORS:
        # Signal no result could be obtained, but the caller can retry.
        return (None, None)
    elif result != common.DHT_SUCCESS:
        # Some kind of error occured.
        raise RuntimeError('Error calling DHT test driver read: {0}'.format(result))
    return (humidity, temp)
gpl-2.0
j831/zulip
contrib_bots/bots/converter/converter.py
1
4887
# See readme.md for instructions on running this code.
#
# NOTE(review): this module was reconstructed from whitespace-collapsed text;
# internal indentation of the multi-line ``usage`` string is best-effort.
from __future__ import absolute_import
from __future__ import division
import copy
import importlib
import sys
from math import log10, floor

from . import utils

import re


def is_float(value):
    """Return True when *value* parses as a float, False otherwise."""
    try:
        float(value)
        return True
    except ValueError:
        return False


# Rounds the number 'x' to 'digits' significant digits.
# A normal 'round()' would round the number to an absolute amount of
# fractional decimals, e.g. 0.00045 would become 0.0.
# 'round_to()' rounds only the digits that are not 0.
# 0.00045 would then become 0.0005.
def round_to(x, digits):
    return round(x, digits-int(floor(log10(abs(x)))))


class ConverterHandler(object):
    '''
    This plugin allows users to make conversions between various units,
    e.g. Celsius to Fahrenheit, or kilobytes to gigabytes.
    It looks for messages of the format
    '@mention-bot <number> <unit_from> <unit_to>'
    The message '@mention-bot help' posts a short description of how to use
    the plugin, along with a list of all supported units.
    '''

    def usage(self):
        return '''
            This plugin allows users to make conversions between various units,
            e.g. Celsius to Fahrenheit, or kilobytes to gigabytes.
            It looks for messages of the format
            '@mention-bot <number> <unit_from> <unit_to>'
            The message '@mention-bot help' posts a short description of how to use
            the plugin, along with a list of all supported units.
            '''

    def handle_message(self, message, client, state_handler):
        bot_response = get_bot_converter_response(message, client)
        client.send_reply(message, bot_response)


def get_bot_converter_response(message, client):
    """Parse every '@convert' request in *message* and build a reply string.

    Each request is '<number> <unit_from> <unit_to>'; 'help' after '@convert'
    yields the help text instead. One result line is produced per request.
    """
    content = message['content']

    words = content.lower().split()
    convert_indexes = [i for i, word in enumerate(words) if word == "@convert"]
    # -1 lets a message that starts with the arguments (no leading '@convert'
    # token) still be treated as one request.
    convert_indexes = [-1] + convert_indexes
    results = []

    for convert_index in convert_indexes:
        if (convert_index + 1) < len(words) and words[convert_index + 1] == 'help':
            results.append(utils.HELP_MESSAGE)
            continue
        if (convert_index + 3) < len(words):
            number = words[convert_index + 1]
            unit_from = utils.ALIASES.get(words[convert_index + 2], words[convert_index + 2])
            unit_to = utils.ALIASES.get(words[convert_index + 3], words[convert_index + 3])
            exponent = 0

            if not is_float(number):
                results.append(number + ' is not a valid number. ' + utils.QUICK_HELP)
                continue

            number = float(number)
            number_res = copy.copy(number)

            # Strip metric prefixes (k, M, ...) and accumulate their exponents.
            for key, exp in utils.PREFIXES.items():
                if unit_from.startswith(key):
                    exponent += exp
                    unit_from = unit_from[len(key):]
                if unit_to.startswith(key):
                    exponent -= exp
                    unit_to = unit_to[len(key):]

            uf_to_std = utils.UNITS.get(unit_from, False)
            ut_to_std = utils.UNITS.get(unit_to, False)

            if uf_to_std is False:
                results.append(unit_from + ' is not a valid unit. ' + utils.QUICK_HELP)
            if ut_to_std is False:
                # BUG FIX: the original message lacked the trailing space, so
                # the unit name and the quick-help text ran together.
                results.append(unit_to + ' is not a valid unit. ' + utils.QUICK_HELP)
            if uf_to_std is False or ut_to_std is False:
                continue

            base_unit = uf_to_std[2]
            if uf_to_std[2] != ut_to_std[2]:
                unit_from = unit_from.capitalize() if uf_to_std[2] == 'kelvin' else unit_from
                results.append(unit_to.capitalize() + ' and ' + unit_from +
                               ' are not from the same category. ' + utils.QUICK_HELP)
                continue

            # perform the conversion between the units
            number_res *= uf_to_std[1]
            number_res += uf_to_std[0]
            number_res -= ut_to_std[0]
            number_res /= ut_to_std[1]

            if base_unit == 'bit':
                number_res *= 1024 ** (exponent // 3)
            else:
                number_res *= 10 ** exponent
            number_res = round_to(number_res, 7)

            results.append('{} {} = {} {}'.format(number,
                                                  words[convert_index + 2],
                                                  number_res,
                                                  words[convert_index + 3]))

        else:
            results.append('Too few arguments given. ' + utils.QUICK_HELP)

    new_content = ''
    for idx, result in enumerate(results, 1):
        # Number the lines only when there is more than one conversion.
        new_content += ((str(idx) + '. conversion: ') if len(results) > 1
                        else '') + result + '\n'

    return new_content

handler_class = ConverterHandler
apache-2.0
Ubuntu-Solutions-Engineering/macumba
test/test_deploy.py
1
1395
# Copyright 2014 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import unittest
import sys
sys.path.insert(0, '../macumba')
import macumba

# Module-level connection probe: the suite below is skipped entirely when no
# local Juju endpoint answers at import time.
IS_CONNECTED = False
JUJU_URL = 'wss://localhost:17070/'
JUJU_PASS = 'pass'

c = macumba.JujuClient(url=JUJU_URL, password=JUJU_PASS)
try:
    c.login()
    IS_CONNECTED = True
except Exception:
    # BUG FIX: this was a bare ``except:``, which also swallowed
    # SystemExit/KeyboardInterrupt. Login failure is still treated as
    # "not connected" best-effort; the tests below are then skipped.
    pass


@unittest.skipIf(not IS_CONNECTED, 'Not connected.')
class MacumbaDeploySingleTest(unittest.TestCase):
    """Deployment smoke tests against a live local Juju controller."""

    def tearDown(self):
        # Remove the machine added by the tests so runs stay idempotent.
        c.destroy_machines(['1'])

    def test_deploy(self):
        """A plain deploy returns a falsy (no-error) result."""
        ret = c.deploy('mysql', 'mysql')
        self.assertTrue(not ret)

    def test_deploy_to(self):
        """Deploying to an explicit machine spec returns a falsy result."""
        c.add_machine()
        ret = c.deploy('precise/wordpress', 'wordpress',
                       machine_spec='lxc:1')
        self.assertTrue(not ret)
lgpl-3.0
JPFrancoia/scikit-learn
sklearn/semi_supervised/tests/test_label_propagation.py
44
2262
""" test the label propagation module """ import numpy as np from sklearn.utils.testing import assert_equal from sklearn.semi_supervised import label_propagation from sklearn.metrics.pairwise import rbf_kernel from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal ESTIMATORS = [ (label_propagation.LabelPropagation, {'kernel': 'rbf'}), (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}), (label_propagation.LabelPropagation, { 'kernel': lambda x, y: rbf_kernel(x, y, gamma=20) }), (label_propagation.LabelSpreading, {'kernel': 'rbf'}), (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}), (label_propagation.LabelSpreading, { 'kernel': lambda x, y: rbf_kernel(x, y, gamma=20) }), ] def test_fit_transduction(): samples = [[1., 0.], [0., 2.], [1., 3.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) assert_equal(clf.transduction_[2], 1) def test_distribution(): samples = [[1., 0.], [0., 1.], [1., 1.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) if parameters['kernel'] == 'knn': continue # unstable test; changes in k-NN ordering break it assert_array_almost_equal(clf.predict_proba([[1., 0.0]]), np.array([[1., 0.]]), 2) else: assert_array_almost_equal(np.asarray(clf.label_distributions_[2]), np.array([.5, .5]), 2) def test_predict(): samples = [[1., 0.], [0., 2.], [1., 3.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) def test_predict_proba(): samples = [[1., 0.], [0., 1.], [1., 2.5]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) assert_array_almost_equal(clf.predict_proba([[1., 1.]]), np.array([[0.5, 0.5]]))
bsd-3-clause
wgm2111/wgm-coursera
machine-learning/machine-learning-ex5/ex5/wgm-ex5.py
1
2281
""" An example script that fits the data with linear regression with a different orders of polynomial. """ # imports import scipy as sp import numpy as np import scipy.io as sio import sklearn.linear_model as linear_model import matplotlib.pyplot as plt # import data ex5_data = sio.loadmat('ex5data1.mat') # Loads the matlab/octave file as a dict # Define variables X = ex5_data['X'] y = ex5_data["y"] Xtest = ex5_data['Xtest'] ytest = ex5_data['ytest'] Xval = ex5_data['Xval'] yval = ex5_data['yval'] # Define higer order features up to polynomial 10 N = 10 X10 = np.array([X.squeeze()**n for n in range(1,N+1)]).transpose() Xtest10 = np.array([Xtest.squeeze()**n for n in range(1,N+1)]).transpose() # Define a lr model and fit for each order polynomial lr_models = [linear_model.LinearRegression(normalize=True) for n in range(N)] [lr_model.fit(X10[:,:n+1], y) for n, lr_model in zip(range(N), lr_models)] lr_models_ridgeCV = [linear_model.RidgeCV([1e-5, 1e-4, 1e-3, 1e-2, 1e-1], normalize=True) for n in range(N)] [lr_model_ridgeCV.fit(X10[:,:n+1], y) for n, lr_model_ridgeCV in zip(range(N), lr_models_ridgeCV)] # Compute the training and test errors for i, models in zip([0,1], [lr_models, lr_models_ridgeCV]): yfit_train = np.array([lr_model.predict(X10[:,:n+1]) for n, lr_model in zip(range(N), models)]) yfit_test = np.array([lr_model.predict(Xtest10[:,:n+1]) for n, lr_model in zip(range(N), models)]) # Cost functions for Npoly = sp.arange(1,11) J_train = 1 / (2.0 * yfit_train.shape[1]) * ((y - yfit_train)**2).sum(1) J_test = 1 / (2.0 * yfit_test.shape[1]) * ((ytest - yfit_test)**2).sum(1) # Make a plot if i == 0 : f0 = plt.figure(0, (5,5), facecolor='white') f0.clf() a0 = f0.add_axes([.1, .1, .85, .85]) a0.plot(Npoly, J_train, 'b', linewidth=2, label="err-train") a0.plot(Npoly, J_test, 'g', linewidth=2, label="err-test") a0.set_title("Error as a function of polynomial order") else: a0.plot(Npoly, J_train, '--b', linewidth=2, label="err-train-RidgeCV") a0.plot(Npoly, J_test, 
'--g', linewidth=2, label="err-test-RidgeCV") a0.set_ybound(.001, 40) a0.set_xbound(.5, 9.5) a0.legend() f0.show() f0.savefig("wgm-ex5-learning-curve.png")
gpl-2.0
skirsdeda/django
django/contrib/gis/measure.py
93
12310
# Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Distance nor the names of its contributors may be used # to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ Distance and Area objects to allow for sensible and convenient calculation and conversions. Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio Inspired by GeoPy (http://exogen.case.edu/projects/geopy/) and Geoff Biggs' PhD work on dimensioned units for robotics. 
""" __all__ = ['A', 'Area', 'D', 'Distance'] from decimal import Decimal from django.utils.functional import total_ordering from django.utils import six NUMERIC_TYPES = six.integer_types + (float, Decimal) AREA_PREFIX = "sq_" def pretty_name(obj): return obj.__name__ if obj.__class__ == type else obj.__class__.__name__ @total_ordering class MeasureBase(object): STANDARD_UNIT = None ALIAS = {} UNITS = {} LALIAS = {} def __init__(self, default_unit=None, **kwargs): value, self._default_unit = self.default_units(kwargs) setattr(self, self.STANDARD_UNIT, value) if default_unit and isinstance(default_unit, six.string_types): self._default_unit = default_unit def _get_standard(self): return getattr(self, self.STANDARD_UNIT) def _set_standard(self, value): setattr(self, self.STANDARD_UNIT, value) standard = property(_get_standard, _set_standard) def __getattr__(self, name): if name in self.UNITS: return self.standard / self.UNITS[name] else: raise AttributeError('Unknown unit type: %s' % name) def __repr__(self): return '%s(%s=%s)' % (pretty_name(self), self._default_unit, getattr(self, self._default_unit)) def __str__(self): return '%s %s' % (getattr(self, self._default_unit), self._default_unit) # **** Comparison methods **** def __eq__(self, other): if isinstance(other, self.__class__): return self.standard == other.standard else: return NotImplemented def __lt__(self, other): if isinstance(other, self.__class__): return self.standard < other.standard else: return NotImplemented # **** Operators methods **** def __add__(self, other): if isinstance(other, self.__class__): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard + other.standard)}) else: raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)}) def __iadd__(self, other): if isinstance(other, self.__class__): self.standard += other.standard return self else: raise TypeError('%(class)s must be added with %(class)s' % {"class": 
pretty_name(self)}) def __sub__(self, other): if isinstance(other, self.__class__): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard - other.standard)}) else: raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)}) def __isub__(self, other): if isinstance(other, self.__class__): self.standard -= other.standard return self else: raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)}) def __mul__(self, other): if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard * other)}) else: raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)}) def __imul__(self, other): if isinstance(other, NUMERIC_TYPES): self.standard *= float(other) return self else: raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)}) def __rmul__(self, other): return self * other def __truediv__(self, other): if isinstance(other, self.__class__): return self.standard / other.standard if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard / other)}) else: raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)}) def __div__(self, other): # Python 2 compatibility return type(self).__truediv__(self, other) def __itruediv__(self, other): if isinstance(other, NUMERIC_TYPES): self.standard /= float(other) return self else: raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)}) def __idiv__(self, other): # Python 2 compatibility return type(self).__itruediv__(self, other) def __bool__(self): return bool(self.standard) def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def default_units(self, kwargs): """ Return the unit value and the default units specified from the 
given keyword arguments dictionary. """ val = 0.0 default_unit = self.STANDARD_UNIT for unit, value in six.iteritems(kwargs): if not isinstance(value, float): value = float(value) if unit in self.UNITS: val += self.UNITS[unit] * value default_unit = unit elif unit in self.ALIAS: u = self.ALIAS[unit] val += self.UNITS[u] * value default_unit = u else: lower = unit.lower() if lower in self.UNITS: val += self.UNITS[lower] * value default_unit = lower elif lower in self.LALIAS: u = self.LALIAS[lower] val += self.UNITS[u] * value default_unit = u else: raise AttributeError('Unknown unit type: %s' % unit) return val, default_unit @classmethod def unit_attname(cls, unit_str): """ Retrieves the unit attribute name for the given unit string. For example, if the given unit string is 'metre', 'm' would be returned. An exception is raised if an attribute cannot be found. """ lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception('Could not find a unit keyword associated with "%s"' % unit_str) class Distance(MeasureBase): STANDARD_UNIT = "m" UNITS = { 'chain': 20.1168, 'chain_benoit': 20.116782, 'chain_sears': 20.1167645, 'british_chain_benoit': 20.1167824944, 'british_chain_sears': 20.1167651216, 'british_chain_sears_truncated': 20.116756, 'cm': 0.01, 'british_ft': 0.304799471539, 'british_yd': 0.914398414616, 'clarke_ft': 0.3047972654, 'clarke_link': 0.201166195164, 'fathom': 1.8288, 'ft': 0.3048, 'german_m': 1.0000135965, 'gold_coast_ft': 0.304799710181508, 'indian_yd': 0.914398530744, 'inch': 0.0254, 'km': 1000.0, 'link': 0.201168, 'link_benoit': 0.20116782, 'link_sears': 0.20116765, 'm': 1.0, 'mi': 1609.344, 'mm': 0.001, 'nm': 1852.0, 'nm_uk': 1853.184, 'rod': 5.0292, 'sears_yd': 0.91439841, 'survey_ft': 0.304800609601, 'um': 0.000001, 'yd': 0.9144, } # Unit aliases for `UNIT` terms encountered in Spatial Reference WKT. 
ALIAS = { 'centimeter': 'cm', 'foot': 'ft', 'inches': 'inch', 'kilometer': 'km', 'kilometre': 'km', 'meter': 'm', 'metre': 'm', 'micrometer': 'um', 'micrometre': 'um', 'millimeter': 'mm', 'millimetre': 'mm', 'mile': 'mi', 'yard': 'yd', 'British chain (Benoit 1895 B)': 'british_chain_benoit', 'British chain (Sears 1922)': 'british_chain_sears', 'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated', 'British foot (Sears 1922)': 'british_ft', 'British foot': 'british_ft', 'British yard (Sears 1922)': 'british_yd', 'British yard': 'british_yd', "Clarke's Foot": 'clarke_ft', "Clarke's link": 'clarke_link', 'Chain (Benoit)': 'chain_benoit', 'Chain (Sears)': 'chain_sears', 'Foot (International)': 'ft', 'German legal metre': 'german_m', 'Gold Coast foot': 'gold_coast_ft', 'Indian yard': 'indian_yd', 'Link (Benoit)': 'link_benoit', 'Link (Sears)': 'link_sears', 'Nautical Mile': 'nm', 'Nautical Mile (UK)': 'nm_uk', 'US survey foot': 'survey_ft', 'U.S. Foot': 'survey_ft', 'Yard (Indian)': 'indian_yd', 'Yard (Sears)': 'sears_yd' } LALIAS = dict((k.lower(), v) for k, v in ALIAS.items()) def __mul__(self, other): if isinstance(other, self.__class__): return Area(default_unit=AREA_PREFIX + self._default_unit, **{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)}) elif isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard * other)}) else: raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % { "distance": pretty_name(self.__class__), }) class Area(MeasureBase): STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT # Getting the square units values and the alias dictionary. 
UNITS = dict(('%s%s' % (AREA_PREFIX, k), v ** 2) for k, v in Distance.UNITS.items()) ALIAS = dict((k, '%s%s' % (AREA_PREFIX, v)) for k, v in Distance.ALIAS.items()) LALIAS = dict((k.lower(), v) for k, v in ALIAS.items()) def __truediv__(self, other): if isinstance(other, NUMERIC_TYPES): return self.__class__(default_unit=self._default_unit, **{self.STANDARD_UNIT: (self.standard / other)}) else: raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)}) def __div__(self, other): # Python 2 compatibility return type(self).__truediv__(self, other) # Shortcuts D = Distance A = Area
bsd-3-clause
lakshayg/tensorflow
tensorflow/contrib/factorization/python/ops/gmm_test.py
41
9763
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ops.gmm.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib from tensorflow.contrib.learn.python.learn.estimators import kmeans from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed as random_seed_lib from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import flags from tensorflow.python.platform import test from tensorflow.python.training import queue_runner FLAGS = flags.FLAGS class GMMTest(test.TestCase): def input_fn(self, batch_size=None, points=None): batch_size = batch_size or self.batch_size points = points if points is not None else self.points num_points = points.shape[0] def _fn(): x = constant_op.constant(points) if batch_size == num_points: return x, None indices = random_ops.random_uniform(constant_op.constant([batch_size]), minval=0, maxval=num_points-1, dtype=dtypes.int32, 
seed=10) return array_ops.gather(x, indices), None return _fn def setUp(self): np.random.seed(3) random_seed_lib.set_random_seed(2) self.num_centers = 2 self.num_dims = 2 self.num_points = 4000 self.batch_size = self.num_points self.true_centers = self.make_random_centers(self.num_centers, self.num_dims) self.points, self.assignments, self.scores = self.make_random_points( self.true_centers, self.num_points) self.true_score = np.add.reduce(self.scores) # Use initial means from kmeans (just like scikit-learn does). clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers) clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None), steps=30) self.initial_means = clusterer.clusters() @staticmethod def make_random_centers(num_centers, num_dims): return np.round( np.random.rand(num_centers, num_dims).astype(np.float32) * 500) @staticmethod def make_random_points(centers, num_points): num_centers, num_dims = centers.shape assignments = np.random.choice(num_centers, num_points) offsets = np.round( np.random.randn(num_points, num_dims).astype(np.float32) * 20) points = centers[assignments] + offsets means = [ np.mean( points[assignments == center], axis=0) for center in xrange(num_centers) ] covs = [ np.cov(points[assignments == center].T) for center in xrange(num_centers) ] scores = [] for r in xrange(num_points): scores.append( np.sqrt( np.dot( np.dot(points[r, :] - means[assignments[r]], np.linalg.inv(covs[assignments[r]])), points[r, :] - means[assignments[r]]))) return (points, assignments, scores) def test_weights(self): """Tests the shape of the weights.""" gmm = gmm_lib.GMM(self.num_centers, initial_clusters=self.initial_means, random_seed=4, config=run_config.RunConfig(tf_random_seed=2)) gmm.fit(input_fn=self.input_fn(), steps=0) weights = gmm.weights() self.assertAllEqual(list(weights.shape), [self.num_centers]) def test_clusters(self): """Tests the shape of the clusters.""" gmm = gmm_lib.GMM(self.num_centers, 
initial_clusters=self.initial_means, random_seed=4, config=run_config.RunConfig(tf_random_seed=2)) gmm.fit(input_fn=self.input_fn(), steps=0) clusters = gmm.clusters() self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims]) def test_fit(self): gmm = gmm_lib.GMM(self.num_centers, initial_clusters='random', random_seed=4, config=run_config.RunConfig(tf_random_seed=2)) gmm.fit(input_fn=self.input_fn(), steps=1) score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points), steps=1) gmm.fit(input_fn=self.input_fn(), steps=10) score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points), steps=1) self.assertGreater(score1, score2) self.assertNear(self.true_score, score2, self.true_score * 0.15) def test_infer(self): gmm = gmm_lib.GMM(self.num_centers, initial_clusters=self.initial_means, random_seed=4, config=run_config.RunConfig(tf_random_seed=2)) gmm.fit(input_fn=self.input_fn(), steps=60) clusters = gmm.clusters() # Make a small test set num_points = 40 points, true_assignments, true_offsets = ( self.make_random_points(clusters, num_points)) assignments = [] for item in gmm.predict_assignments( input_fn=self.input_fn(points=points, batch_size=num_points)): assignments.append(item) assignments = np.ravel(assignments) self.assertAllEqual(true_assignments, assignments) # Test score score = gmm.score(input_fn=self.input_fn(points=points, batch_size=num_points), steps=1) self.assertNear(score, np.sum(true_offsets), 4.05) def _compare_with_sklearn(self, cov_type): # sklearn version. iterations = 40 np.random.seed(5) sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1]) sklearn_means = np.asarray([[144.83417719, 254.20130341], [274.38754816, 353.16074346]]) sklearn_covs = np.asarray([[[395.0081194, -4.50389512], [-4.50389512, 408.27543989]], [[385.17484203, -31.27834935], [-31.27834935, 391.74249925]]]) # skflow version. 
gmm = gmm_lib.GMM(self.num_centers, initial_clusters=self.initial_means, covariance_type=cov_type, config=run_config.RunConfig(tf_random_seed=2)) gmm.fit(input_fn=self.input_fn(), steps=iterations) points = self.points[:10, :] skflow_assignments = [] for item in gmm.predict_assignments( input_fn=self.input_fn(points=points, batch_size=10)): skflow_assignments.append(item) self.assertAllClose(sklearn_assignments, np.ravel(skflow_assignments).astype(int)) self.assertAllClose(sklearn_means, gmm.clusters()) if cov_type == 'full': self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01) else: for d in [0, 1]: self.assertAllClose( np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01) def test_compare_full(self): self._compare_with_sklearn('full') def test_compare_diag(self): self._compare_with_sklearn('diag') def test_random_input_large(self): # sklearn version. iterations = 5 # that should be enough to know whether this diverges np.random.seed(5) num_classes = 20 x = np.array([[np.random.random() for _ in range(100)] for _ in range(num_classes)], dtype=np.float32) # skflow version. gmm = gmm_lib.GMM(num_classes, covariance_type='full', config=run_config.RunConfig(tf_random_seed=2)) def get_input_fn(x): def input_fn(): return constant_op.constant(x.astype(np.float32)), None return input_fn gmm.fit(input_fn=get_input_fn(x), steps=iterations) self.assertFalse(np.isnan(gmm.clusters()).any()) class GMMTestQueues(test.TestCase): def input_fn(self): def _fn(): queue = data_flow_ops.FIFOQueue(capacity=10, dtypes=dtypes.float32, shapes=[10, 3]) enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32)) queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, [enqueue_op])) return queue.dequeue(), None return _fn # This test makes sure that there are no deadlocks when using a QueueRunner. 
# Note that since cluster initialization is dependendent on inputs, if input # is generated using a QueueRunner, one has to make sure that these runners # are started before the initialization. def test_queues(self): gmm = gmm_lib.GMM(2, covariance_type='diag') gmm.fit(input_fn=self.input_fn(), steps=1) if __name__ == '__main__': test.main()
apache-2.0
AntouanK/rethinkdb
external/v8_3.30.33.16/build/gyp/test/library_dirs/gyptest-library-dirs.py
188
1441
#!/usr/bin/env python # Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies library_dirs (in link_settings) are properly found. """ import sys import TestGyp test = TestGyp.TestGyp(formats=['!android']) lib_dir = test.tempdir('secret_location') test.run_gyp('test.gyp', '-D', 'abs_path_to_secret_library_location={0}'.format(lib_dir), chdir='subdir') # Must build each target independently, since they are not in each others' # 'dependencies' (test.ALL does NOT work here for some builders, and in any case # would not ensure the correct ordering). test.build('test.gyp', 'mylib', chdir='subdir') test.build('test.gyp', 'libraries-search-path-test', chdir='subdir') expect = """Hello world """ test.run_built_executable( 'libraries-search-path-test', chdir='subdir', stdout=expect) if sys.platform in ('win32', 'cygwin'): test.run_gyp('test-win.gyp', '-D', 'abs_path_to_secret_library_location={0}'.format(lib_dir), chdir='subdir') test.build('test.gyp', 'mylib', chdir='subdir') test.build('test-win.gyp', 'libraries-search-path-test-lib-suffix', chdir='subdir') test.run_built_executable( 'libraries-search-path-test-lib-suffix', chdir='subdir', stdout=expect) test.pass_test() test.cleanup()
agpl-3.0
BoPeng/simuPOP
test/test_02_population.py
1
98826
#!/usr/bin/env python # # Purpose: # # This is a unittest file for population object # # Bo Peng (bpeng@rice.edu) # # $LastChangedRevision$ # $LastChangedDate$ # import unittest, os, sys, random, copy from simuOpt import setOptions setOptions(quiet=True) new_argv = [] for arg in sys.argv: if arg in ['short', 'long', 'binary', 'mutant', 'lineage']: setOptions(alleleType = arg) elif arg.startswith('-j'): setOptions(numThreads = int(arg[2:])) else: new_argv.append(arg) sys.argv=new_argv from simuPOP import * class TestPopulation(unittest.TestCase): # define a few functions to create basic populations def getPop(self, VSP=False, size=[20, 80], loci = [1, 2], infoFields=['x'], ancGen=0, *arg, **kwargs): pop = Population(size=size, ploidy=2, loci=loci, infoFields=infoFields, ancGen=ancGen, *arg, **kwargs) pop.setGenotype([random.randint(1, 5) for x in range(pop.popSize()*pop.ploidy())]) for info in infoFields: pop.setIndInfo([random.random() for x in range(pop.popSize())], info) for i in range(ancGen): pop.push(self.getPop(size=size, loci=loci, infoFields=infoFields, ancGen=0, *arg, **kwargs)) initSex(pop) if VSP: pop.setVirtualSplitter(SexSplitter()) return pop def testAbsIndIndex(self): 'Testing Population::absIndIndex(idx, subPop), popSize()' pop = self.getPop() # ind, subPop self.assertEqual(pop.absIndIndex(1, 1), 21) self.assertEqual(pop.absIndIndex(10, 0), 10) self.assertRaises(IndexError, pop.absIndIndex, 0, 2 ) self.assertEqual(pop.popSize(), 100) def testSubPop(self): 'Testing Population::subPopBegin(subPop), subPopEnd(subPop), numSubPop()' 'subPopSize(subPop), subPopSizes(), subPopIndPair(idx)' pop = self.getPop() self.assertEqual(pop.subPopBegin(1), 20) self.assertRaises(IndexError, pop.subPopBegin, 2 ) self.assertEqual(pop.subPopEnd(0), 20) self.assertRaises(IndexError, pop.subPopEnd, 2 ) self.assertEqual(pop.numSubPop(), 2) self.assertEqual(pop.subPopSize(0), 20) self.assertEqual(pop.subPopSize(1), 80) self.assertRaises(IndexError, pop.subPopSize, 2 ) 
self.assertEqual(pop.subPopSizes(), (20, 80) ) self.assertEqual(pop.subPopIndPair(21), (1, 1) ) self.assertRaises(IndexError, pop.subPopIndPair, 200 ) stat(pop, numOfMales=True, vars=['numOfMales_sp', 'numOfFemales_sp']) pop.setVirtualSplitter(SexSplitter()) self.assertEqual(pop.subPopSize([1, 0]), pop.dvars(1).numOfMales) self.assertEqual(pop.subPopSize([1, 1]), pop.dvars(1).numOfFemales) def testVirtualSubPop(self): 'Testing Population::numVirtualSubPop(), setVirtualSplitter(splitter), subPopName(subPop)' pop = Population(1000, infoFields=['x']) for ind in pop.individuals(): ind.setInfo(random.randint(10, 20), 'x') pop.setVirtualSplitter(InfoSplitter('x', values=list(range(10, 15)))) self.assertEqual(pop.numVirtualSubPop(), 5) self.assertEqual(pop.subPopName(0), "") self.assertEqual(pop.subPopName([0, 0]), "x = 10") self.assertEqual(pop.subPopName([0, 1]), "x = 11") self.assertEqual(pop.subPopName([0, 4]), "x = 14") self.assertRaises(IndexError, pop.subPopName, 1) self.assertRaises(IndexError, pop.subPopName, [0, 5]) # this function accepts vsp name pop.individuals([0, 'x = 10']) # with given names pop = Population(size=[200, 500], infoFields=['x'], subPopNames=['A', 'B']) for ind in pop.individuals(): ind.setInfo(random.randint(10, 20), 'x') pop.setVirtualSplitter(InfoSplitter('x', values=list(range(10, 15)))) self.assertEqual(pop.numVirtualSubPop(), 5) self.assertEqual(pop.subPopName(0), "A") self.assertEqual(pop.subPopName(1), "B") self.assertRaises(IndexError, pop.subPopName, 2) self.assertEqual(pop.subPopName([0, 0]), "A - x = 10") self.assertEqual(pop.subPopName([0, 1]), "A - x = 11") self.assertEqual(pop.subPopName([1, 4]), "B - x = 14") self.assertRaises(IndexError, pop.subPopName, [0, 5]) def testPopSize(self): 'Testing Population.popSize by male, female, pair' pop = self.getPop(size=[80, 20, 30, 50], ancGen=5) pop.mergeSubPops([1, 2]) pop.removeIndividuals([1, 59, 130]) initSex(pop) pop.setVirtualSplitter(SexSplitter()) self.assertEqual(pop.popSize(), 
177) self.assertEqual(pop.popSize(1), 180) self.assertEqual(pop.subPopSizes(), (78, 50, 49)) self.assertEqual(pop.subPopSize(1), 50) self.assertEqual(pop.subPopSizes(1), (80, 20, 30, 50)) self.assertEqual(pop.subPopSize(1, 1), 20) pop.useAncestralGen(2) self.assertEqual(pop.popSize(), 180) self.assertEqual(pop.subPopSizes(), (80, 20, 30, 50)) self.assertEqual(pop.popSize(2), 180) self.assertEqual(pop.subPopSizes(2), (80, 20, 30, 50)) self.assertEqual(pop.popSize(3), 180) self.assertEqual(pop.subPopSizes(3), (80, 20, 30, 50)) self.assertEqual(pop.subPopSize(1, 3), 20) # virtual stat(pop, numOfMales=True, subPops=[1]) self.assertEqual(pop.subPopSize((1,0)), pop.dvars().numOfMales) pop.useAncestralGen(0) self.assertEqual(pop.subPopSize((1,0), ancGen=2), pop.dvars().numOfMales) def testPopSizeBySex(self): 'Testing Population.popSize, Population.subPopSizes and Population.subPopSize' pop = self.getPop(size=[90, 10, 30, 50], ancGen=5) initSex(pop, sex=[MALE, FEMALE, MALE]) pop.useAncestralGen(2) initSex(pop, sex=[MALE, FEMALE, FEMALE]) pop.useAncestralGen(0) self.assertEqual(pop.popSize(sex=MALE_ONLY), 120) self.assertEqual(pop.popSize(sex=FEMALE_ONLY), 60) self.assertEqual(pop.popSize(sex=PAIR_ONLY), 60) self.assertEqual(pop.popSize(2, sex=MALE_ONLY), 60) self.assertEqual(pop.popSize(2, sex=FEMALE_ONLY), 120) self.assertEqual(pop.popSize(2, sex=PAIR_ONLY), 60) # VSP pop.setVirtualSplitter(SexSplitter()) self.assertEqual(pop.subPopSize(2, sex=MALE_ONLY), 20) self.assertEqual(pop.subPopSize(2, sex=FEMALE_ONLY), 10) self.assertEqual(pop.subPopSize(2, sex=PAIR_ONLY), 10) self.assertEqual(pop.subPopSize(2, 2, sex=MALE_ONLY), 10) self.assertEqual(pop.subPopSize(2, 2, sex=FEMALE_ONLY), 20) self.assertEqual(pop.subPopSize(2, 2, sex=PAIR_ONLY), 10) # self.assertEqual(pop.subPopSize((2, 'Male'), sex=MALE_ONLY), 20) self.assertEqual(pop.subPopSize((2, 'Male'), sex=FEMALE_ONLY), 0) self.assertEqual(pop.subPopSize((2, 'Male'), sex=PAIR_ONLY), 0) self.assertEqual(pop.subPopSize((0, 
'Female'), 2, sex=MALE_ONLY), 0) self.assertEqual(pop.subPopSize((0, 'Female'), 2, sex=FEMALE_ONLY), 60) self.assertEqual(pop.subPopSize((0, 'Female'), 2, sex=PAIR_ONLY), 0) def testLociPos(self): 'Testing lociPos parameter of Population::Population' # test for a bug that condier the following two numbers are the same. Population(loci=2, lociPos=[29.114998502, 29.114998525]) def testSubPopName(self): 'Testing Population::setSubPopName(name, subPop), subPopByName(subPop)' pop = self.getPop(size=[80, 20, 30, 50], ancGen=5) pop.setSubPopName('A', 0) pop.setSubPopName('B', 1) pop.setSubPopName('B', 2) pop.setSubPopName('C', 3) self.assertEqual(pop.subPopName(0), 'A') self.assertEqual(pop.subPopName(1), 'B') self.assertEqual(pop.subPopName(2), 'B') self.assertEqual(pop.subPopName(3), 'C') self.assertEqual(pop.subPopByName('A'), 0) self.assertEqual(pop.subPopByName('B'), 1) self.assertEqual(pop.subPopByName('C'), 3) self.assertRaises(ValueError, pop.subPopByName, 'D') def testIndividuals(self): 'Testing function Population::individuals(), individuals(subPop), individual(idx, subPop=0)' def testAllInd(pop): self.assertEqual(len(list(pop.individuals())), pop.popSize()) self.assertEqual(len(list(pop.individuals(0))), pop.subPopSize(0)) self.assertEqual(len(list(pop.individuals(1))), pop.subPopSize(1)) testAllInd(self.getPop()) testAllInd(self.getPop(True)) pop = Population([20, 80], loci = [5, 7], infoFields=['x']) pop.individual(0).setAllele(1, 0) self.assertEqual(pop.individual(0).allele(0), 1) def testGenotype(self): 'Testing Population::genotype(), genotype(subPop)' pop = Population(loci=[1, 2], size=[1, 2]) arr = pop.genotype() self.assertEqual(len(arr), pop.genoSize()*pop.popSize()) arr = pop.genotype(1) self.assertEqual(len(arr), pop.genoSize()*pop.subPopSize(1)) self.assertRaises(IndexError, pop.genotype, 2) def testSetGenotype(self): 'Testing Population::setGenotype(geno), setGenotype(geno, subPop)' pop = Population(loci=[1, 2], size=[1, 2]) 
self.assertRaises(IndexError, pop.setGenotype, [1], 2) if moduleInfo()['alleleType'] == 'binary': pop.setGenotype([0, 1, 0]) self.assertEqual(pop.individual(0).genotype(), [0, 1, 0, 0, 1, 0]) self.assertEqual(pop.individual(1).genotype(), [0, 1, 0, 0, 1, 0]) self.assertEqual(pop.individual(2).genotype(0), [0, 1, 0]) self.assertEqual(pop.individual(2).genotype(1), [0, 1, 0]) pop.setGenotype([1, 0], 1) self.assertEqual(pop.individual(0).genotype(), [0, 1, 0, 0, 1, 0]) self.assertEqual(pop.individual(1).genotype(0), [1, 0, 1]) self.assertEqual(pop.individual(1).genotype(1), [0, 1, 0]) self.assertEqual(pop.individual(2).genotype(), [1, 0, 1, 0, 1, 0]) # virtual subpopulation pop = self.getPop(size = 100, VSP=True) self.assertEqual(pop.numSubPop(), 1) self.assertEqual(pop.numVirtualSubPop(), 2) pop.setGenotype([5], [0, 0]) pop.setGenotype([6], [0, 1]) for idx, ind in enumerate(pop.individuals([0, 0])): self.assertEqual(ind.allele(idx%6), 1) for idx, ind in enumerate(pop.individuals([0, 1])): self.assertEqual(ind.allele(idx%6), 1) else: pop.setGenotype([1, 2, 3]) self.assertEqual(pop.individual(0).genotype(), [1, 2, 3, 1, 2, 3]) self.assertEqual(pop.individual(1).genotype(), [1, 2, 3, 1, 2, 3]) self.assertEqual(pop.individual(2).genotype(0), [1, 2, 3]) self.assertEqual(pop.individual(2).genotype(1), [1, 2, 3]) pop.setGenotype([2, 4], 1) self.assertEqual(pop.individual(0).genotype(), [1, 2, 3, 1, 2, 3]) self.assertEqual(pop.individual(1).genotype(0), [2, 4, 2]) self.assertEqual(pop.individual(1).genotype(1), [4, 2, 4]) self.assertEqual(pop.individual(2).genotype(), [2, 4, 2, 4, 2, 4]) # virtual subpopulation pop = self.getPop(size = 100, VSP=True) self.assertEqual(pop.numSubPop(), 1) self.assertEqual(pop.numVirtualSubPop(), 2) pop.setGenotype([5], [0, 0]) pop.setGenotype([6], [0, 1]) for idx, ind in enumerate(pop.individuals([0, 0])): self.assertEqual(ind.allele(idx%6), 5) for idx, ind in enumerate(pop.individuals([0, 1])): self.assertEqual(ind.allele(idx%6), 6) def 
testAncestor(self): 'Testing Population::ancestor(idx, gen), ancestor(idx, gen, subPop), push(pop)' pop = Population([100, 200], loci=[10, 20], infoFields=['x', 'y'], ancGen=5) initSex(pop) initGenotype(pop, freq=[0.2, 0.8]) for ind in pop.individuals(): ind.setInfo(random.randint(4, 10), 'x') ind.setInfo(random.randint(10, 100), 'y') pop1 = Population([200, 100], loci=[10, 20], infoFields=['x', 'y']) initSex(pop1) initGenotype(pop1, freq= [0.5, 0.5]) for ind in pop1.individuals(): ind.setInfo(random.randint(4, 10), 'x') ind.setInfo(random.randint(10, 100), 'y') pop_c = pop.clone() pop.push(pop1) for idx, ind in enumerate(pop_c.individuals()): self.assertEqual(ind, pop.ancestor(idx, 1)) self.assertEqual(ind.info('x'), pop.ancestor(idx, 1).info('x')) self.assertEqual(ind.info('y'), pop.ancestor(idx, 1).info('y')) self.assertRaises(IndexError, pop.ancestor, 2, 10000) self.assertRaises(IndexError, pop.ancestor, 3, 10000) for idx, ind in enumerate(pop_c.individuals(0)): self.assertEqual(ind, pop.ancestor(idx, 1, 0)) def testAncestralGens(self): 'Testing Population::ancestralGens(), setAncestralDepth(depth), useAncestralGen(idx)' pop = Population(size=[3, 5], loci=[2, 3], infoFields=['x']) initSex(pop) initGenotype(pop, freq=[.2, .8]) pop.setIndInfo([random.random() for x in range(8)], 'x') pop.setAncestralDepth(-1) gt = list(pop.genotype()) inf = pop.indInfo('x') self.assertEqual(pop.ancestralGens(), 0) pop1 = Population(size=[2, 3], loci=[2, 3], ancGen=2, infoFields=['x']) initSex(pop1) initGenotype(pop1, freq= [.8, .2]) pop1.setIndInfo([random.random() for x in range(8)], 'x') gt1 = list(pop1.genotype()) inf1 = pop1.indInfo('x') pop.push(pop1) self.assertEqual(pop.ancestralGens(), 1) self.assertEqual(pop.genotype(), gt1) self.assertEqual(pop.indInfo('x'), inf1) # subPopSize, indInfo self.assertEqual(pop.subPopSize(0), 2) self.assertEqual(pop.subPopSize(1), 3) pop.useAncestralGen(1) self.assertEqual(pop.genotype(), gt) self.assertEqual(pop.indInfo('x'), inf) 
pop.useAncestralGen(0) self.assertEqual(pop.genotype(), gt1) self.assertEqual(pop.indInfo('x'), inf1) pop2 = Population(size=[3, 5], loci=[2, 3], infoFields=['x']) pop2.setIndInfo([random.random() for x in range(8)], 'x') inf2 = pop2.indInfo('x') initSex(pop2) initGenotype(pop2, freq= [.2, .8]) gt2 = list(pop2.genotype()) pop.push(pop2) self.assertEqual(pop.ancestralGens(), 2) self.assertEqual(pop.genotype(), gt2) self.assertEqual(pop.indInfo('x'), inf2) pop.useAncestralGen(1) self.assertEqual(pop.genotype(), gt1) self.assertEqual(pop.indInfo('x'), inf1) pop.useAncestralGen(2) self.assertEqual(pop.genotype(), gt) self.assertEqual(pop.indInfo('x'), inf) # out of bound ancestral generation number self.assertRaises(ValueError, pop.useAncestralGen, 3 ) # setAncestralDepth pop = self.getPop(ancGen = 5) pop.setAncestralDepth(3) self.assertEqual(pop.ancestralGens(), 3) def testAddChrom(self): 'Testing Population::addChrom' pop = self.getPop(chromNames=['c1', 'c2'], lociPos=[1, 3, 5], lociNames = ['l1', 'l2', 'l3'], ancGen=5) pop1 = pop.clone() for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) pop.setLineage(1) pop1.setLineage(2) pop.addChrom([7, 8, 9], ['l4', 'l5', 'l6'], 'c3') self.assertEqual(pop.numChrom(), 3) self.assertEqual(pop.chromNames(), ('c1', 'c2', 'c3')) self.assertEqual(pop.numLoci(), (1, 2, 3)) for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) for idx in range(pop.popSize()): ind = pop.individual(idx) ind1 = pop1.individual(idx) for ch in range(2): self.assertEqual(ind.genotype(0, ch), ind1.genotype(0, ch)) self.assertEqual(ind.genotype(1, ch), ind1.genotype(1, ch)) # new chromosome has zero values self.assertEqual(ind.genotype(0, 2), [0]*3) # new alleles are zero self.assertEqual(ind.genotype(1, 2), [0]*3) # new alleles are zero # if moduleInfo()['alleleType'] == 'lineage': self.assertEqual(ind.lineage(0, ch), [1]*ind.numLoci(ch)) self.assertEqual(ind.lineage(0, 
ch), [1]*ind.numLoci(ch)) self.assertEqual(ind.lineage(0, 2), [0]*3) self.assertEqual(ind.lineage(1, 2), [0]*3) # lociPos is not ordered self.assertRaises(ValueError, pop.addChrom, [13, 12, 11], ['l4', 'l5', 'l6'], 'c3') # given loci names are not unique. self.assertRaises(ValueError, pop.addChrom, [11, 12, 13], ['l4', 'l5', 'l6'], 'c3') # # given chromsome name is not unique. self.assertRaises(ValueError, pop.addChrom, [11, 12, 13], ['l4', 'l5', 'l6'], 'c4') def testAddChromFrom(self): 'Testing Population::addChromFrom(pop)' pop = Population(size=100, ploidy=2, loci=[1, 2], chromNames=["c1", "c2"], lociNames = ['l1', 'l2', 'l3']) pop2 = pop.clone() pop1 = Population(size=100, ploidy=2, loci=[2, 3], chromNames=["c3", "c4"], lociNames = ['l4', 'l5', 'l6', 'l7', 'l8']) pop.addChromFrom(pop1) self.assertEqual(pop.numChrom(), 4) self.assertEqual(pop.chromNames(), ('c1', 'c2', 'c3', 'c4')) self.assertEqual(pop.numLoci(), (1, 2, 2, 3)) for i in range(100): ind = pop.individual(i) ind1 = pop2.individual(i) ind2 = pop1.individual(i) for loc in range(3): self.assertEqual(ind.allele(loc), ind1.allele(loc)) for loc in range(5): self.assertEqual(ind.allele(loc+3), ind2.allele(loc)) pop = Population(size=100, ploidy=2, loci=[1, 2]) pop1 = Population(size=200, ploidy=2, loci=[2, 3], chromNames=["c3", "c4"], lociNames = ['l4', 'l5', 'l6', 'l7', 'l8']) # population size is different self.assertRaises(ValueError, pop.addChromFrom, pop1) # see what happens to alleleNames pop1 = Population(size=100, ploidy=2, loci=[1, 2], chromNames=["c1", "c2"], lociNames = ['l1', 'l2', 'l3'], alleleNames=['A', 'B']) pop2 = Population(size=100, ploidy=2, loci=[2, 3], chromNames=["c3", "c4"], lociNames = ['l4', 'l5', 'l6', 'l7', 'l8']) pop1.addChromFrom(pop2) self.assertEqual(pop1.alleleNames(0), ('A', 'B')) self.assertEqual(pop1.alleleNames(2), ('A', 'B')) self.assertEqual(pop1.alleleNames(3), ()) self.assertEqual(pop1.alleleNames(4), ()) self.assertEqual(pop1.alleleNames(7), ()) # pop1 = 
Population(size=100, ploidy=2, loci=[1, 2], chromNames=["c1", "c2"], lociNames = ['l1', 'l2', 'l3'], alleleNames=['A', 'B']) pop2 = Population(size=100, ploidy=2, loci=[2], chromNames=["c3"], lociNames = ['l4', 'l5'], alleleNames=[['E', 'F'], ['C', 'D']]) pop1.addChromFrom(pop2) self.assertEqual(pop1.alleleNames(0), ('A', 'B')) self.assertEqual(pop1.alleleNames(2), ('A', 'B')) self.assertEqual(pop1.alleleNames(3), ('E', 'F')) self.assertEqual(pop1.alleleNames(4), ('C', 'D')) def testAddIndFrom(self): 'Testing Population::addIndFrom(pop)' pop = self.getPop(ancGen=3) pop1 = self.getPop(ancGen=3) pop.setIndInfo([random.randint(4, 10) for x in range(pop.popSize())], 'x') pop.addIndFrom(pop1) self.assertEqual(pop.numSubPop(), 4) self.assertEqual(pop.subPopSizes(), (20, 80, 20, 80)) for i in range(100): self.assertEqual(pop.individual(100+i), pop1.individual(i)) pop1 = self.getPop(ancGen=2) # different numbers of ancestral generations self.assertRaises(ValueError, pop.addIndFrom, pop1) pop1 = Population(size=100, ploidy=2, loci=[1, 2, 3]) # different genotype structure self.assertRaises(ValueError, pop.addIndFrom, pop1) def testAddLociFrom(self): 'Testing Population::addLociFrom(pop)' pop = self.getPop(chromNames=["c1", "c2"], ancGen=5, lociPos=[1, 2, 5], lociNames = ['l1', 'l2', 'l3']) pop1 = pop.clone() pop2 = self.getPop(chromNames=["c3", "c4"], ancGen=5, lociPos=[4, 3, 6], lociNames = ['l4', 'l5', 'l6']) for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) pop2.useAncestralGen(gen) pop.setLineage(1) pop1.setLineage(2) pop2.setLineage(3) pop.addLociFrom(pop2); self.assertEqual(pop.numLoci(), (2, 4)) self.assertEqual(pop.lociPos(), (1, 4, 2, 3, 5, 6)) self.assertEqual(pop.chromNames(), ('c1', 'c2')) for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) pop2.useAncestralGen(gen) for idx in range(pop.popSize()): ind = pop.individual(idx) inds = [pop1.individual(idx), 
pop2.individual(idx)] # i: index in population # src: the source Population # j: index in source Population for i, src, j in [(0, 0, 0), (1, 1, 0), (2, 0, 1), (3, 1, 1), (4, 0, 2), (5, 1, 2)]: for p in range(pop.ploidy()): self.assertEqual(ind.allele(i, p), inds[src].allele(j, p)) for i in range(0, 2, 4): for p in range(pop.ploidy()): if moduleInfo()['alleleType'] == 'lineage': self.assertEqual(ind.alleleLineage(i, p), 1) else: self.assertEqual(ind.alleleLineage(i, p), 0) for i in range(1, 3, 5): for p in range(pop.ploidy()): if moduleInfo()['alleleType'] == 'lineage': self.assertEqual(ind.alleleLineage(i, p), 3) else: self.assertEqual(ind.alleleLineage(i, p), 0) # allele names pop = self.getPop(chromNames=["c1", "c2"], ancGen=5, lociPos=[1, 2, 5], lociNames = ['l1', 'l2', 'l3'], alleleNames=['A']) pop2 = self.getPop(chromNames=["c3", "c4"], ancGen=5, lociPos=[4, 3, 6], lociNames = ['l4', 'l5', 'l6'], alleleNames=[['B'], ['C', 'D'], ['E']]) pop.addLociFrom(pop2); self.assertEqual(pop.alleleNames(0), ('A',)) self.assertEqual(pop.alleleNames(1), ('B',)) self.assertEqual(pop.alleleNames(2), ('A',)) self.assertEqual(pop.alleleNames(3), ('C', 'D')) self.assertEqual(pop.alleleNames(4), ('A',)) self.assertEqual(pop.alleleNames(5), ('E',)) def testAddLociFromByName(self): 'Testing Population::addLociFrom(pop, byName=True)' pop = self.getPop(chromNames=["c1", "c2"], ancGen=5, lociPos=[1, 2, 5], lociNames = ['l1', 'l2', 'l3']) pop1 = pop.clone() pop2 = self.getPop(chromNames=["c2", "c4"], ancGen=5, lociPos=[4, 3, 6], lociNames = ['l4', 'l5', 'l6']) for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) pop2.useAncestralGen(gen) pop.setLineage(1) pop1.setLineage(2) pop2.setLineage(3) pop.addLociFrom(pop2, byName=True); self.assertEqual(pop.numLoci(), (1, 3, 2)) self.assertEqual(pop.lociPos(), (1, 2, 4, 5, 3, 6)) self.assertEqual(pop.chromNames(), ('c1', 'c2', 'c4')) for gen in range(pop.ancestralGens(), -1, -1): 
pop.useAncestralGen(gen) pop1.useAncestralGen(gen) pop2.useAncestralGen(gen) for idx in range(pop.popSize()): ind = pop.individual(idx) inds = [pop1.individual(idx), pop2.individual(idx)] # i: index in population # src: the source Population # j: index in source Population for i, src, j in [(0, 0, 0), (1, 0, 1), (2, 1, 0), (3, 0, 2), (4, 1, 1), (5, 1, 2)]: for p in range(pop.ploidy()): self.assertEqual(ind.allele(i, p), inds[src].allele(j, p)) for i in range(0, 1, 3): for p in range(pop.ploidy()): if moduleInfo()['alleleType'] == 'lineage': self.assertEqual(ind.alleleLineage(i, p), 1) else: self.assertEqual(ind.alleleLineage(i, p), 0) for i in range(2, 4, 5): for p in range(pop.ploidy()): if moduleInfo()['alleleType'] == 'lineage': #??? self.assertEqual(ind.alleleLineage(i, p), 3) else: self.assertEqual(ind.alleleLineage(i, p), 0) # allele names pop = self.getPop(chromNames=["c1", "c2"], ancGen=5, lociPos=[1, 2, 5], lociNames = ['l1', 'l2', 'l3'], alleleNames=['A']) pop2 = self.getPop(chromNames=["c3", "c2"], ancGen=5, lociPos=[4, 3, 6], lociNames = ['l4', 'l5', 'l6'], alleleNames=[['B'], ['C', 'D'], ['E']]) pop.addLociFrom(pop2, True); self.assertEqual(pop.alleleNames(0), ('A',)) self.assertEqual(pop.alleleNames(1), ('A',)) self.assertEqual(pop.alleleNames(2), ('C', 'D')) self.assertEqual(pop.alleleNames(3), ('A',)) self.assertEqual(pop.alleleNames(4), ('E',)) self.assertEqual(pop.alleleNames(5), ('B',)) def testAddLoci(self): 'Testing Population::addLoci(chrom, pos, names=[])' # special cases where the destination chromosome is empty # empty chromosome is the last pop = Population(size=5, ploidy = 2, loci = [1,0], lociPos = [0.4], chromNames = ['One', 'Two'], lociNames = ['selSite']) initGenotype(pop, freq=[0.5, 0.5]) g1 = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in pop.individuals()] pop.addLoci(chrom = 1, pos = 0.1, lociNames = '') self.assertEqual(pop.numChrom(), 2) self.assertEqual(pop.numLoci(0), 1) self.assertEqual(pop.numLoci(1), 
1) self.assertEqual(pop.locusName(0), 'selSite') self.assertEqual(pop.locusName(1), '') g1_after = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in pop.individuals()] self.assertEqual(g1, g1_after) g2 = [x.allele(1, 0) for x in pop.individuals()] + [x.allele(1, 1) for x in pop.individuals()] self.assertEqual(g2, [0]*10) # # empty chromosome is the first pop = Population(size=5, ploidy = 2, loci = [0,1], lociPos = [0.4], chromNames = ['One', 'Two'], lociNames = ['selSite']) initGenotype(pop, freq=[0.5, 0.5]) g1 = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in pop.individuals()] pop.addLoci(chrom = 0, pos = 0.1, lociNames = '') self.assertEqual(pop.numChrom(), 2) self.assertEqual(pop.numLoci(0), 1) self.assertEqual(pop.numLoci(1), 1) self.assertEqual(pop.locusName(0), '') self.assertEqual(pop.locusName(1), 'selSite') g1_after = [x.allele(1, 0) for x in pop.individuals()] + [x.allele(1, 1) for x in pop.individuals()] self.assertEqual(g1, g1_after) g2 = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in pop.individuals()] self.assertEqual(g2, [0]*10) # # empty chromosome is in the middle pop = Population(size=5, ploidy = 2, loci = [1,0,0,1], lociPos = [0.4, 0.4], chromNames = ['One', 'Two', 'Three', 'Four'], lociNames = ['on_ch1', 'on_ch4']) initGenotype(pop, freq=[0.5, 0.5]) g1 = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in pop.individuals()] g2 = [x.allele(1, 0) for x in pop.individuals()] + [x.allele(1, 1) for x in pop.individuals()] pop.addLoci(chrom = 2, pos = 0.1, lociNames = 'new') self.assertEqual(pop.numChrom(), 4) self.assertEqual(pop.numLoci(0), 1) self.assertEqual(pop.numLoci(1), 0) self.assertEqual(pop.numLoci(2), 1) self.assertEqual(pop.numLoci(3), 1) self.assertEqual(pop.locusName(0), 'on_ch1') self.assertEqual(pop.locusName(1), 'new') self.assertEqual(pop.locusName(2), 'on_ch4') g1_after = [x.allele(0, 0) for x in pop.individuals()] + [x.allele(0, 1) for x in 
pop.individuals()] g2_after = [x.allele(2, 0) for x in pop.individuals()] + [x.allele(2, 1) for x in pop.individuals()] self.assertEqual(g1, g1_after) self.assertEqual(g2, g2_after) g3_after = [x.allele(1, 0) for x in pop.individuals()] + [x.allele(1, 1) for x in pop.individuals()] self.assertEqual(g3_after, [0]*10) # pop = self.getPop(size = 100, chromNames=["c1", "c2"], ancGen=5, lociPos=[1, 3, 5], lociNames = ['l1', 'l2', 'l3']) pop1 = pop.clone() newpos = pop.addLoci([0, 1, 1], [2, 6, 7], ['l4', 'l5', 'l6']) self.assertEqual(pop.numLoci(), (2, 4)) self.assertEqual(pop.lociPos(), (1, 2, 3, 5, 6, 7)) for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) for idx in range(pop.popSize()): ind = pop.individual(idx) ind1 = pop1.individual(idx) # i: index in population # j: index in source Population for i, j in [(0, 0), (2, 1), (3, 2)]: for p in range(pop.ploidy()): self.assertEqual(ind.allele(i, p), ind1.allele(j, p)) for k in newpos: self.assertEqual(ind.allele(k), 0) self.assertRaises(ValueError, pop.addLoci, [2], [8], ['l7']) # def testDeepcopy(self): 'Testing deepcopy of population' pop = self.getPop(False, ancGen=3) initSex(pop) initGenotype(pop, freq=[0.2, 0.8]) # shallow copy pop1 = pop initSex(pop1) initGenotype(pop1, freq= [0.8, 0.2]) self.assertEqual(pop, pop1) # deep copy pop1 = pop.clone() self.assertEqual(pop, pop1) initSex(pop1) initGenotype(pop1, freq= [0.5, 0.5]) self.assertNotEqual(pop, pop1) # using Python copy.copy pop1 = copy.copy(pop) self.assertEqual(pop, pop1) initSex(pop1) initGenotype(pop1, freq= [0.5, 0.5]) self.assertEqual(pop, pop1) # using Python copy.deepcopy pop1 = copy.deepcopy(pop) self.assertEqual(pop, pop1) initSex(pop1) initGenotype(pop1, freq= [0.5, 0.5]) self.assertNotEqual(pop, pop1) def testMergeSubPops(self): 'Testing Population::MergeSubPops(subpops=[])' pop = self.getPop(size=[100, 20, 30, 80, 50, 60], subPopNames=['A', 'B', 'C', 'D', 'E', 'F']) pop1 = pop.clone() 
        # merge non-adjacent subpops 1, 2 and 4 into one
        pop.mergeSubPops([1, 2, 4])
        self.assertEqual(pop.subPopSize(1), pop1.subPopSize(1)+pop1.subPopSize(2)+pop1.subPopSize(4))
        for (oldsp, newsp) in [(0, 0), (3, 2), (5, 3)]:
            # map of old and new id.
            self.assertEqual(pop1.subPopSize(oldsp), pop.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop.individual(idx, newsp))
        # set new name to merged subpopulation
        pop = self.getPop(size=[100, 20, 30, 80, 50, 60], subPopNames=['A', 'B', 'C', 'D', 'E', 'F'])
        sp = pop.mergeSubPops([2, 1, 4], name='new')
        # the merged subpop takes the smallest index of the merged ones
        self.assertEqual(sp, 1)
        self.assertEqual(pop.subPopName(sp), 'new')
        self.assertEqual(pop.subPopNames(), ('A', 'new', 'D', 'F'))
        self.assertEqual(pop.subPopSize(1), pop1.subPopSize(1)+pop1.subPopSize(2)+pop1.subPopSize(4))
        #
        # merge to a specified subpopulation
        pop = self.getPop(size=[100, 20, 30, 80, 50, 60], subPopNames=['A', 'B', 'C', 'D', 'E', 'F'])
        pop1 = pop.clone()
        # toSubPop must be one of the merged subpopulations
        self.assertRaises(ValueError, pop.mergeSubPops, [1, 3, 4], toSubPop=2)
        pop.mergeSubPops([1, 3, 4], toSubPop=3)
        self.assertEqual(pop.subPopSize(2), pop1.subPopSize(1)+pop1.subPopSize(3)+pop1.subPopSize(4))
        for (oldsp, newsp) in [(0, 0), (2, 1), (5, 3)]:
            # map of old and new id.
            self.assertEqual(pop1.subPopSize(oldsp), pop.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop.individual(idx, newsp))
        # set new name to merged subpopulation
        pop = self.getPop(size=[100, 20, 30, 80, 50, 60], subPopNames=['A', 'B', 'C', 'D', 'E', 'F'])
        pop1 = pop.clone()
        sp = pop.mergeSubPops([2, 1, 4], toSubPop=4, name='new')
        for (oldsp, newsp) in [(0, 0), (3, 1), (5, 3)]: # map of old and new id.
            # (loop body of the old/new subpop id mapping check above)
            self.assertEqual(pop1.subPopSize(oldsp), pop.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop.individual(idx, newsp))
        # with toSubPop=4, the merged subpop lands at index 2
        self.assertEqual(sp, 2)
        self.assertEqual(pop.subPopName(sp), 'new')
        self.assertEqual(pop.subPopNames(), ('A', 'D', 'new', 'F'))
        self.assertEqual(pop.subPopSize(2), pop1.subPopSize(1)+pop1.subPopSize(2)+pop1.subPopSize(4))

    def testRemoveSubPops(self):
        'Testing Population::removeSubPops()'
        pop = self.getPop(size=[0, 100, 0, 20, 30, 0, 50], subPopNames=['A', 'B', 'C', 'D', 'E', 'F', 'G'])
        initSex(pop)
        initGenotype(pop, freq=[0.5, 0.5])
        pop1 = pop.clone()
        self.assertEqual(pop.numSubPop(), 7)
        # drop all empty subpopulations
        pop.removeSubPops([x for x in range(7) if pop.subPopSize(x) == 0])
        self.assertEqual(pop.numSubPop(), 4)
        self.assertEqual(pop.subPopSizes(), (100, 20, 30, 50))
        for (oldsp, newsp) in [(1, 0), (3, 1), (4, 2), (6, 3)]:
            # map of old and new id.
            self.assertEqual(pop1.subPopSize(oldsp), pop.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop.individual(idx, newsp))
        # remove subpop
        pop2 = pop.clone()
        pop.removeSubPops([1, 2])
        self.assertEqual(pop.subPopSizes(), (100, 50))
        for (oldsp, newsp) in [(0, 0), (3, 1)]: # map of old and new id.
            # (loop body of the old/new subpop id mapping check above)
            self.assertEqual(pop2.subPopSize(oldsp), pop.subPopSize(newsp))
            self.assertEqual(pop2.subPopName(oldsp), pop.subPopName(newsp))
            for idx in range(pop2.subPopSize(oldsp)):
                self.assertEqual(pop2.individual(idx, oldsp), pop.individual(idx, newsp))
        self.assertRaises(IndexError, pop.removeSubPops, [8])
        # accept single input
        pop.removeSubPops(0)
        #
        # now for virtual subpopulation
        pop = self.getPop(size=[0, 100, 0, 20], subPopNames=['A', 'B', 'C', 'D'])
        initGenotype(pop, freq=[0.5, 0.5])
        initSex(pop)
        pop.setVirtualSplitter(SexSplitter())
        numFemale = pop.subPopSize([1,1])
        # removing the male virtual subpop leaves only females in subpop 1
        pop.removeSubPops([(1,0), 2])
        self.assertEqual(pop.numSubPop(), 3)
        self.assertEqual(pop.subPopSizes(), (0, numFemale, 20))
        for ind in pop.individuals(1):
            self.assertEqual(ind.sex(), FEMALE)
        # continue...
        pop.removeSubPops([(1,1), 2])
        self.assertEqual(pop.numSubPop(), 2)
        self.assertEqual(pop.subPopSizes(), (0, 0))
        #
        # test if allele lineage is correctly removed
        if moduleInfo()['alleleType'] != 'lineage':
            return
        pop = Population([10]*10, loci=10)
        pop.lineage()[:] = range(2000)
        pop.removeSubPops(range(1, 10, 2))
        # 0, 1, .... 20 | ... | 199
        # removed 200 ... 399
        # 400 ... 599
        for sp in range(5):
            for idx, ind in enumerate(pop.individuals(sp)):
                self.assertEqual(ind.lineage(), range(200*2*sp + idx*20, 200*2*sp + idx*20 + 20))

    def testRemoveIndividuals(self):
        'Testing Population::removeIndividuals(inds)'
        pop = self.getPop(size =[20, 100, 30], subPopNames=['sp1', 'sp2', 'sp3'])
        pop1 = pop.clone()
        pop.removeIndividuals([15])
        self.assertEqual(pop.subPopSizes(), (19, 100, 30))
        # individuals before index 15 are unchanged; the rest shift down one
        for idx in range(15):
            self.assertEqual(pop1.individual(idx), pop.individual(idx))
        for idx in range(15, pop.popSize()):
            self.assertEqual(pop1.individual(idx+1), pop.individual(idx))
        # accept single input
        pop.removeIndividuals(2)
        # 1) pop.removeIndividuals([500]) should yield an exception.
        pop = pop1.clone()
        self.assertRaises(IndexError, pop.removeIndividuals, 500)
        # 2) pop.removeIndividuals([]) should not change anything (self.assertEqual(pop, pop1))
        pop = pop1.clone()
        pop.removeIndividuals([])
        self.assertEqual(pop, pop1)
        # 3) pop.removeIndividuals(range(15, 25)) ...
        pop = pop1.clone()
        inds = list(range(15, 25))
        # order of indexes given to removeIndividuals must not matter
        random.shuffle(inds)
        pop.removeIndividuals(inds)
        self.assertEqual(pop.subPopSizes(), (15, 95, 30))
        for idx in range(15):
            self.assertEqual(pop1.individual(idx), pop.individual(idx))
        for idx in range(24, pop.popSize()):
            self.assertEqual(pop1.individual(idx+10), pop.individual(idx))
        # 4) pop.removeIndividuals(range(15, 125)) removes the middle subpopulation
        # and some individuals in subpopulation 0? Check if subpopulation name is handled correctly.
        pop = pop1.clone()
        inds = list(range(15, 125))
        random.shuffle(inds)
        pop.removeIndividuals(inds)
        self.assertEqual(pop.subPopSizes(), (15, 0, 25))
        for idx in range(15):
            self.assertEqual(pop1.individual(idx), pop.individual(idx))
        for idx in range(15, pop.popSize()):
            self.assertEqual(pop1.individual(idx+110), pop.individual(idx))
        self.assertEqual(pop.subPopNames(), pop1.subPopNames())
        # 5) pop.removeIndividuals(range(pop.subPopBegin(1), pop.subPopEnd(1))) removes the middle subpopulation.
        # Check if subpopulation name is handled correctly.
        pop = pop1.clone()
        inds = list(range(pop.subPopBegin(1), pop.subPopEnd(1)))
        random.shuffle(inds)
        pop.removeIndividuals(inds)
        self.assertEqual(pop.subPopSizes(), (20, 0, 30))
        for idx in range(20):
            self.assertEqual(pop1.individual(idx), pop.individual(idx))
        for idx in range(21, pop.popSize()):
            self.assertEqual(pop1.individual(idx+100), pop.individual(idx))
        self.assertEqual(pop.subPopNames(), pop1.subPopNames())
        # 6) pop.removeIndividuals(range(pop.popSize())) removes all individuals in this population.
        pop = pop1.clone()
        inds = list(range(0, 150))
        random.shuffle(inds)
        pop.removeIndividuals(inds)
        # all subpopulations empty, but names are retained
        self.assertEqual(pop.subPopSizes(), (0, 0, 0))
        self.assertEqual(pop.subPopNames(), pop1.subPopNames())
        #
        #
        # by ID?
        pop = self.getPop(size=[100, 200], loci=[2, 3, 1], ancGen=5, infoFields=['ind_id'])
        for gen in range(6):
            pop.useAncestralGen(gen)
            initGenotype(pop, freq=[0.5, 0.5])
        pop.useAncestralGen(0)
        IdTagger().reset(1)
        tagID(pop)
        exclude = set([random.randint(1, 1800) for x in range(600)])
        pop1 = pop.clone()
        pop1.removeIndividuals(IDs=list(exclude))
        sz = []
        sz1 = []
        # check every ancestral generation: excluded IDs are gone, everyone
        # left still maps back to the same individual in the original
        for gen in range(6):
            pop.useAncestralGen(gen)
            pop1.useAncestralGen(gen)
            sz.append(pop.popSize())
            sz1.append(pop1.popSize())
            id = set(pop.indInfo('ind_id'))
            id1 = set(pop1.indInfo('ind_id'))
            for e in exclude:
                self.assertEqual(e in id1, False)
            for ind in pop1.individuals():
                self.assertEqual(ind, pop.indByID(ind.ind_id), gen)
        self.assertEqual(sum(sz), sum(sz1) + len(exclude))
        # remove multiple individual
        pop = Population(10, infoFields='x')
        pop.setIndInfo([1, 2, 2, 3, 4, 5, 2, 3, 4, 3], 'x')
        pop.removeIndividuals(IDs=2, idField='x')
        self.assertEqual(pop.popSize(), 7)
        self.assertEqual(pop.indInfo('x'), (1, 3, 4, 5, 3, 4, 3))
        pop.removeIndividuals(IDs=[2,3,4], idField='x')
        self.assertEqual(pop.popSize(), 2)
        self.assertEqual(pop.indInfo('x'), (1, 5))
        # by filter function
        pop = Population(10, infoFields='x')
        pop.setIndInfo([1, 2, 2, 3, 4, 5, 2, 3, 4, 3], 'x')
        pop.removeIndividuals(filter=lambda ind: ind.x in [3, 4])
        self.assertEqual(pop.popSize(), 5)
        self.assertEqual(pop.indInfo('x'), (1, 2, 2, 5, 2))
        # test if allele lineage is correctly removed
        if moduleInfo()['alleleType'] != 'lineage':
            return
        pop = Population([10]*10, loci=10)
        pop.lineage()[:] = range(2000)
        pop.removeIndividuals(range(0, 100,2))
        lin = list(pop.lineage())
        self.assertEqual(len(lin), 1000)
        # the lineage of the remaining individuals are correctly handled
        # NOTE(review): statement split across the chunk boundary; it closes
        # on the next line
        for idx,ind in enumerate(pop.individuals()):
            self.assertEqual(ind.lineage(),
                range((idx*2+1)*20, (idx*2+2)*20))

    def testExtractSubPops(self):
        'Testing Population::extractSubPops()'
        pop = self.getPop(size=[0, 100, 0, 20, 30, 0, 50], subPopNames=['A', 'B', 'C', 'D', 'E', 'F', 'G'])
        initSex(pop)
        initGenotype(pop, freq=[0.5, 0.5])
        initLineage(pop, range(10))
        self.assertEqual(pop.numSubPop(), 7)
        # extract only the non-empty subpopulations
        pop1 = pop.extractSubPops([x for x in range(7) if pop.subPopSize(x) != 0])
        self.assertEqual(pop1.numSubPop(), 4)
        self.assertEqual(pop1.subPopSizes(), (100, 20, 30, 50))
        for (oldsp, newsp) in [(1, 0), (3, 1), (4, 2), (6, 3)]:
            # map of old and new id.
            self.assertEqual(pop.subPopSize(oldsp), pop1.subPopSize(newsp))
            self.assertEqual(pop.subPopName(oldsp), pop1.subPopName(newsp))
            for idx in range(pop.subPopSize(oldsp)):
                self.assertEqual(pop.individual(idx, oldsp), pop1.individual(idx, newsp))
        # extract subpop
        pop2 = pop1.extractSubPops([1, 2])
        self.assertEqual(pop2.subPopSizes(), (20, 30))
        for (oldsp, newsp) in [(1, 0), (2, 1)]:
            # map of old and new id.
            self.assertEqual(pop1.subPopSize(oldsp), pop2.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop2.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop2.individual(idx, newsp))
        self.assertRaises(IndexError, pop.extractSubPops, [8])
        # accept single input
        pop.extractSubPops(0)
        #
        # now for virtual subpopulation
        pop = self.getPop(size=[0, 100, 0, 20], subPopNames=['A', 'B', 'C', 'D'])
        initGenotype(pop, freq=[0.5, 0.5])
        initSex(pop)
        pop.setVirtualSplitter(SexSplitter())
        numMale = pop.subPopSize([1,0])
        # extract the male virtual subpop of subpop 1, plus subpop 3
        pop = pop.extractSubPops([(1,0), 3])
        self.assertEqual(pop.numSubPop(), 2)
        self.assertEqual(pop.subPopSizes(), (numMale, 20))
        for ind in pop.individuals(0):
            self.assertEqual(ind.sex(), MALE)
        # continue...
pop1 = pop.extractSubPops([(0,1), 1]) self.assertEqual(pop1.numSubPop(), 2) self.assertEqual(pop1.subPopSizes(), (0, 20)) # remove multiple individual pop = Population(10, infoFields='x') pop.setIndInfo([1, 2, 2, 3, 4, 5, 2, 3, 4, 3], 'x') pop1 = pop.extractIndividuals(IDs=2, idField='x') self.assertEqual(pop1.popSize(), 3) self.assertEqual(pop1.indInfo('x'), (2, 2, 2)) pop1 = pop.extractIndividuals(IDs=[2,3,4], idField='x') self.assertEqual(pop1.popSize(), 8) self.assertEqual(pop1.indInfo('x'), (2, 2, 3, 4, 2, 3, 4, 3)) # by filter function pop = Population(10, infoFields='x') pop.setIndInfo([1, 2, 2, 3, 4, 5, 2, 3, 4, 3], 'x') pop1 = pop.extractIndividuals(filter=lambda ind: ind.x in [3, 4]) self.assertEqual(pop1.popSize(), 5) self.assertEqual(pop1.indInfo('x'), (3, 4, 3, 4, 4)) def testExtractSubPops(self): 'Testing Population::extractSubPops()' pop = self.getPop(size=[0, 100, 0, 20, 30, 0, 50], subPopNames=['A', 'B', 'C', 'D', 'E', 'F', 'G']) initSex(pop) initGenotype(pop, freq=[0.5, 0.5]) self.assertEqual(pop.numSubPop(), 7) pop1 = pop.extractSubPops([x for x in range(7) if pop.subPopSize(x) != 0]) self.assertEqual(pop1.numSubPop(), 4) def testRearrangedExtractSubPops(self): 'Testing Population::extractSubPops(subPops, true)' pop = self.getPop(size=[0, 100, 0, 20, 30, 0, 50], subPopNames=['A', 'B', 'C', 'D', 'E', 'F', 'G']) initSex(pop) initGenotype(pop, freq=[0.5, 0.5]) self.assertEqual(pop.numSubPop(), 7) pop1 = pop.extractSubPops([x for x in range(6, 0, -1) if pop.subPopSize(x) != 0], True) self.assertEqual(pop1.numSubPop(), 4) self.assertEqual(pop1.subPopSizes(), (50, 30, 20, 100)) for (oldsp, newsp) in [(6, 0), (4, 1), (3, 2), (1, 3)]: # map of old and new id. 
            # (loop body of the old/new subpop id mapping check above)
            self.assertEqual(pop.subPopSize(oldsp), pop1.subPopSize(newsp))
            self.assertEqual(pop.subPopName(oldsp), pop1.subPopName(newsp))
            for idx in range(pop.subPopSize(oldsp)):
                self.assertEqual(pop.individual(idx, oldsp), pop1.individual(idx, newsp))
        # extract subpop
        pop2 = pop1.extractSubPops([2, 1], True)
        self.assertEqual(pop2.subPopSizes(), (20, 30))
        for (oldsp, newsp) in [(2, 0), (1, 1)]:
            # map of old and new id.
            self.assertEqual(pop1.subPopSize(oldsp), pop2.subPopSize(newsp))
            self.assertEqual(pop1.subPopName(oldsp), pop2.subPopName(newsp))
            for idx in range(pop1.subPopSize(oldsp)):
                self.assertEqual(pop1.individual(idx, oldsp), pop2.individual(idx, newsp))
        self.assertRaises(IndexError, pop.extractSubPops, [8])
        # accept single input
        pop.extractSubPops(0, True)
        #
        # now for virtual subpopulation
        pop = self.getPop(size=[0, 100, 0, 20], subPopNames=['A', 'B', 'C', 'D'])
        initGenotype(pop, freq=[0.5, 0.5])
        initSex(pop)
        pop.setVirtualSplitter(SexSplitter())
        numMale = pop.subPopSize([1,0])
        # rearranged extraction of a whole subpop plus two virtual subpops
        pop = pop.extractSubPops([3, (1,0), (1,1)], True)
        self.assertEqual(pop.numSubPop(), 3)
        self.assertEqual(pop.subPopSizes(), (20, numMale, 100-numMale))
        for ind in pop.individuals(1):
            self.assertEqual(ind.sex(), MALE)
        for ind in pop.individuals(2):
            self.assertEqual(ind.sex(), FEMALE)
        # continue...
        pop1 = pop.extractSubPops([(0,1), 1], True)
        self.assertEqual(pop1.numSubPop(), 2)
        self.assertEqual(pop1.subPopSize(1), numMale)

    def testExtractIndividuals(self):
        'Testing Population::extractIndividuals(inds)'
        pop = self.getPop(size =[20, 100, 30], subPopNames=['sp1', 'sp2', 'sp3'])
        initSex(pop)
        initGenotype(pop, freq=[0.4, 0.6])
        # extracting nothing yields an empty population that keeps subpop names
        pop1 = pop.extractIndividuals()
        self.assertEqual(pop1.subPopSizes(), (0, 0, 0))
        self.assertEqual(pop1.subPopNames(), ('sp1', 'sp2', 'sp3'))
        pop1 = pop.extractIndividuals([15, 110, 120, 121])
        self.assertEqual(pop1.subPopSizes(), (1, 1, 2))
        for idx,oldidx in enumerate([15, 110, 120, 121]):
            self.assertEqual(pop1.individual(idx), pop.individual(oldidx))
        # accept single input
        pop.extractIndividuals(2)
        # 1) pop.extractIndividuals([500]) should yield an exception.
        self.assertRaises(IndexError, pop.extractIndividuals, 500)
        #
        # FIXME: Needs more tests
        #
        # by ID?
        pop = self.getPop(size=[100, 200], loci=[2, 3, 1], ancGen=5, infoFields=['ind_id'])
        for gen in range(6):
            pop.useAncestralGen(gen)
            initGenotype(pop, freq=[0.5, 0.5])
        pop.useAncestralGen(0)
        tagID(pop, reset=True)
        include = set([random.randint(1, 1800) for x in range(600)])
        pop1 = pop.extractIndividuals(IDs=list(include))
        sz1 = []
        # verify every ancestral generation of the extracted population
        for gen in range(6):
            pop.useAncestralGen(gen)
            pop1.useAncestralGen(gen)
            sz1.append(pop1.popSize())
            id1 = set(pop1.indInfo('ind_id'))
            for e in id1:
                self.assertEqual(e in include, True)
            for ind in pop1.individuals():
                self.assertEqual(ind, pop.indByID(ind.ind_id), gen)
        self.assertEqual(sum(sz1), len(include))

    def testRemoveLoci(self):
        'Testing Population::removeLoci(loci=[], keep=[])'
        # Fixme: test loci, and keep, and test unordered parameters
        pop = self.getPop(size=[1, 2], loci=[2, 3, 1], ancGen=5)
        pop1 = pop.clone()
        # FIXME: test remove multiple loci from multiple chromosomes,
        # which may not be in order
        pop.removeLoci(2)
        # NOTE(review): statement split across the chunk boundary; the
        # assignment closes on the next line
        for gen in range(pop.ancestralGens(), -1, -1):
            pop.useAncestralGen(gen)
            pop1.useAncestralGen(gen)
            for idx in range(pop.popSize()):
                ind =
                pop.individual(idx)
                ind1 = pop1.individual(idx)
                # loci after the removed one shift down by one index
                for loc in range(2):
                    self.assertEqual(ind.allele(loc), ind1.allele(loc))
                for loc in range(2, 5):
                    self.assertEqual(ind.allele(loc), ind1.allele(loc+1))
        # test the possibility of using loci names to remove loci
        pop = self.getPop(size=[1, 2], loci=[2, 3, 2], lociNames=['a%d' % x for x in range(7)], ancGen=5)
        pop1 = pop.clone()
        # FIXME: test remove multiple loci from multiple chromosomes,
        # which may not be in order
        pop.removeLoci('a2')
        pop.removeLoci(['a4', 'a5'])
        self.assertEqual(pop.numLoci(), (2,1,1))
        for gen in range(pop.ancestralGens(), -1, -1):
            pop.useAncestralGen(gen)
            pop1.useAncestralGen(gen)
            for idx in range(pop.popSize()):
                ind = pop.individual(idx)
                ind1 = pop1.individual(idx)
                for loc in range(2):
                    self.assertEqual(ind.allele(loc), ind1.allele(loc))
                self.assertEqual(ind.allele(2), ind1.allele(3))
                self.assertEqual(ind.allele(3), ind1.allele(6))
        #
        # testing remove the last locus
        if moduleInfo()['alleleType'] == 'binary':
            # binary modules truncate alleles to 0/1, hence different
            # expected genotypes
            pop = Population(100, loci=10)
            initGenotype(pop, haplotypes=range(10))
            pop.removeLoci(9)
            self.assertEqual(pop.individual(0).genotype(), [0]+[1]*8 + [0] + [1]*8)
            #
            pop.removeLoci(2)
            self.assertEqual(pop.individual(0).genotype(), [0]+[1]*7 + [0] + [1]*7)
            #
            pop.addLoci(0, 2.8, 'test')
            self.assertEqual(pop.individual(0).genotype(), [0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])
            pop.removeLoci('test')
            self.assertEqual(pop.individual(0).genotype(), [0] + [1]*7 + [0] + [1]*7)
        else:
            pop = Population(100, loci=10)
            initGenotype(pop, haplotypes=range(10))
            pop.removeLoci(9)
            self.assertEqual(pop.individual(0).genotype(), list(range(9)) + list(range(9)))
            #
            pop.removeLoci(2)
            self.assertEqual(pop.individual(0).genotype(), [0, 1, 3, 4, 5, 6, 7, 8, 0, 1, 3, 4, 5, 6, 7, 8])
            #
            pop.addLoci(0, 2.8, 'test')
            self.assertEqual(pop.individual(0).genotype(), [0, 1, 0, 3, 4, 5, 6, 7, 8, 0, 1, 0, 3, 4, 5, 6, 7, 8])
            pop.removeLoci('test')
            # NOTE(review): list literal split across the chunk boundary; it
            # closes on the next line
            self.assertEqual(pop.individual(0).genotype(), [0, 1, 3, 4, 5, 6, 7, 8, 0, 1, 3,
                4, 5, 6, 7, 8])

    def testRecodeAlleles(self):
        'Testing Population::recodeAlleles(alleles, loci)'
        pop = self.getPop(size=[10, 20], loci=[4, 5], ancGen=0, lociNames=['a%d' % x for x in range(9)])
        initSex(pop)
        initGenotype(pop, freq=[.2, .8])
        old = list(pop.genotype())
        # switch 0 and 1
        pop.recodeAlleles([1, 0])
        new = list(pop.genotype())
        # every allele flipped, so old + new is always 1
        for x,y in zip(old, new):
            self.assertEqual(x + y, 1)
        # clear to 0
        pop.recodeAlleles([0, 0], alleleNames=[['A']])
        self.assertEqual(pop.genotype(), [0]*(pop.totNumLoci()*pop.popSize()*pop.ploidy()))
        # use a function?
        def func(allele, locus):
            return allele + locus
        #
        pop.recodeAlleles(func, loci=1)
        # only locus 1 is recoded: allele 0 + locus index 1 -> 1
        self.assertEqual(pop.genotype(), [0, 1, 0, 0, 0, 0, 0, 0, 0] * (pop.popSize()*pop.ploidy()))
        # recode specified loci.
        pop = self.getPop(size=[10, 20], loci=[4, 5], ancGen=0, lociNames=['a%d' % x for x in range(9)])
        initSex(pop)
        initGenotype(pop, freq=[0, .2, .8])
        pop.recodeAlleles([0]*5, loci=(2, 4))
        for ind in pop.individuals():
            for loc in range(9):
                for p in range(2):
                    if loc in [2, 4]:
                        self.assertEqual(ind.allele(loc, p), 0)
                    else:
                        # freq starts at allele 1, so untouched loci are nonzero
                        self.assertNotEqual(ind.allele(loc, p), 0)
        # using loci names
        pop.recodeAlleles([1]*10, loci=['a2', 'a4'])
        for ind in pop.individuals():
            for loc in ['a2', 'a4']:
                for p in range(2):
                    self.assertEqual(ind.allele(pop.locusByName(loc), p), 1)
        # FIXME: recode ancestral generations.
def testResize(self): 'Testing Population::resize(newSubPopSizes, propagate=false)' pop = self.getPop(size=[100, 20, 30], loci=[4, 5, 1]) initSex(pop) initGenotype(pop, freq=[.2, .3, .5]) pop1 = pop.clone() pop2 = pop.clone() # number of subpop mismatch self.assertRaises(ValueError, pop1.resize, [50, 50]) # resize without propagation pop1.resize([50, 50, 80], propagate=False) for sp in range(pop1.numSubPop()): for i in range(min(pop1.subPopSize(sp), pop.subPopSize(sp))): self.assertEqual(pop1.individual(i, sp), pop.individual(i, sp)) for i in range(min(pop1.subPopSize(sp), pop.subPopSize(sp)), pop1.subPopSize(sp)): self.assertEqual(pop1.individual(i, sp).genotype(), [0]*20) # resize with Population pop2.resize([50, 50, 80], propagate=True) for sp in range(pop1.numSubPop()): for i in range(pop2.subPopSize(sp)): self.assertEqual(pop2.individual(i, sp), pop.individual(i%pop.subPopSize(sp), sp)) # resize from empty subpopulation? pop = self.getPop(size=[100, 0, 30, 0], loci=[4, 5, 1]) self.assertEqual(pop.subPopSizes(), (100, 0, 30, 0)) pop.resize([100, 20, 50, 20]) self.assertEqual(pop.subPopSizes(), (100, 20, 50, 20)) def testSplitSubPop(self): 'Testing Population::splitSubPop(subPop, sizes)' pop = Population(size=[100, 80, 50], subPopNames=['A', 'B', 'C']) pop1 = pop.clone() self.assertRaises(ValueError, pop.splitSubPop, 1, [20, 70]) ids = pop.splitSubPop(1, [20, 60]) self.assertEqual(ids, (1, 2)) self.assertEqual(pop1.subPopSize(1), pop.subPopSize(1)+pop.subPopSize(2)) self.assertEqual(pop1.subPopName(1), pop.subPopName(1)) self.assertEqual(pop1.subPopName(1), pop.subPopName(2)) for idx in range(20): self.assertEqual(pop1.individual(idx, 1), pop. individual(idx, 1)) for idx in range(20, 80): self.assertEqual(pop1.individual(idx, 1), pop. individual(idx-20, 2)) for (oldsp, newsp) in [(0, 0), (2, 3)]: # map of old and new id. 
self.assertEqual(pop1.subPopSize(oldsp), pop.subPopSize(newsp)) self.assertEqual(pop1.subPopName(oldsp), pop.subPopName(newsp)) for idx in range(pop1.subPopSize(oldsp)): self.assertEqual(pop1.individual(idx, oldsp), pop.individual(idx, newsp)) # assign new names to split subpopulation pop = Population(size=[100, 80, 50]) self.assertRaises(ValueError, pop.splitSubPop, 1, [20, 70], names=['A1']) ids = pop.splitSubPop(1, [20, 60], names=['A1', 'A2']) self.assertEqual(ids, (1, 2)) self.assertEqual(pop.subPopName(1), 'A1') self.assertEqual(pop.subPopName(2), 'A2') self.assertEqual(pop.subPopNames(), ('', 'A1', 'A2', '')) # split by proportion pop = Population(size=[100, 80, 50]) self.assertRaises(ValueError, pop.splitSubPop, 1, [0.2, 0.7]) pop.splitSubPop(1, [0.3, 0.7]) self.assertEqual(pop.subPopSizes(), (100, 24, 56, 50)) pop.splitSubPop(0, [0.333, 0.667]) self.assertEqual(pop.subPopSizes(), (33, 67, 24, 56, 50)) def testSetSubPopByIndInfo(self): 'Testing Population::setSubPopByIndInfo(field)' pop = self.getPop(subPopNames=['A', 'B']) for ind in pop.individuals(): n = random.randint(-1, 5) ind.setInfo(n, 'x') pop1 = pop.setSubPopByIndInfo('x') self.assertEqual(pop.numSubPop(), 6) self.assertEqual(pop.subPopName(0), 'A') self.assertEqual(pop.subPopName(1), 'B') for i in range(2, 6): self.assertEqual(pop.subPopName(i), '') # apply this function to an empty information would crash simuPOP (issue #19) pop = Population(size=0, infoFields='a') pop.setSubPopByIndInfo('a') def testSortIndividuals(self): 'Testing Population::sortIndividuals(infoFields)' pop = self.getPop(size=[1000, 2000], infoFields=['a', 'b']) initInfo(pop, lambda: random.randint(1, 5), infoFields=['a', 'b']) pop.sortIndividuals('a') for sp in range(2): for i in range(1, pop.subPopSize(sp)): self.assertTrue(pop.individual(i-1, sp).a <= pop.individual(i, sp).a) self.assertTrue(pop.individual(999).a > pop.individual(0, 1).a) # sorting in reverse order initInfo(pop, lambda: random.randint(1, 5), 
infoFields=['a', 'b']) pop.sortIndividuals('a', reverse=True) for sp in range(2): for i in range(1, pop.subPopSize(sp)): self.assertTrue(pop.individual(i-1, sp).a >= pop.individual(i, sp).a) self.assertTrue(pop.individual(999).a < pop.individual(0, 1).a) def testAddInfoFields(self): 'Testing Population::addInfoFields(fields, init=0)' pop = self.getPop() pop1 = pop.addInfoFields('fitness', 5.0) self.assertEqual(pop.infoSize(), 2) pop1 = pop.addInfoFields('misc', 6.0) self.assertEqual(pop.infoSize(), 3) self.assertEqual(pop.indInfo('fitness'), tuple([5.0]*pop.popSize())) self.assertEqual(pop.indInfo('misc'), tuple([6.0]*pop.popSize())) pop = self.getPop() pop.addInfoFields(['x', 'fitness', 'misc'], 2.0) self.assertEqual(pop.infoSize(), 3) self.assertEqual(pop.indInfo('fitness'), tuple([2.0]*pop.popSize())) self.assertEqual(pop.indInfo('misc'), tuple([2.0]*pop.popSize())) # info field x is re-initialized self.assertEqual(pop.indInfo('x'), tuple([2.0]*pop.popSize())) # add again, but reinitialize ... 
pop.addInfoFields(['x', 'x', 'misc'], 5) self.assertEqual(pop.infoSize(), 3) self.assertEqual(pop.indInfo('misc'), tuple([5.0]*pop.popSize())) def testIndInfo(self): 'Testing Population::indInfo(idx), indInfo(name), indInfo(idx, subPop)' 'indInfo(name, subPop), setIndInfo(values, idx), setIndInfo(values, name)' 'setIndInfo(values, idx, subPop), setIndInfo(values, name, subPop)' # no VSP, set and read info def testSetAndRead(pop): pop.setIndInfo([3], 'x') for idx, ind in enumerate(pop.individuals()): self.assertEqual(ind.info('x'), 3) self.assertEqual(pop.indInfo('x'), tuple([3]*pop.popSize())) self.assertEqual(pop.indInfo(0), tuple([3]*pop.popSize())) self.assertEqual(pop.indInfo('x', 0), tuple([3]*pop.subPopSize(0))) # pop.setIndInfo([1, 2], 'x', 0) pop.setIndInfo([3, 4], 0, 1) for idx, ind in enumerate(pop.individuals(0)): if idx % 2 == 0: self.assertEqual(ind.info('x'), 1) else: self.assertEqual(ind.info('x'), 2) self.assertEqual(pop.indInfo('x', 0), tuple([1, 2]*(int(pop.subPopSize(0)/2)))) self.assertEqual(pop.indInfo(0, 1), tuple([3, 4]*(int(pop.subPopSize(1)/2)))) # testSetAndRead(self.getPop()) testSetAndRead(self.getPop(True)) # test for virtual subpopulation def testVSPSetAndRead(pop): pop.setIndInfo([1, 2], 'x', [1, 0]) pop.setIndInfo([3], 0, [1, 1]) for idx, ind in enumerate(pop.individuals([1, 0])): self.assertEqual(ind.sex(), MALE) if idx % 2 == 0: self.assertEqual(ind.info('x'), 1) else: self.assertEqual(ind.info('x'), 2) for idx, ind in enumerate(pop.individuals([1, 1])): self.assertEqual(ind.sex(), FEMALE) self.assertEqual(ind.info('x'), 3) self.assertEqual(pop.indInfo('x', [1, 0]), tuple(([1, 2]*pop.subPopSize(1))[:pop.subPopSize([1, 0])])) self.assertEqual(pop.indInfo(0, [1, 1]), tuple([3]*pop.subPopSize([1, 1]))) # self.assertRaises(ValueError, testVSPSetAndRead, self.getPop()) testVSPSetAndRead(self.getPop(VSP=True)) def testSetInfoFields(self): 'Testing Population::setInfoFields(fields, init=0)' pop = self.getPop() pop1 = 
pop.setInfoFields(['fitness', 'misc'], 3) self.assertEqual(pop.infoSize(), 2) # info field x is removed self.assertEqual(pop.indInfo('fitness'), tuple([3]*pop.popSize())) self.assertEqual(pop.indInfo('misc'), tuple([3]*pop.popSize())) # allow set duplicated info fields pop1 = pop.setInfoFields(['fitness', 'fitness'], 1) self.assertEqual(pop.infoSize(), 1) def testClone(self): 'Testing Population::clone()' pop = self.getPop(ancGen = 5) pop1 = pop.clone() for gen in range(pop.ancestralGens(), -1, -1): pop.useAncestralGen(gen) pop1.useAncestralGen(gen) self.assertEqual(pop, pop1) def testSave(self): 'Testing Population::save(filename)' pop = self.getPop(ancGen=5, infoFields=['a', 'b']) for gen in range(pop.ancestralGens(), -1, -1): initGenotype(pop, freq=[0.3, 0.7]) initSex(pop) initInfo(pop, lambda:random.randint(0, 40), infoFields=['a', 'b']) pop.save("popout") pop1 = loadPopulation("popout") self.assertEqual(pop, pop1) self.assertEqual(pop.indInfo('a'), pop1.indInfo('a')) self.assertEqual(pop.indInfo('b'), pop1.indInfo('b')) # stat(pop, alleleFreq=list(range(pop.totNumLoci()))) a = pop.dvars().alleleFreq[0][1] pop.save("popout") pop1 = loadPopulation("popout") self.assertEqual(a, pop1.dvars().alleleFreq[0][1]) self.assertEqual(pop, pop1) # # testing the save of a population with non-pickleable objects pop.dvars().module_os = os self.assertTrue('module_os' in pop.vars()) # module_os is not saved because it is a module pop.save('popout') pop1 = loadPopulation('popout') self.assertFalse('module_os' in pop1.vars()) os.remove('popout') def testCrossPlatformLoad(self): 'Testing loading populations created from other platform and allele types' localFile = 'sample_%d_%s_v3.pop' % ( \ moduleInfo()['wordsize'], {'short': 'std', 'binary': 'ba', 'long': 'la', 'mutant': 'mu', 'lineage': 'lin' }[moduleInfo()['alleleType']]) if not os.path.isfile(localFile): print('Creating local pop file') pop = Population(10000, loci=100, infoFields=['a', 'ind_id']) initGenotype(pop, 
genotype=[0, 1, 1, 1, 0, 1, 1]) initInfo(pop, values=[1, 2, 3, 4, 5], infoFields='a') stat(pop, alleleFreq=ALL_AVAIL) pop.save(localFile) for version in [0, 1, 2, 3]: for plat in [32, 64]: for mod in ['std', 'la', 'ba', 'mu', 'lin']: if version == 0 and mod in ['lin', 'mu']: continue if version == 0: popname = 'sample_%d_%s.pop' % (plat, mod) else: popname = 'sample_%d_%s_v1.pop' % (plat, mod) if not os.path.isfile(popname): print('Missing testing population name: %s' % popname) continue pop = Population() #print('%s %s %s' % (version, plat, mod)) try: pop = loadPopulation(popname) except: pass self.assertEqual(pop.popSize(), 10000) self.assertEqual(list(pop.indInfo('a')), [1, 2, 3, 4, 5] * int(10000 / 5)) self.assertEqual(pop.genotype(), ([0, 1, 1, 1, 0, 1, 1] * int(10000*100*2/7+1))[:10000*100*2]) if '_v3' in popname: self.assertTrue(isinstance(pop.dvars().alleleFreq, defdict)) def testVars(self): 'Testing Population::vars(), vars(subPop), dvars(), dvars(subPop)' pop = self.getPop(size=1000, loci=[2, 4]) initSex(pop) initGenotype(pop, freq=[.2, .3, .5]) stat(pop, alleleFreq=list(range(0, 6))) pop1 = pop.clone() self.assertEqual(len(pop.vars()["alleleFreq"]), 6) self.assertEqual(len(pop.dvars().alleleFreq), 6) self.assertEqual(len(pop1.vars()["alleleFreq"]), 6) self.assertEqual(len(pop1.dvars().alleleFreq), 6) # with subPop pop = self.getPop(size=[20, 80], loci=[2, 4]) initSex(pop) initGenotype(pop, freq=[.2, .3, .5]) stat(pop, alleleFreq=list(range(0, 6)), vars='alleleFreq_sp') pop1 = pop.clone() self.assertEqual(len(pop.vars(0)["alleleFreq"]), 6) self.assertEqual(len(pop.dvars(1).alleleFreq), 6) self.assertEqual(len(pop1.vars(0)["alleleFreq"]), 6) self.assertEqual(len(pop1.dvars(1).alleleFreq), 6) def testSexSplitter(self): 'Testing SexSplitter::SexSplitter()' pop = Population(size=[20, 80]) initSex(pop) initGenotype(pop, freq=[0.4, 0.6]) stat(pop, numOfMales=True, vars=['numOfMales_sp', 'numOfFemales_sp']) pop.setVirtualSplitter(SexSplitter()) 
self.assertEqual(pop.subPopSize([1, 0]), pop.dvars(1).numOfMales) self.assertEqual(pop.subPopSize([1, 1]), pop.dvars(1).numOfFemales) self.assertEqual(pop.subPopName([1, 0]), 'Male') self.assertEqual(pop.subPopName([1, 1]), 'Female') for ind in pop.individuals([0, 0]): self.assertEqual(ind.sex(), MALE) for ind in pop.individuals([0, 1]): self.assertEqual(ind.sex(), FEMALE) # test nested virtual subpopulation for ind in pop.individuals([0, 0]): self.assertEqual(ind.sex(), MALE) for ind1 in pop.individuals([0, 1]): self.assertEqual(ind1.sex(), FEMALE) numMale = 0 numFemale = 0 for ind in pop.individuals(1): if ind.sex() == MALE: numMale += 1 else: numFemale += 1 #print numMale, numFemale self.assertEqual(numMale == 0, False) self.assertEqual(numFemale == 0, False) def testAffectionSplitter(self): 'Testing AffectionSplitter::AffectionSplitter()' pop = Population(size=[20, 80], loci=[1, 2]) initSex(pop) initGenotype(pop, freq=[0.4, 0.6]) maPenetrance(pop, loci=0, wildtype=0, penetrance=[0.2, 0.4, 0.8]) stat(pop, numOfAffected=True, vars=['numOfAffected_sp', 'numOfUnaffected_sp']) pop.setVirtualSplitter(AffectionSplitter()) self.assertEqual(pop.subPopSize([1, 1]), pop.dvars(1).numOfAffected) self.assertEqual(pop.subPopSize([1, 0]), pop.dvars(1).numOfUnaffected) self.assertEqual(pop.subPopName([1, 0]), 'Unaffected') self.assertEqual(pop.subPopName([1, 1]), 'Affected') for ind in pop.individuals([1, 1]): self.assertEqual(ind.affected(), True) for ind in pop.individuals([1, 0]): self.assertEqual(ind.affected(), False) numAffected = 0 numUnaffected = 0 for ind in pop.individuals(1): if ind.affected(): numAffected += 1 else: numUnaffected += 1 self.assertEqual(numAffected == 0, False) self.assertEqual(numUnaffected == 0, False) def testInfoSplitter(self): 'Testing InfoSplitter::InfoSplitter(field, values=[], cutoff=[])' pop = Population(1000, infoFields=['x']) for ind in pop.individuals(): ind.setInfo(random.randint(10, 20), 'x') pop.setVirtualSplitter(InfoSplitter('x', 
values=list(range(10, 15)))) self.assertEqual(pop.numVirtualSubPop(), 5) infos = list(pop.indInfo('x')) self.assertEqual(pop.subPopName([0, 0]), "x = 10") self.assertEqual(pop.subPopName([0, 1]), "x = 11") self.assertEqual(pop.subPopName([0, 4]), "x = 14") self.assertEqual(pop.subPopSize([0, 0]), infos.count(10)) self.assertEqual(pop.subPopSize([0, 1]), infos.count(11)) self.assertEqual(pop.subPopSize([0, 2]), infos.count(12)) self.assertEqual(pop.subPopSize([0, 3]), infos.count(13)) self.assertEqual(pop.subPopSize([0, 4]), infos.count(14)) for i in range(5): for ind in pop.individuals([0, i]): self.assertEqual(ind.info('x'), 10+i) # test cutoff pop.setVirtualSplitter(InfoSplitter('x', cutoff=[11.5, 13.5])) self.assertEqual(pop.subPopName([0, 0]), "x < 11.5") self.assertEqual(pop.subPopName([0, 1]), "11.5 <= x < 13.5") self.assertEqual(pop.subPopName([0, 2]), "x >= 13.5") self.assertEqual(pop.subPopSize([0, 0]), infos.count(10) + infos.count(11)) self.assertEqual(pop.subPopSize([0, 1]), infos.count(12) + infos.count(13)) self.assertEqual(pop.subPopSize([0, 2]), sum([infos.count(x) for x in range(14, 21)])) for ind in pop.individuals([0, 0]): self.assertEqual(ind.info('x') < 11.5, True) for ind in pop.individuals([0, 1]): self.assertEqual(11.5 <= ind.info('x') < 13.5, True) for ind in pop.individuals([0, 2]): self.assertEqual(ind.info('x') >=13.5, True) # test range pop.setVirtualSplitter(InfoSplitter('x', ranges=[[11.5, 13.5], [9.5, 12.5]])) self.assertEqual(pop.numVirtualSubPop(), 2) self.assertEqual(pop.subPopName([0, 0]), "11.5 <= x < 13.5") self.assertEqual(pop.subPopName([0, 1]), "9.5 <= x < 12.5") self.assertEqual(pop.subPopSize([0, 0]), infos.count(12) + infos.count(13)) self.assertEqual(pop.subPopSize([0, 1]), infos.count(10) + infos.count(11) + infos.count(12)) for ind in pop.individuals([0, 0]): self.assertEqual(ind.info('x') >= 11.5 and ind.info('x') < 13.5, True) for ind in pop.individuals([0, 1]): self.assertEqual(9.5 <= ind.info('x') < 12.5, True) def 
testProportionSplitter(self): 'Testing ProportionSplitter::ProportionSplitter(proportions=[])' pop = Population(10) pop.setVirtualSplitter(ProportionSplitter([0.01]*100)) for i in range(100): self.assertEqual(pop.subPopName([0, i]), "Prop 0.01") if i != 99: self.assertEqual(pop.subPopSize([0, i]), 0) else: # the last vsp is specially treated to avoid such problem. self.assertEqual(pop.subPopSize([0, i]), 10) # pop = Population(1000) pop.setVirtualSplitter(ProportionSplitter([0.4, 0.6])) self.assertEqual(pop.subPopSize([0, 0]), 400) self.assertEqual(pop.subPopSize([0, 1]), 600) def testRangeSplitter(self): 'Testing RangeSplitter::RangeSplitter(ranges)' pop = Population(100) pop.setVirtualSplitter(RangeSplitter(ranges=[[10, 20], [80, 200]])) self.assertEqual(pop.subPopName([0, 0]), "Range [10, 20)") self.assertEqual(pop.subPopName([0, 1]), "Range [80, 200)") self.assertEqual(pop.subPopSize([0, 0]), 10) self.assertEqual(pop.subPopSize([0, 1]), 20) def testGenotypeSplitter(self): 'Testing GenotypeSplitter::GenotypeSplitter(loci(or locus), alleles, phase=False)' pop = Population(1000, loci=[2, 3]) initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(GenotypeSplitter(loci=1, alleles=[[0, 0], [1, 0]], phase=True)) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 0") self.assertEqual(pop.subPopName([0, 1]), "Genotype 1: 1 0") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0), 0) self.assertEqual(ind.allele(1, 1), 0) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 0) stat(pop, genoFreq=[1]) self.assertEqual(pop.subPopSize([0, 0]), pop.dvars().genoNum[1][(0,0)]) self.assertEqual(pop.subPopSize([0, 1]), pop.dvars().genoNum[1][(1,0)]) # non-phased case pop.setVirtualSplitter(GenotypeSplitter(loci=1, alleles=[[0, 0], [1, 0]], phase=False)) for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0), 0) self.assertEqual(ind.allele(1, 1), 0) for ind in 
pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0)==0 or ind.allele(1, 0)==1, True) self.assertEqual(ind.allele(1, 1)==0 or ind.allele(1, 1)==1, True) # multiple loci pop.setVirtualSplitter(GenotypeSplitter(loci=[0, 1], alleles=[0, 1, 1, 1], phase=True)) self.assertEqual(pop.subPopName([0, 0]), "Genotype 0, 1: 0 1 1 1") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(0, 0), 0) self.assertEqual(ind.allele(0, 1), 1) self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 1) # multiple genotype at the same locus pop.setVirtualSplitter(GenotypeSplitter(loci=1, alleles=[0, 1, 1, 1], phase=True)) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 1 1 1") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0)==1 or ind.allele(1, 0)==0, True) self.assertEqual(ind.allele(1, 1), 1) # haploid case pop = Population(1000, ploidy = 1, loci=[2, 3]) initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(GenotypeSplitter(loci=1, alleles=[[0, 1], [2]], phase=True)) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 1") self.assertEqual(pop.subPopName([0, 1]), "Genotype 1: 2") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0)==1 or ind.allele(1, 0)==0, True) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 2) # use of names pop = Population(1000, ploidy = 1, loci=[2, 3], lociNames=['a%d' % x for x in range(5)]) initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(GenotypeSplitter(loci='a1', alleles=[[0, 1], [2]], phase=True)) self.assertEqual(pop.subPopName([0, 0]), "Genotype a1: 0 1") self.assertEqual(pop.subPopName([0, 1]), "Genotype a1: 2") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0)==1 or ind.allele(1, 0)==0, True) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 2) def testCombinedSplitter(self): 'Testing CombinedSplitter:: CombinedSplitter(splitters=[])' pop = Population(1000, loci=[2, 3]) 
initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(CombinedSplitter([ GenotypeSplitter(loci=1, alleles=[[0, 0], [1, 0]], phase=True), SexSplitter()])) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 0") self.assertEqual(pop.subPopName([0, 1]), "Genotype 1: 1 0") self.assertEqual(pop.subPopName([0, 2]), "Male") self.assertEqual(pop.subPopName([0, 3]), "Female") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0), 0) self.assertEqual(ind.allele(1, 1), 0) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 0) for ind in pop.individuals([0, 2]): self.assertEqual(ind.sex(), MALE) for ind in pop.individuals([0, 3]): self.assertEqual(ind.sex(), FEMALE) stat(pop, numOfMales=True, vars='numOfFemales_sp') self.assertEqual(pop.subPopSize([0, 3]), pop.dvars(0).numOfFemales) # # combined splitter with vspMap # pop = Population(1000, loci=[2, 3]) initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(CombinedSplitter([ GenotypeSplitter(loci=1, alleles=[[0, 0], [1, 0]], phase=True), SexSplitter()], vspMap=[[0,2], [1], [3]])) self.assertEqual(pop.numVirtualSubPop(), 3) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 0 or Male") self.assertEqual(pop.subPopName([0, 1]), "Genotype 1: 1 0") self.assertEqual(pop.subPopName([0, 2]), "Female") for ind in pop.individuals([0, 0]): self.assertTrue((ind.allele(1, 0) == 0 and ind.allele(1, 1) == 0) or ind.sex() == MALE) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 0) for ind in pop.individuals([0, 2]): self.assertEqual(ind.sex(), FEMALE) # pop = Population(1000, loci=[2], infoFields='a') initInfo(pop, random.randint(0, 3), infoFields='a') pop.setVirtualSplitter(CombinedSplitter([InfoSplitter(field='a', values=list(range(4)))], vspMap=[[0,2], [1,3]])) self.assertEqual(pop.numVirtualSubPop(), 2) self.assertEqual(pop.subPopName([0, 0]), "a = 0 or a = 2") for 
ind in pop.individuals([0,0]): self.assertTrue(ind.info('a') in [0, 2]) for ind in pop.individuals([0,1]): self.assertTrue(ind.info('a') in [1, 3]) self.assertEqual(pop.subPopSize([0,0]) + pop.subPopSize([0,1]), pop.popSize()) def testProductSplitter(self): 'Testing CombinedSplitter::ProductSplitter(splitters=[])' pop = Population(1000, loci=[2, 3]) initSex(pop) initGenotype(pop, freq=[0.3, 0.7]) pop.setVirtualSplitter(ProductSplitter([ GenotypeSplitter(loci=1, alleles=[[0, 0], [1, 0], [0, 1], [1, 1]], phase=True), SexSplitter()])) self.assertEqual(pop.subPopName([0, 0]), "Genotype 1: 0 0, Male") self.assertEqual(pop.subPopName([0, 1]), "Genotype 1: 0 0, Female") self.assertEqual(pop.subPopName([0, 2]), "Genotype 1: 1 0, Male") self.assertEqual(pop.subPopName([0, 3]), "Genotype 1: 1 0, Female") for ind in pop.individuals([0, 0]): self.assertEqual(ind.allele(1, 0), 0) self.assertEqual(ind.allele(1, 1), 0) self.assertEqual(ind.sex(), MALE) for ind in pop.individuals([0, 1]): self.assertEqual(ind.allele(1, 0), 0) self.assertEqual(ind.allele(1, 1), 0) self.assertEqual(ind.sex(), FEMALE) for ind in pop.individuals([0, 2]): self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 0) self.assertEqual(ind.sex(), MALE) for ind in pop.individuals([0, 3]): self.assertEqual(ind.allele(1, 0), 1) self.assertEqual(ind.allele(1, 1), 0) self.assertEqual(ind.sex(), FEMALE) stat(pop, numOfMales=True) for x in range(8): self.assertTrue(pop.subPopSize([0,x]) > 0) self.assertEqual(sum([pop.subPopSize([0,x]) for x in range(0, 8, 2)]), pop.dvars().numOfMales) self.assertEqual(sum([pop.subPopSize([0,x]) for x in range(1, 8, 2)]), pop.dvars().numOfFemales) def testIndByID(self): 'Testing Population::indByID()' pop = self.getPop(size=[200]*4, ancGen=3, infoFields=['ind_id']) IdTagger().reset(1) tagID(pop) for i in range(400): id = random.randint(1, 800*4) ind = pop.indByID(id) self.assertEqual(ind.ind_id, id) self.assertRaises(IndexError, pop.indByID, 8000) def 
testIdentifyFamilies(self): 'Testing Pedigree::identifyFamily' pop = Population(100, infoFields=['ind_id', 'ped_id'], ancGen=1) tagID(pop, reset=True) ped = Pedigree(pop, fatherField='', motherField='', infoFields=ALL_AVAIL) pedSize = ped.identifyFamilies(pedField='ped_id') self.assertEqual(pedSize, tuple([1]*100)) self.assertEqual(ped.indInfo('ped_id'), tuple(range(100))) pop.evolve( matingScheme=RandomSelection(ops=[ CloneGenoTransmitter(), IdTagger()]), gen = 1 ) ped = Pedigree(pop, fatherField='', motherField='', infoFields=ALL_AVAIL) pedSize = ped.identifyFamilies(pedField='ped_id') self.assertEqual(pedSize, tuple([1]*200)) # pop = Population(100, infoFields=['ind_id', 'father_id', 'ped_id'], ancGen=1) tagID(pop, reset=True) pop.evolve( matingScheme=RandomSelection(ops=[ CloneGenoTransmitter(), IdTagger(), PedigreeTagger(infoFields='father_id')]), gen = 1 ) ped = Pedigree(pop, motherField='', infoFields=ALL_AVAIL) pedSize = ped.identifyFamilies(pedField='ped_id') self.assertEqual(sum(pedSize), 200) for idx, sz in enumerate(pedSize): if sz > 1: p = ped.extractIndividuals(IDs=idx, idField='ped_id') self.assertEqual(len(list(p.allIndividuals())), sz) # def testIdentifyAncestors(self): 'Testing pedigree::identifyAncestors' pop = Population(100, infoFields=['ind_id', 'father_id'], ancGen=1) tagID(pop, reset=True) pop.evolve( matingScheme=RandomSelection(ops=[ CloneGenoTransmitter(), IdTagger(), PedigreeTagger(infoFields='father_id')]), gen = 1 ) ped = Pedigree(pop, motherField='', infoFields=ALL_AVAIL) IDs = ped.identifyAncestors() self.assertTrue(len(IDs) > 100) # two parents pop = Population(100, infoFields=['ind_id', 'father_id', 'mother_id'], ancGen=3) tagID(pop, reset=True) pop.evolve( initOps = InitSex(), matingScheme=RandomMating(ops=[ MendelianGenoTransmitter(), IdTagger(), PedigreeTagger()]), gen = 5 ) pop.asPedigree() IDs = pop.identifyAncestors() self.assertTrue(len(IDs) > 200) # ancestors of selected parents IDs = pop.identifyAncestors(501) 
self.assertTrue(len(IDs) > 1 + 2 + 4) def testIdentifyOffspring(self): 'Testing pedigree::offspring' pop = Population(100, infoFields=['ind_id', 'father_id'], ancGen=1) tagID(pop, reset=True) pop.evolve( matingScheme=RandomSelection(ops=[ CloneGenoTransmitter(), IdTagger(), PedigreeTagger(infoFields='father_id')]), gen = 1 ) ped = Pedigree(pop, motherField='', infoFields=ALL_AVAIL) IDs = ped.identifyOffspring(1) # two parents pop = Population(100, infoFields=['ind_id', 'father_id', 'mother_id'], ancGen=3) tagID(pop, reset=True) pop.evolve( initOps = InitSex(), matingScheme=RandomMating(ops=[ MendelianGenoTransmitter(), IdTagger(), PedigreeTagger()]), gen = 5 ) pop.asPedigree() pop.useAncestralGen(3) anc = pop.indInfo('ind_id')[:10] IDs = pop.identifyOffspring(anc) len(IDs) > 20 def testDescribeEvolProcess(self): 'Testing population::evolve(dryrun=True' pop = Population(100, loci=3) tmp = sys.stdout with open(os.devnull, 'w') as sys.stdout: pop.evolve(preOps=InitSex(), matingScheme=RandomMating(), dryrun=True) sys.stdout = tmp def testLineage(self): if moduleInfo()['alleleType'] != 'lineage': return IdTagger().reset(1) pop = Population(100, infoFields='ind_id', loci=10) tagID(pop) self.assertEqual(pop.lineage(), [0] * 2000) for ind in pop.allIndividuals(): self.assertEqual(ind.lineage(), [0] * 20) # set lineage per loci initLineage(pop, range(20)) self.assertEqual(pop.lineage(), list(range(20)) * 100) for ind in pop.allIndividuals(): self.assertEqual(ind.lineage(), list(range(20))) # set lineage per ploidy initLineage(pop, list(range(2)), mode=PER_PLOIDY) self.assertEqual(pop.lineage(), (([0] * 10) + ([1] * 10)) * 100) initLineage(pop, list(range(200)), mode=PER_PLOIDY) self.assertEqual(pop.lineage(), sum([10 * [i] for i in range(200)], [])) # set lineage per individual initLineage(pop, list(range(100)), mode=PER_INDIVIDUAL) self.assertEqual(pop.lineage(), sum([20 * [i] for i in range(100)], [])) def testAllIndividuals(self): 'Testing population::allIndividuals' pop 
= Population([100]*10, loci=3, ancGen=-1) initSex(pop, sex=[MALE, FEMALE]) pop.setVirtualSplitter(SexSplitter()) pop1 = pop.clone() pop1.mergeSubPops(list(range(5))) self.assertEqual(pop1.numSubPop(), 6) pop.push(pop1) # sex = [x.sex() for x in pop.allIndividuals()] self.assertEqual(len(sex), 2000) sex = [x.sex() for x in pop.allIndividuals(ancGens=1)] self.assertEqual(len(sex), 1000) # virtual subpopulation sex = [x.sex() for x in pop.allIndividuals(subPops=[(0,0)], ancGens=1)] self.assertEqual(len(sex), 50) def goThrough(): [x.sex() for x in pop.allIndividuals(subPops=[(x,0) for x in range(10)], ancGens=0)] self.assertRaises(IndexError, goThrough) # this is OK sex = [x.sex() for x in pop.allIndividuals(subPops=[(x,0) for x in range(10)], ancGens=1)] for ind in pop.allIndividuals(subPops=[(x,0) for x in range(10)], ancGens=1): self.assertEqual(ind.sex(), MALE) self.assertEqual(len(sex), 500) # this is also OK sex = [x.sex() for x in pop.allIndividuals(subPops=[(ALL_AVAIL,0)])] self.assertEqual(len(sex), 1000) sex = [x.sex() for x in pop.allIndividuals(subPops=[(ALL_AVAIL,1)], ancGens=0)] for ind in pop.allIndividuals(subPops=[(ALL_AVAIL,1)]): self.assertEqual(ind.sex(), FEMALE) self.assertEqual(len(sex), 500) # this is also OK sex = [x.sex() for x in pop.allIndividuals(subPops=[(0, ALL_AVAIL)])] self.assertEqual(len(sex), 600) self.assertEqual(sex[:250], [MALE]*250) self.assertEqual(sex[250:500], [FEMALE]*250) self.assertEqual(sex[500:550], [MALE]*50) self.assertEqual(sex[550:600], [FEMALE]*50) # this is also OK sex = [x.sex() for x in pop.allIndividuals(subPops=[(ALL_AVAIL, ALL_AVAIL)])] self.assertEqual(len(sex), 2000) self.assertEqual(sex[:250], [MALE]*250) self.assertEqual(sex[250:500], [FEMALE]*250) self.assertEqual(sex[500:550], [MALE]*50) self.assertEqual(sex[550:600], [FEMALE]*50) def testIDOnlyPedigree(self): 'Testing pedigrees with only individual ID' pop = Population(500, ancGen=1, infoFields='ind_id') initSex(pop) tagID(pop) 
pop.asPedigree(fatherField='', motherField='') pop.save('ind.ped') ped = loadPedigree('ind.ped') os.remove('ind.ped') def testSaveLoadPedigree(self): 'Testing function loadPedigree' pop = Population(500, infoFields=['ind_id', 'father_id'], ancGen=-1, loci=[1]) tagID(pop, reset=True) pop.evolve( initOps = InitGenotype(freq=[0.5, 0.5]), matingScheme=RandomSelection(ops=[ CloneGenoTransmitter(), IdTagger(), PedigreeTagger(infoFields='father_id', output='>>test.ped')]), gen = 10 ) # pop.asPedigree(motherField='') pop.save('test1.ped', loci=0) ped1 = loadPedigree('test1.ped') ped1.save('test2.ped', loci=0) ped2 = loadPedigree('test2.ped') self.assertEqual(ped1, ped2) # pop.save('test1.ped', loci=0, infoFields='ind_id') ped1 = loadPedigree('test1.ped', infoFields='ind_id1') ped1.save('test2.ped', loci=0, infoFields='ind_id') ped2 = loadPedigree('test2.ped', infoFields='ind_id1') self.assertEqual(ped1, ped2) # ped = loadPedigree('test.ped', motherField='') self.assertEqual(ped.ancestralGens(), 10) for gen in range(11): ped.useAncestralGen(gen) if gen == 10: self.assertTrue(ped.popSize() < 500) else: self.assertEqual(ped.popSize(), 500) self.assertEqual(ped.individual(0).father_id, 0) ped.useAncestralGen(0) self.assertNotEqual(ped.individual(0).father_id, 0) # two parents pop = Population(500, loci=[2], ancGen=-1, infoFields=['ind_id', 'father_id', 'mother_id']) tagID(pop, reset=True) pop.evolve( initOps = [ InitSex(), InitGenotype(freq=[0.5, 0.5]), ], matingScheme=RandomMating(ops=[ MendelianGenoTransmitter(), IdTagger(), PedigreeTagger(output='>>test.ped')]), gen = 20 ) # pop.asPedigree() pop.save('test1.ped', loci=0) ped1 = loadPedigree('test1.ped') ped1.save('test2.ped', loci=0) ped2 = loadPedigree('test2.ped') self.assertEqual(ped1, ped2) # pop.save('test1.ped', loci=0, infoFields='ind_id') ped1 = loadPedigree('test1.ped', infoFields='ind_id1') ped1.save('test2.ped', loci=0, infoFields='ind_id') ped2 = loadPedigree('test2.ped', infoFields='ind_id1') 
self.assertEqual(ped1, ped2) # ped = loadPedigree('test.ped') self.assertEqual(ped.ancestralGens(), 20) for gen in range(21): ped.useAncestralGen(gen) if gen == 20: self.assertTrue(ped.popSize() < 500) else: self.assertEqual(ped.popSize(), 500) self.assertEqual(ped.individual(0).father_id, 0) self.assertEqual(ped.individual(0).mother_id, 0) ped.useAncestralGen(0) self.assertNotEqual(ped.individual(0).father_id, 0) self.assertNotEqual(ped.individual(0).mother_id, 0) # cleanup for file in ['test.ped', 'test1.ped', 'test2.ped']: os.remove(file) def testDiscardIf(self): 'Testing operator DiscardIf' pop = Population(1000, loci=2, infoFields=['a', 'b']) initInfo(pop, [1, 2, 3], infoFields='a') initInfo(pop, [1, 2, 3, 4], infoFields='b') discardIf(pop, 'a==b') for ind in pop.individuals(): self.assertNotEqual(ind.a, ind.b) def func(a): return a <= 1 discardIf(pop, func) for ind in pop.individuals(): self.assertTrue(ind.a > 1) self.assertTrue(pop.popSize() < 1000) # testing virtual subpopulation pop.setVirtualSplitter(InfoSplitter(field='b', values=[3])) discardIf(pop, True, subPops=[(0,0)]) for ind in pop.individuals(): self.assertTrue(ind.b != 3) # # test discard by probability pop = Population(1000) discardIf(pop, cond='0.5') self.assertTrue(pop.popSize() > 450) self.assertTrue(pop.popSize() < 550) # probability from expression pop = Population(1000, loci=2, infoFields='a') initInfo(pop, [0.2], infoFields='a') discardIf(pop, cond='a+0.05') self.assertTrue(pop.popSize() > 700) self.assertTrue(pop.popSize() < 800) # pop = Population([1000, 1000], loci=2, infoFields='a') initInfo(pop, [0.2], infoFields='a', subPops=0) initInfo(pop, [0.8], infoFields='a', subPops=1) discardIf(pop, cond='a', subPops=0) self.assertTrue(pop.subPopSize(0) > 750) self.assertTrue(pop.subPopSize(0) < 850) self.assertEqual(pop.subPopSize(1), 1000) # discardIf(pop, cond='a', subPops=1) self.assertTrue(pop.subPopSize(1) > 150) self.assertTrue(pop.subPopSize(1) < 250) # # pop = Population(1000, 
loci=2, infoFields='a') initInfo(pop, [0.2], infoFields='a') discardIf(pop, cond=lambda a: a + 0.1) self.assertTrue(pop.popSize() > 650) self.assertTrue(pop.popSize() < 750) # pop = Population([1000, 1000], loci=2, infoFields='a') initInfo(pop, [0.2], infoFields='a', subPops=0) initInfo(pop, [0.8], infoFields='a', subPops=1) discardIf(pop, cond=lambda a: a, subPops=0) self.assertTrue(pop.subPopSize(0) > 750) self.assertTrue(pop.subPopSize(0) < 850) self.assertEqual(pop.subPopSize(1), 1000) # discardIf(pop, cond=lambda a: a-0.3, subPops=1) self.assertTrue(pop.subPopSize(1) > 450) self.assertTrue(pop.subPopSize(1) < 550) # error handling self.assertRaises(Exception, discardIf, pop, cond='a2') def testMutants(self): 'Testing function Population.mutants' pop = Population([4,6], loci=20) pop.setGenotype([0,0,1]) # mutants are at # 0 0 1=2 0 0 1=5 0 0 1 0 0 1 0 0 1 0 0 1=17 0 0 <- 6 # 1=0 0 0 1=3 0 0 1 0 0 1 0 0 1 0 0 1 0 0 1=18 0 <- 7 # 0 1=1 ....................................1=19 <- 7 # 0 0 1=2 ......................................<- 6 mutants = list(pop.mutants()) self.assertEqual(len(mutants), 3 * 40 + 13) self.assertEqual([x[0] for x in mutants][:13], [2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38]) self.assertEqual(len(mutants), pop.genotype().count(1)) # mutants = list(pop.mutants(1)) self.assertEqual(len(mutants), 80) self.assertEqual([x[0] for x in mutants][-13:], [2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38]) self.assertEqual(len(mutants), pop.genotype(1).count(1)) # pop = Population(loci=[1, 2], size=[1, 2]) arr = list(pop.mutants()) self.assertEqual(len(arr), 0) arr = list(pop.mutants(1)) self.assertEqual(len(arr), 0) # pop.setGenotype([2, 3, 4]) arr = list(pop.mutants()) self.assertEqual(len(arr), pop.genoSize()*pop.popSize()) arr = list(pop.mutants(1)) self.assertEqual(len(arr), pop.genoSize()*pop.subPopSize(1)) # pop.setGenotype([2, 0, 4, 0, 5]) arr = list(pop.mutants()) self.assertEqual(len(arr), 11) arr = list(pop.mutants(1)) 
self.assertEqual(len(arr), 7) # set back pop.setGenotype([2, 0, 0, 0, 5]) arr = list(pop.mutants()) self.assertEqual(len(arr), 7) arr = list(pop.mutants(1)) self.assertEqual(len(arr), 4) # if __name__ == '__main__': unittest.main()
gpl-2.0
rajsadho/django
tests/m2m_regress/tests.py
273
4695
from __future__ import unicode_literals from django.core.exceptions import FieldError from django.test import TestCase from django.utils import six from .models import ( Entry, Line, Post, RegressionModelSplit, SelfRefer, SelfReferChild, SelfReferChildSibling, Tag, TagCollection, Worksheet, ) class M2MRegressionTests(TestCase): def test_multiple_m2m(self): # Multiple m2m references to model must be distinguished when # accessing the relations through an instance attribute. s1 = SelfRefer.objects.create(name='s1') s2 = SelfRefer.objects.create(name='s2') s3 = SelfRefer.objects.create(name='s3') s1.references.add(s2) s1.related.add(s3) e1 = Entry.objects.create(name='e1') t1 = Tag.objects.create(name='t1') t2 = Tag.objects.create(name='t2') e1.topics.add(t1) e1.related.add(t2) self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"]) self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"]) self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"]) self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"]) def test_internal_related_name_not_in_error_msg(self): # The secret internal related names for self-referential many-to-many # fields shouldn't appear in the list when an error is made. 
six.assertRaisesRegex( self, FieldError, "Choices are: id, name, references, related, selfreferchild, selfreferchildsibling$", lambda: SelfRefer.objects.filter(porcupine='fred') ) def test_m2m_inheritance_symmetry(self): # Test to ensure that the relationship between two inherited models # with a self-referential m2m field maintains symmetry sr_child = SelfReferChild(name="Hanna") sr_child.save() sr_sibling = SelfReferChildSibling(name="Beth") sr_sibling.save() sr_child.related.add(sr_sibling) self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"]) self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"]) def test_m2m_pk_field_type(self): # Regression for #11311 - The primary key for models in a m2m relation # doesn't have to be an AutoField w = Worksheet(id='abc') w.save() w.delete() def test_add_m2m_with_base_class(self): # Regression for #11956 -- You can add an object to a m2m with the # base class without causing integrity errors t1 = Tag.objects.create(name='t1') t2 = Tag.objects.create(name='t2') c1 = TagCollection.objects.create(name='c1') c1.tags = [t1, t2] c1 = TagCollection.objects.get(name='c1') self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"], ordered=False) self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"]) def test_manager_class_caching(self): e1 = Entry.objects.create() e2 = Entry.objects.create() t1 = Tag.objects.create() t2 = Tag.objects.create() # Get same manager twice in a row: self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__) self.assertIs(e1.topics.__class__, e1.topics.__class__) # Get same manager for different instances self.assertIs(e1.topics.__class__, e2.topics.__class__) self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__) def test_m2m_abstract_split(self): # Regression for #19236 - an abstract class with a 'split' method # causes a TypeError in add_lazy_relation m1 = RegressionModelSplit(name='1') m1.save() def 
test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self): t1 = Tag.objects.create(name='t1') t2 = Tag.objects.create(name='t2') c1 = TagCollection.objects.create(name='c1') c1.tags = [t1, t2] with self.assertRaises(TypeError): c1.tags = 7 c1.refresh_from_db() self.assertQuerysetEqual(c1.tags.order_by('name'), ["<Tag: t1>", "<Tag: t2>"]) def test_multiple_forwards_only_m2m(self): # Regression for #24505 - Multiple ManyToManyFields to same "to" # model with related_name set to '+'. foo = Line.objects.create(name='foo') bar = Line.objects.create(name='bar') post = Post.objects.create() post.primary_lines.add(foo) post.secondary_lines.add(bar) self.assertQuerysetEqual(post.primary_lines.all(), ['<Line: foo>']) self.assertQuerysetEqual(post.secondary_lines.all(), ['<Line: bar>'])
bsd-3-clause
toastedcornflakes/scikit-learn
examples/manifold/plot_compare_methods.py
39
4036
""" ========================================= Comparison of Manifold Learning methods ========================================= An illustration of dimensionality reduction on the S-curve dataset with various manifold learning methods. For a discussion and comparison of these algorithms, see the :ref:`manifold module page <manifold>` For a similar example, where the methods are applied to a sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py` Note that the purpose of the MDS is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. """ # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu> print(__doc__) from time import time import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter from sklearn import manifold, datasets # Next line to silence pyflakes. This import is needed. 
Axes3D n_points = 1000 X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0) n_neighbors = 10 n_components = 2 fig = plt.figure(figsize=(15, 8)) plt.suptitle("Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14) try: # compatibility matplotlib < 1.0 ax = fig.add_subplot(251, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral) ax.view_init(4, -72) except: ax = fig.add_subplot(251, projection='3d') plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral) methods = ['standard', 'ltsa', 'hessian', 'modified'] labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE'] for i, method in enumerate(methods): t0 = time() Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components, eigen_solver='auto', method=method).fit_transform(X) t1 = time() print("%s: %.2g sec" % (methods[i], t1 - t0)) ax = fig.add_subplot(252 + i) plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral) plt.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') t0 = time() Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X) t1 = time() print("Isomap: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(257) plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral) plt.title("Isomap (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') t0 = time() mds = manifold.MDS(n_components, max_iter=100, n_init=1) Y = mds.fit_transform(X) t1 = time() print("MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(258) plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral) plt.title("MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') t0 = time() se = manifold.SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors) Y = se.fit_transform(X) t1 
= time() print("SpectralEmbedding: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(259) plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral) plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') t0 = time() tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0) Y = tsne.fit_transform(X) t1 = time() print("t-SNE: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(2, 5, 10) plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral) plt.title("t-SNE (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') plt.show()
bsd-3-clause
home-assistant/home-assistant
homeassistant/components/mqtt_statestream/__init__.py
21
2598
"""Publish simple item state changes via MQTT.""" import json import voluptuous as vol from homeassistant.components.mqtt import valid_publish_topic from homeassistant.const import MATCH_ALL from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entityfilter import ( INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA, convert_include_exclude_filter, ) from homeassistant.helpers.event import async_track_state_change from homeassistant.helpers.json import JSONEncoder CONF_BASE_TOPIC = "base_topic" CONF_PUBLISH_ATTRIBUTES = "publish_attributes" CONF_PUBLISH_TIMESTAMPS = "publish_timestamps" DOMAIN = "mqtt_statestream" CONFIG_SCHEMA = vol.Schema( { DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA.extend( { vol.Required(CONF_BASE_TOPIC): valid_publish_topic, vol.Optional(CONF_PUBLISH_ATTRIBUTES, default=False): cv.boolean, vol.Optional(CONF_PUBLISH_TIMESTAMPS, default=False): cv.boolean, } ), }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the MQTT state feed.""" conf = config.get(DOMAIN) publish_filter = convert_include_exclude_filter(conf) base_topic = conf.get(CONF_BASE_TOPIC) publish_attributes = conf.get(CONF_PUBLISH_ATTRIBUTES) publish_timestamps = conf.get(CONF_PUBLISH_TIMESTAMPS) if not base_topic.endswith("/"): base_topic = f"{base_topic}/" @callback def _state_publisher(entity_id, old_state, new_state): if new_state is None: return if not publish_filter(entity_id): return payload = new_state.state mybase = f"{base_topic}{entity_id.replace('.', '/')}/" hass.components.mqtt.async_publish(f"{mybase}state", payload, 1, True) if publish_timestamps: if new_state.last_updated: hass.components.mqtt.async_publish( f"{mybase}last_updated", new_state.last_updated.isoformat(), 1, True ) if new_state.last_changed: hass.components.mqtt.async_publish( f"{mybase}last_changed", new_state.last_changed.isoformat(), 1, True ) if publish_attributes: for key, val in new_state.attributes.items(): encoded_val = 
json.dumps(val, cls=JSONEncoder) hass.components.mqtt.async_publish(mybase + key, encoded_val, 1, True) async_track_state_change(hass, MATCH_ALL, _state_publisher) return True
apache-2.0
fredericlepied/ansible
lib/ansible/modules/network/bigswitch/bigmon_policy.py
8
6485
#!/usr/bin/python # -*- coding: utf-8 -*- # Ansible module to manage Big Monitoring Fabric service chains # (c) 2016, Ted Elhourani <ted@bigswitch.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigmon_policy author: "Ted (@tedelhourani)" short_description: Create and remove a bigmon out-of-band policy. description: - Create and remove a bigmon out-of-band policy. version_added: "2.3" options: name: description: - The name of the policy. required: true policy_description: description: - Description of policy. action: description: - Forward matching packets to delivery interfaces, Drop is for measure rate of matching packets, but do not forward to delivery interfaces, capture packets and write to a PCAP file, or enable NetFlow generation. default: forward choices: ['forward', 'drop', 'flow-gen'] priority: description: - A priority associated with this policy. The higher priority policy takes precedence over a lower priority. default: 100 duration: description: - Run policy for duration duration or until delivery_packet_count packets are delivered, whichever comes first. default: 0 start_time: description: - Date the policy becomes active default: ansible_date_time.iso8601 delivery_packet_count: description: - Run policy until delivery_packet_count packets are delivered. default: 0 state: description: - Whether the policy should be present or absent. default: present choices: ['present', 'absent'] controller: description: - The controller address. required: true validate_certs: description: - If C(false), SSL certificates will not be validated. This should only be used on personally controlled devices using self-signed certificates. 
required: false default: true choices: [true, false] access_token: description: - Bigmon access token. If this isn't set the the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used. ''' EXAMPLES = ''' - name: policy to aggregate filter and deliver data center (DC) 1 traffic bigmon_policy: name: policy1 policy_description: DC 1 traffic policy action: drop controller: '{{ inventory_hostname }}' state: present validate_certs: false ''' RETURN = ''' # ''' import datetime import os import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.bigswitch_utils import Rest from ansible.module_utils._text import to_native def policy(module): try: access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN'] except KeyError as e: module.fail_json(msg='Unable to load %s' % e.message, exception=traceback.format_exc()) name = module.params['name'] policy_description = module.params['policy_description'] action = module.params['action'] priority = module.params['priority'] duration = module.params['duration'] start_time = module.params['start_time'] delivery_packet_count = module.params['delivery_packet_count'] state = module.params['state'] controller = module.params['controller'] rest = Rest(module, {'content-type': 'application/json', 'Cookie': 'session_cookie='+access_token}, 'https://'+controller+':8443/api/v1/data/controller/applications/bigtap') if name is None: module.fail_json(msg='parameter `name` is missing') response = rest.get('policy?config=true', data={}) if response.status_code != 200: module.fail_json(msg="failed to obtain existing policy config: {}".format(response.json['description'])) config_present = False matching = [policy for policy in response.json if policy['name'] == name and policy['duration'] == duration and policy['delivery-packet-count'] == delivery_packet_count and policy['policy-description'] == policy_description and policy['action'] == action and policy['priority'] == priority] if 
matching: config_present = True if state in ('present') and config_present: module.exit_json(changed=False) if state in ('absent') and not config_present: module.exit_json(changed=False) if state in ('present'): data={'name': name, 'action': action, 'policy-description': policy_description, 'priority': priority, 'duration': duration, 'start-time': start_time, 'delivery-packet-count': delivery_packet_count } response = rest.put('policy[name="%s"]' % name, data=data) if response.status_code == 204: module.exit_json(changed=True) else: module.fail_json(msg="error creating policy '{}': {}".format(name, response.json['description'])) if state in ('absent'): response = rest.delete('policy[name="%s"]' % name, data={}) if response.status_code == 204: module.exit_json(changed=True) else: module.fail_json(msg="error deleting policy '{}': {}".format(name, response.json['description'])) def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), policy_description=dict(type='str', default=''), action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'), priority=dict(type='int', default=100), duration=dict(type='int', default=0), start_time=dict(type='str', default=datetime.datetime.now().isoformat()+'+00:00'), delivery_packet_count=dict(type='int', default=0), controller=dict(type='str', required=True), state=dict(choices=['present', 'absent'], default='present'), validate_certs=dict(type='bool', default='True'), access_token=dict(type='str', no_log=True) ) ) try: policy(module) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
gpl-3.0
johnbachman/pysb
pysb/tests/test_simulator_cupsoda.py
5
4329
import warnings import numpy as np from nose.plugins.attrib import attr from pysb.examples.tyson_oscillator import model from pysb.simulator.cupsoda import CupSodaSimulator, run_cupsoda from nose.tools import raises import os @attr('gpu') class TestCupSODASimulatorSingle(object): def setUp(self): self.n_sims = 50 self.tspan = np.linspace(0, 500, 101) self.solver = CupSodaSimulator(model, tspan=self.tspan, verbose=False, integrator_options={'atol': 1e-12, 'rtol': 1e-12, 'max_steps': 20000}) len_model_species = len(model.species) y0 = np.zeros((self.n_sims, len_model_species)) for ic in model.initials: for j in range(len_model_species): if str(ic.pattern) == str(model.species[j]): y0[:, j] = ic.value.value break self.y0 = y0 def test_use_of_volume(self): # Initial concentrations self.solver.run(initials=self.y0) print(self.solver.vol) assert self.solver.vol is None self.solver.vol = 1e-20 assert self.solver.vol == 1e-20 def test_integrator_options(self): assert self.solver.opts['atol'] == 1e-12 assert self.solver.opts['rtol'] == 1e-12 assert self.solver.opts['max_steps'] == 20000 def test_arguments(self): with warnings.catch_warnings(): warnings.filterwarnings('ignore', "Neither 'param_values' nor " "'initials' were supplied.") self.solver.run(param_values=None, initials=None) def test_memory_usage(self): assert self.solver.opts['memory_usage'] == 'sharedconstant' self.solver.run(initials=self.y0) # memory_usage='sharedconstant' self.solver.opts['memory_usage'] = 'global' self.solver.run(initials=self.y0) self.solver.opts['memory_usage'] = 'shared' self.solver.run(initials=self.y0) def test_n_blocks(self): print(self.solver.n_blocks) self.solver.n_blocks = 128 assert self.solver.n_blocks == 128 self.solver.run(initials=self.y0) def test_multi_chunks(self): sim = CupSodaSimulator(model, tspan=self.tspan, verbose=False, initials=self.y0, integrator_options={'atol': 1e-12, 'rtol': 1e-12, 'chunksize': 25, 'max_steps': 20000}) res = sim.run() assert res.nsims == 
self.n_sims @raises(ValueError) def test_set_nblocks_str(self): self.solver.n_blocks = 'fail' @raises(ValueError) def test_set_nblocks_0(self): self.solver.n_blocks = 0 def test_run_tyson(self): # Rate constants len_parameters = len(model.parameters) param_values = np.ones((self.n_sims, len_parameters)) for j in range(len_parameters): param_values[:, j] *= model.parameters[j].value simres = self.solver.run(initials=self.y0) print(simres.observables) self.solver.run(param_values=None, initials=self.y0) self.solver.run(param_values=param_values, initials=self.y0) self.solver.run(param_values=param_values, initials=self.y0) def test_verbose(self): solver = CupSodaSimulator(model, tspan=self.tspan, verbose=True, integrator_options={'atol': 1e-12, 'rtol': 1e-12, 'vol': 1e-5, 'max_steps': 20000}) solver.run() def test_run_cupsoda_instance(self): run_cupsoda(model, tspan=self.tspan) @raises(ValueError) def test_invalid_init_kwarg(self): CupSodaSimulator(model, tspan=self.tspan, spam='eggs') @raises(ValueError) def test_invalid_integrator_option(self): CupSodaSimulator(model, tspan=self.tspan, integrator_options={'spam': 'eggs'})
bsd-2-clause
averainy/averainy
catsup/catsup/generator/__init__.py
1
4430
import time import os import catsup.parser from catsup.logger import logger from catsup.generator.renderer import Renderer from catsup.reader import get_reader from catsup.options import g from catsup.utils import smart_copy from catsup.models import * class Generator(object): def __init__(self, config_path, local=False, base_url=None): self.config_path = config_path self.local = local self.base_url = base_url g.generator = self self.posts = [] self.pages = [] self.non_post_files = [] self.archives = [] self.tags = [] self.caches = [] self.config = {} self.renderer = None self.reset() def reset(self): self.posts = [] self.pages = [] self.non_post_files = [] self.archives = g.archives = Archives() self.tags = g.tags = Tags() self.load_config() self.load_posts() self.load_renderer() self.caches = { "static_url": {}, "url_for": {} } def load_config(self): self.config = g.config = catsup.parser.config( self.config_path, local=self.local, base_url=self.base_url ) def load_posts(self): for f in os.listdir(g.source): if f.startswith("."): # hidden file continue filename, ext = os.path.splitext(f) ext = ext.lower()[1:] reader = get_reader(ext) if reader is not None: logger.info('Loading file %s' % filename) path = os.path.join(g.source, f) post = reader(path) if post.type == "page": self.pages.append(post) else: self.posts.append(post) else: self.non_post_files.append(f) self.posts.sort( key=lambda x: x.datetime, reverse=True ) def load_renderer(self): templates_path = [ g.public_templates_path, os.path.join(g.theme.path, 'templates') ] self.renderer = Renderer( templates_path=templates_path, generator=self ) def add_archives_and_tags(self): for post in self.posts: post.add_archive_and_tags() def generate_feed(self): feed = Feed(self.posts) feed.render(self.renderer) def generate_pages(self): page = Page(self.posts) page.render_all(self.renderer) def generate_posts(self): for post in self.posts: post.render(self.renderer) for page in self.pages: page.render(self.renderer) 
def generate_tags(self): self.tags.render(self.renderer) def generate_archives(self): self.archives.render(self.renderer) def generate_other_pages(self): NotFound().render(self.renderer) def copy_static_files(self): static_path = self.config.config.static_output smart_copy( os.path.join(g.theme.path, 'static'), static_path ) smart_copy( self.config.config.static_source, static_path ) for f in self.non_post_files: smart_copy( os.path.join(g.source, f), os.path.join(self.config.config.output, f) ) def generate(self): started_loading = time.time() self.reset() self.add_archives_and_tags() finish_loading = time.time() logger.info( "Loaded config and %s posts in %.3fs" % (len(self.posts), finish_loading - started_loading) ) if self.posts: self.generate_posts() self.generate_tags() self.generate_archives() self.generate_feed() self.generate_pages() else: logger.warning("Can't find any post.") self.generate_other_pages() self.copy_static_files() self.renderer.render_sitemap() finish_generating = time.time() logger.info( "Generated %s posts in %.3fs" % (len(self.posts), finish_generating - finish_loading) ) logger.info( "Generating finished in %.3fs" % (finish_generating - started_loading) )
gpl-2.0
campbe13/openhatch
vendor/packages/Django/tests/modeltests/or_lookups/tests.py
150
7625
from __future__ import absolute_import from datetime import datetime from operator import attrgetter from django.db.models import Q from django.test import TestCase from .models import Article class OrLookupsTests(TestCase): def setUp(self): self.a1 = Article.objects.create( headline='Hello', pub_date=datetime(2005, 11, 27) ).pk self.a2 = Article.objects.create( headline='Goodbye', pub_date=datetime(2005, 11, 28) ).pk self.a3 = Article.objects.create( headline='Hello and goodbye', pub_date=datetime(2005, 11, 29) ).pk def test_filter_or(self): self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline") ) def test_stages(self): # You can shorten this syntax with code like the following, which is # especially useful if building the query in stages: articles = Article.objects.all() self.assertQuerysetEqual( articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'), [] ) self.assertQuerysetEqual( articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [ 'Hello and goodbye' ], attrgetter("headline") ) def test_pk_q(self): self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [ 'Hello', 'Goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( 
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_pk_in(self): self.assertQuerysetEqual( Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [ 'Hello', 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_q_negated(self): # Q objects can be negated self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [ 'Hello', 'Hello and goodbye' ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [ 'Hello and goodbye' ], attrgetter("headline"), ) # This allows for more complex queries than filter() and exclude() # alone would allow self.assertQuerysetEqual( Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [ 'Hello' ], attrgetter("headline"), ) def test_complex_filter(self): # The 'complex_filter' method supports framework features such as # 'limit_choices_to' which normally take a single dictionary of lookup # arguments but need to support arbitrary queries via Q objects too. self.assertQuerysetEqual( Article.objects.complex_filter({'pk': self.a1}), [ 'Hello' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [ 'Hello', 'Goodbye' ], attrgetter("headline"), ) def test_empty_in(self): # Passing "in" an empty list returns no results ... self.assertQuerysetEqual( Article.objects.filter(pk__in=[]), [] ) # ... but can return results if we OR it with another query. 
self.assertQuerysetEqual( Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [ 'Goodbye', 'Hello and goodbye' ], attrgetter("headline"), ) def test_q_and(self): # Q arg objects are ANDed self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [ 'Hello and goodbye' ], attrgetter("headline") ) # Q arg AND order is irrelevant self.assertQuerysetEqual( Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [ 'Hello and goodbye' ], attrgetter("headline"), ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')), [] ) def test_q_exclude(self): self.assertQuerysetEqual( Article.objects.exclude(Q(headline__startswith='Hello')), [ 'Goodbye' ], attrgetter("headline") ) def test_other_arg_queries(self): # Try some arg queries with operations other than filter. self.assertEqual( Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline, 'Hello and goodbye' ) self.assertEqual( Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(), 3 ) self.assertQuerysetEqual( Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [ {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)}, ], lambda o: o, ) self.assertEqual( Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]), {self.a1: Article.objects.get(pk=self.a1)} )
agpl-3.0
mgit-at/ansible
lib/ansible/plugins/action/uri.py
70
2293
# -*- coding: utf-8 -*- # (c) 2015, Brian Coca <briancoca+dev@gmail.com> # (c) 2018, Matt Martz <matt@sivel.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail from ansible.module_utils._text import to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.action import ActionBase class ActionModule(ActionBase): TRANSFERS_FILES = True def run(self, tmp=None, task_vars=None): self._supports_async = True if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) del tmp # tmp no longer has any effect src = self._task.args.get('src', None) remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False) try: if (src and remote_src) or not src: # everything is remote, so we just execute the module # without changing any of the module arguments raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val)) try: src = self._find_needle('files', src) except AnsibleError as e: raise AnsibleActionFail(to_native(e)) tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src)) self._transfer_file(src, tmp_src) self._fixup_perms2((self._connection._shell.tmpdir, tmp_src)) new_module_args = self._task.args.copy() new_module_args.update( dict( src=tmp_src, ) ) result.update(self._execute_module('uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)) except AnsibleAction as e: result.update(e.result) finally: if not self._task.async_val: self._remove_tmp_path(self._connection._shell.tmpdir) return result
gpl-3.0
hdinsight/hue
apps/security/src/security/tests.py
27
1947
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nose.tools import assert_true, assert_equal, assert_false from desktop.lib.django_test_util import make_logged_in_client from desktop.lib.test_utils import grant_access from django.contrib.auth.models import User, Group from django.core.urlresolvers import reverse from useradmin.models import HuePermission, GroupPermission class TestSecurity(): def test_permissions(self): client = make_logged_in_client(username='test_permissions', groupname='test_permissions', is_superuser=False) grant_access("test_permissions", "test_permissions", "security") user = User.objects.get(username='test_permissions') def check(client, assertz): response = client.get(reverse("security:hive")) assertz("Impersonate the user" in response.content, response.content) # Forbidden check(client, assert_false) # Allowed group, created = Group.objects.get_or_create(name='test_permissions') perm, created = HuePermission.objects.get_or_create(app='security', action='impersonate') GroupPermission.objects.get_or_create(group=group, hue_permission=perm) check(client, assert_true)
apache-2.0
citrix-openstack-build/python-keystoneclient
keystoneclient/tests/v3/test_access.py
1
5970
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from keystoneclient import access
from keystoneclient.openstack.common import timeutils
from keystoneclient.tests.v3 import client_fixtures
from keystoneclient.tests.v3 import utils

# Canned auth response headers shared by all tests; only the token body
# (unscoped / domain-scoped / project-scoped) varies per test case.
TOKEN_RESPONSE = utils.TestResponse({
    "headers": client_fixtures.AUTH_RESPONSE_HEADERS
})
UNSCOPED_TOKEN = client_fixtures.UNSCOPED_TOKEN
DOMAIN_SCOPED_TOKEN = client_fixtures.DOMAIN_SCOPED_TOKEN
PROJECT_SCOPED_TOKEN = client_fixtures.PROJECT_SCOPED_TOKEN


class AccessInfoTest(utils.TestCase):
    """Tests for access.AccessInfo objects built from v3 token bodies.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias.
    """

    def test_building_unscoped_accessinfo(self):
        auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
                                             body=UNSCOPED_TOKEN)

        self.assertTrue(auth_ref)
        self.assertIn('methods', auth_ref)
        self.assertIn('catalog', auth_ref)
        # An unscoped token carries no service catalog.
        self.assertFalse(auth_ref['catalog'])

        self.assertEqual(auth_ref.auth_token,
                         '3e2813b7ba0b4006840c3825860b86ed')
        self.assertEqual(auth_ref.username, 'exampleuser')
        self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')

        self.assertEqual(auth_ref.project_name, None)
        self.assertEqual(auth_ref.project_id, None)

        self.assertEqual(auth_ref.auth_url, None)
        self.assertEqual(auth_ref.management_url, None)

        self.assertFalse(auth_ref.domain_scoped)
        self.assertFalse(auth_ref.project_scoped)

        self.assertEqual(auth_ref.user_domain_id,
                         '4e6893b7ba0b4006840c3845660b86ed')
        self.assertEqual(auth_ref.user_domain_name, 'exampledomain')

        self.assertIsNone(auth_ref.project_domain_id)
        self.assertIsNone(auth_ref.project_domain_name)

        self.assertEqual(auth_ref.expires, timeutils.parse_isotime(
                         UNSCOPED_TOKEN['token']['expires_at']))

    def test_will_expire_soon(self):
        expires = timeutils.utcnow() + datetime.timedelta(minutes=5)
        UNSCOPED_TOKEN['token']['expires_at'] = expires.isoformat()
        auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
                                             body=UNSCOPED_TOKEN)
        # Token expires in 5 minutes: not "soon" with a 120s stale margin,
        # but "soon" with a 300s margin.
        self.assertFalse(auth_ref.will_expire_soon(stale_duration=120))
        self.assertTrue(auth_ref.will_expire_soon(stale_duration=300))
        self.assertFalse(auth_ref.will_expire_soon())

    def test_building_domain_scoped_accessinfo(self):
        auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
                                             body=DOMAIN_SCOPED_TOKEN)

        self.assertTrue(auth_ref)
        self.assertIn('methods', auth_ref)
        self.assertIn('catalog', auth_ref)
        self.assertFalse(auth_ref['catalog'])

        self.assertEqual(auth_ref.auth_token,
                         '3e2813b7ba0b4006840c3825860b86ed')
        self.assertEqual(auth_ref.username, 'exampleuser')
        self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')

        self.assertEqual(auth_ref.domain_name, 'anotherdomain')
        self.assertEqual(auth_ref.domain_id,
                         '8e9283b7ba0b1038840c3842058b86ab')

        self.assertEqual(auth_ref.project_name, None)
        self.assertEqual(auth_ref.project_id, None)

        self.assertEqual(auth_ref.user_domain_id,
                         '4e6893b7ba0b4006840c3845660b86ed')
        self.assertEqual(auth_ref.user_domain_name, 'exampledomain')

        self.assertIsNone(auth_ref.project_domain_id)
        self.assertIsNone(auth_ref.project_domain_name)

        self.assertTrue(auth_ref.domain_scoped)
        self.assertFalse(auth_ref.project_scoped)

    def test_building_project_scoped_accessinfo(self):
        auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
                                             body=PROJECT_SCOPED_TOKEN)

        self.assertTrue(auth_ref)
        self.assertIn('methods', auth_ref)
        self.assertIn('catalog', auth_ref)
        # A project-scoped token does carry a service catalog.
        self.assertTrue(auth_ref['catalog'])

        self.assertEqual(auth_ref.auth_token,
                         '3e2813b7ba0b4006840c3825860b86ed')
        self.assertEqual(auth_ref.username, 'exampleuser')
        self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')

        self.assertEqual(auth_ref.domain_name, None)
        self.assertEqual(auth_ref.domain_id, None)

        self.assertEqual(auth_ref.project_name, 'exampleproject')
        self.assertEqual(auth_ref.project_id,
                         '225da22d3ce34b15877ea70b2a575f58')

        # tenant_* are legacy aliases for project_*.
        self.assertEqual(auth_ref.tenant_name, auth_ref.project_name)
        self.assertEqual(auth_ref.tenant_id, auth_ref.project_id)

        self.assertEqual(auth_ref.auth_url, ('http://public.com:5000/v3',))
        self.assertEqual(auth_ref.management_url, ('http://admin:35357/v3',))

        self.assertEqual(auth_ref.project_domain_id,
                         '4e6893b7ba0b4006840c3845660b86ed')
        self.assertEqual(auth_ref.project_domain_name, 'exampledomain')

        self.assertEqual(auth_ref.user_domain_id,
                         '4e6893b7ba0b4006840c3845660b86ed')
        self.assertEqual(auth_ref.user_domain_name, 'exampledomain')

        self.assertFalse(auth_ref.domain_scoped)
        self.assertTrue(auth_ref.project_scoped)
apache-2.0
zasdfgbnm/tensorflow
tensorflow/contrib/distributions/python/ops/chi2.py
78
4175
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Chi2 distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import gamma


__all__ = [
    "Chi2",
    "Chi2WithAbsDf",
]


class Chi2(gamma.Gamma):
  """Chi2 distribution.

  The Chi2 distribution is defined over positive real numbers using a
  degrees of freedom ("df") parameter.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; df, x > 0) = x**(0.5 df - 1) exp(-0.5 x) / Z
  Z = 2**(0.5 df) Gamma(0.5 df)
  ```

  where:

  * `df` denotes the degrees of freedom,
  * `Z` is the normalization constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The Chi2 distribution is a special case of the Gamma distribution, i.e.,

  ```python
  Chi2(df) = Gamma(concentration=0.5 * df, rate=0.5)
  ```
  """

  def __init__(self,
               df,
               validate_args=False,
               allow_nan_stats=True,
               name="Chi2"):
    """Construct Chi2 distributions with parameter `df`.

    Args:
      df: Floating point tensor, the degrees of freedom of the
        distribution(s). `df` must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    # NOTE: locals() must be captured before any other assignments so that
    # `parameters` records exactly the constructor arguments.
    parameters = locals()
    # Even though all stats of chi2 are defined for valid parameters, this is
    # not true in the parent class "gamma."  therefore, passing
    # allow_nan_stats=True
    # through to the parent class results in unnecessary asserts.
    with ops.name_scope(name, values=[df]):
      self._df = ops.convert_to_tensor(df, name="df")
      # Delegate all distribution math to Gamma via the identity
      # Chi2(df) == Gamma(concentration=0.5*df, rate=0.5).
      super(Chi2, self).__init__(
          concentration=0.5 * self._df,
          rate=constant_op.constant(0.5, dtype=self._df.dtype),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters

  @staticmethod
  def _param_shapes(sample_shape):
    """Shapes of the parameters given the desired sample shape."""
    return {"df": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def df(self):
    """Degrees-of-freedom parameter as passed to the constructor."""
    return self._df


class Chi2WithAbsDf(Chi2):
  """Chi2 with parameter transform `df = floor(abs(df))`."""

  def __init__(self,
               df,
               validate_args=False,
               allow_nan_stats=True,
               name="Chi2WithAbsDf"):
    """Construct Chi2, mapping any real-valued `df` to a valid value.

    Args:
      df: Floating point tensor; transformed via `floor(abs(df))` before use.
      validate_args: Python `bool`, default `False`. See `Chi2`.
      allow_nan_stats: Python `bool`, default `True`. See `Chi2`.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = locals()
    with ops.name_scope(name, values=[df]):
      super(Chi2WithAbsDf, self).__init__(
          df=math_ops.floor(
              math_ops.abs(df, name="abs_df"),
              name="floor_abs_df"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
apache-2.0
ononeor12/python-social-auth
social/backends/persona.py
70
1845
""" Mozilla Persona authentication backend, docs at: http://psa.matiasaguirre.net/docs/backends/persona.html """ from social.utils import handle_http_errors from social.backends.base import BaseAuth from social.exceptions import AuthFailed, AuthMissingParameter class PersonaAuth(BaseAuth): """BrowserID authentication backend""" name = 'persona' def get_user_id(self, details, response): """Use BrowserID email as ID""" return details['email'] def get_user_details(self, response): """Return user details, BrowserID only provides Email.""" # {'status': 'okay', # 'audience': 'localhost:8000', # 'expires': 1328983575529, # 'email': 'name@server.com', # 'issuer': 'browserid.org'} email = response['email'] return {'username': email.split('@', 1)[0], 'email': email, 'fullname': '', 'first_name': '', 'last_name': ''} def extra_data(self, user, uid, response, details=None, *args, **kwargs): """Return users extra data""" return {'audience': response['audience'], 'issuer': response['issuer']} @handle_http_errors def auth_complete(self, *args, **kwargs): """Completes loging process, must return user instance""" if 'assertion' not in self.data: raise AuthMissingParameter(self, 'assertion') response = self.get_json('https://browserid.org/verify', data={ 'assertion': self.data['assertion'], 'audience': self.strategy.request_host() }, method='POST') if response.get('status') == 'failure': raise AuthFailed(self) kwargs.update({'response': response, 'backend': self}) return self.strategy.authenticate(*args, **kwargs)
bsd-3-clause
garg91824/authomatic
tests/functional_tests/expected_values/vk.py
12
2230
import datetime
import re

import fixtures
import constants
from authomatic.providers import oauth2

conf = fixtures.get_configuration('vk')

# Avatar URLs are served from VK's image CDN.
# NOTE: all regex patterns below use raw strings; the original non-raw
# '\d' escapes are invalid escape sequences (DeprecationWarning on
# Python 3.6+, and an error in future versions).
PICTURE = re.compile(r'http://[A-Za-z0-9]+\.vk\.me/[A-Za-z0-9-/]+\.jpg')

CONFIG = {
    'login_xpath': '//*[@id="box"]/div/input[6]',
    'password_xpath': '//*[@id="box"]/div/input[7]',
    'consent_xpaths': [
        '//*[@id="install_allow"]',
    ],
    'consent_wait_seconds': 4,
    'class_': oauth2.VK,
    'scope': oauth2.VK.user_info_scope,
    'offline': True,
    'user': {
        'birth_date': conf.user_birth_date_str,
        # VK reports city/country/gender/timezone as numeric identifiers.
        'city': re.compile(r'\d+'),
        'country': re.compile(r'\d+'),
        'email': None,
        'first_name': conf.user_first_name,
        'gender': re.compile(r'\d'),
        'id': conf.user_id,
        'last_name': conf.user_last_name,
        'link': None,
        'locale': None,
        'location': re.compile(r'\d+, \d+'),
        'name': conf.user_name,
        'nickname': None,
        'phone': None,
        'picture': PICTURE,
        'postal_code': None,
        'timezone': re.compile(r'\d'),
        'username': None,
    },
    'content_should_contain': [
        conf.user_birth_date.strftime('%d.%m.%Y'),
        conf.user_first_name,
        conf.user_id,
        conf.user_last_name,

        # User info JSON keys
        'response', 'uid', 'first_name', 'last_name', 'sex', 'nickname',
        'bdate', 'city', 'country', 'timezone', 'photo_big'
    ],
    # Case insensitive
    'content_should_not_contain':
        conf.no_email +
        conf.no_locale +
        conf.no_phone +
        conf.no_postal_code +
        conf.no_username +
        ['link', conf.user_nickname],
    # True means that any truthy value is expected
    'credentials': {
        'token_type': None,
        'provider_type_id': '2-13',
        '_expiration_time': None,
        'consumer_key': None,
        'provider_id': None,
        'consumer_secret': None,
        'token': True,
        'token_secret': None,
        '_expire_in': True,
        'provider_name': 'vk',
        'refresh_token': None,
        'provider_type': 'authomatic.providers.oauth2.OAuth2',
        'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
    },
}
mit
probml/pyprobml
scripts/kf_parallel_demo.py
1
4123
# Parallel Kalman Filter demo: this script simulates
# 4 missiles as described in the section "state-space models".
# Each of the missiles is then filtered and smoothed in parallel
# Author: Gerardo Durán-Martín (@gerdm)

import jax.numpy as jnp
import lds_lib as lds
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from jax import random

plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False


def sample_filter_smooth(key, lds_model, n_samples, noisy_init):
    """
    Sample from a linear dynamical system, apply the kalman filter
    (forward pass), and performs smoothing.

    Parameters
    ----------
    lds: LinearDynamicalSystem
        Instance of a linear dynamical system with known parameters

    Returns
    -------
    Dictionary with the following key, values
    * (z_hist) array(n_samples, timesteps, state_size):
        Simulation of Latent states
    * (x_hist) array(n_samples, timesteps, observation_size):
        Simulation of observed states
    * (mu_hist) array(n_samples, timesteps, state_size):
        Filtered means mut
    * (Sigma_hist) array(n_samples, timesteps, state_size, state_size)
        Filtered covariances Sigmat
    * (mu_cond_hist) array(n_samples, timesteps, state_size)
        Filtered conditional means mut|t-1
    * (Sigma_cond_hist) array(n_samples, timesteps, state_size, state_size)
        Filtered conditional covariances Sigmat|t-1
    * (mu_hist_smooth) array(n_samples, timesteps, state_size):
        Smoothed means mut
    * (Sigma_hist_smooth) array(n_samples, timesteps, state_size, state_size)
        Smoothed covariances Sigmat
    """
    z_hist, x_hist = lds_model.sample(key, n_samples, noisy_init)
    # Forward (filter) pass, then RTS smoothing over the filtered estimates.
    mu_hist, Sigma_hist, mu_cond_hist, Sigma_cond_hist = lds_model.filter(x_hist)
    mu_hist_smooth, Sigma_hist_smooth = lds_model.smooth(mu_hist, Sigma_hist, mu_cond_hist, Sigma_cond_hist)

    return {
        "z_hist": z_hist,
        "x_hist": x_hist,
        "mu_hist": mu_hist,
        "Sigma_hist": Sigma_hist,
        "mu_cond_hist": mu_cond_hist,
        "Sigma_cond_hist": Sigma_cond_hist,
        "mu_hist_smooth": mu_hist_smooth,
        "Sigma_hist_smooth": Sigma_hist_smooth
    }


def plot_collection(obs, ax, means=None, covs=None, **kwargs):
    """Scatter each simulation's observations; optionally overlay the
    estimated mean trajectory and covariance ellipses (every 3rd step)."""
    n_samples, n_steps, _ = obs.shape
    for nsim in range(n_samples):
        X = obs[nsim]
        if means is not None:
            mean = means[nsim]
            # Mark the starting point of the estimated trajectory.
            ax.scatter(*mean[0, :2], marker="o", s=20, c="black", zorder=2)
            ax.plot(*mean[:, :2].T, marker="o", markersize=2, **kwargs, zorder=1)
            if covs is not None:
                cov = covs[nsim]
                for t in range(1, n_steps, 3):
                    pml.plot_ellipse(cov[t][:2, :2], mean[t, :2], ax,
                                     plot_center=False, alpha=0.7)
        ax.scatter(*X.T, marker="+", s=60)


if __name__ == "__main__":
    # Constant-velocity motion model: position integrates velocity with
    # time step Δ; observations are the 2-D positions only.
    Δ = 1.0
    A = jnp.array([
        [1, 0, Δ, 0],
        [0, 1, 0, Δ],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ])

    C = jnp.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0]
    ])

    state_size, _ = A.shape
    observation_size, _ = C.shape

    Q = jnp.eye(state_size) * 0.01
    R = jnp.eye(observation_size) * 1.2
    # Prior parameter distribution
    mu0 = jnp.array([8, 10, 1, 0])
    Sigma0 = jnp.eye(state_size) * 0.1

    n_samples = 4
    n_steps = 15
    key = random.PRNGKey(3141)
    lds_instance = lds.KalmanFilter(A, C, Q, R, mu0, Sigma0, n_steps)

    result = sample_filter_smooth(key, lds_instance, n_samples, True)

    fig, ax = plt.subplots()
    plot_collection(result["x_hist"], ax, result["z_hist"], linestyle="--")
    ax.set_title("State space")
    pml.savefig("missiles_latent.pdf")

    fig, ax = plt.subplots()
    plot_collection(result["x_hist"], ax, result["mu_hist"], result["Sigma_hist"])
    ax.set_title("Filtered")
    pml.savefig("missiles_filtered.pdf")

    fig, ax = plt.subplots()
    plot_collection(result["x_hist"], ax, result["mu_hist_smooth"], result["Sigma_hist_smooth"])
    ax.set_title("Smoothed")
    pml.savefig("missiles_smoothed.pdf")

    plt.show()
mit
zenners/angular-contacts
node_modules/firebase/node_modules/faye-websocket/node_modules/websocket-driver/node_modules/websocket-extensions/node_modules/jstest/node_modules/testling/node_modules/ent/node_modules/tap/node_modules/yamlish/yamlish-py/test/test_reader.py
147
9379
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import yaml import yamlish import test import unittest test_data_list = [ { "name": 'Hello World', "in": [ '--- Hello, World', '...', ], "out": "Hello, World", }, { "name": 'Hello World 2', "in": [ '--- \'Hello, \'\'World\'', '...', ], "out": "Hello, 'World", }, { "name": 'Hello World 3', "in": [ '--- "Hello, World"', '...', ], "out": "Hello, World", }, { "name": 'Hello World 4', "in": [ '--- "Hello, World"', '...', ], "out": "Hello, World", }, { "name": 'Hello World 5', "in": [ '--- >', ' Hello,', ' World', '...', ], "out": "Hello, World\n", }, { "name": 'Hello World 6', "in": [ '--- >', ' Hello,', ' World', '...', ], "error": yaml.parser.ParserError, }, { "name": 'Simple array', "in": [ '---', '- 1', '- 2', '- 3', '...', ], "out": [ 1, 2, 3 ], }, { "name": 'Mixed array', "in": [ '---', '- 1', "- 'two'", r'- "three\n"', '...', ], "out": [ 1, 'two', "three\n" ], }, { "name": 'Hash in array', "in": [ '---', ' - 1', ' - two: 2', ' - 3', '...', ], "out": [ 1, { "two": 2 }, 3 ], }, { "name": 'Hash in array 2', "in": [ '---', '- 1', '- two: 2', ' three: 3', '- 4', '...', ], "out": [ 1, { "two": 2, "three": 3 }, 4 ], }, { "name": 'Nested array', "in": [ '---', '- one', '- ', ' - two', ' - ', ' - three', ' - four', '- five', '...', ], "out": [ 'one', [ 'two', ['three'], 'four' ], 'five' ], }, { "name": 'Nested hash', "in": [ '---', 'one:', ' five: 5', ' two:', ' four: 4', ' three: 3', 'six: 6', '...', ], "out": { "one": { "two": { "three": 3, "four": 4 }, "five": 5 }, "six": 6 }, }, { "name": 'Original YAML::Tiny test', "in": [ '---', 'invoice: 34843', 'date : 2001-01-23', 'bill-to:', ' given : Chris', ' family : Dumars', ' address:', ' lines: | ', ' 458 Walkman Dr.', ' Suite #292', ' city : Royal Oak', ' state : MI', ' postal : 48046', 'product:', ' - sku : BL394D', ' quantity : 4', ' description : Basketball', ' price : 450.00', ' - sku : BL4438H', ' quantity : 1', ' 
description : Super Hoop', ' price : 2392.00', 'tax : 251.42', 'total: 4443.52', 'comments: >', ' Late afternoon is best.', ' Backup contact is Nancy', ' Billsmer @ 338-4338', '...', ], "out": { "bill-to": { "given": 'Chris', "address": { "city": 'Royal Oak', "postal": 48046, "lines": "458 Walkman Dr.\nSuite #292\n", "state": 'MI' }, "family": 'Dumars' }, "invoice": 34843, "date": '2001-01-23', "tax": 251.42, "product": [ { "sku": 'BL394D', "quantity": 4, "price": 450.00, "description": 'Basketball' }, { "sku": 'BL4438H', "quantity": 1, "price": 2392.00, "description": 'Super Hoop' } ], 'comments': "Late afternoon is best. Backup contact is Nancy Billsmer @ 338-4338\n", "total": 4443.52 } }, # Tests harvested from YAML::Tiny { "in": ['...'], "name": 'Regression: empty', "error": yaml.parser.ParserError, }, { "in": [ '# comment', '...' ], "name": 'Regression: only_comment', "error": yaml.parser.ParserError, }, { "skip": True, # A corner case, which is apparently not # clear even from the spec file "out": None, "in": [ '---', '...' ], "name": 'Regression: only_header', "x-error": yaml.parser.ParserError, }, { "in": [ '---', '---', '...' ], "name": 'Regression: two_header', "error": yaml.composer.ComposerError, }, { "out": None, "in": [ '--- ~', '...' ], "name": 'Regression: one_undef' }, { "out": None, "in": [ '--- ~', '...' ], "name": 'Regression: one_undef2' }, { "in": [ '--- ~', '---', '...' ], "name": 'Regression: two_undef', "error": yaml.composer.ComposerError, }, { "out": 'foo', "in": [ '--- foo', '...' ], "name": 'Regression: one_scalar', }, { "out": 'foo', "in": [ '--- foo', '...' ], "name": 'Regression: one_scalar2', }, { "in": [ '--- foo', '--- bar', '...' ], "name": 'Regression: two_scalar', "error": yaml.composer.ComposerError, }, { "out": ['foo'], "in": [ '---', '- foo', '...' ], "name": 'Regression: one_list1' }, { "out": [ 'foo', 'bar' ], "in": [ '---', '- foo', '- bar', '...' 
], "name": 'Regression: one_list2' }, { "out": [ None, 'bar' ], "in": [ '---', '- ~', '- bar', '...' ], "name": 'Regression: one_listundef' }, { "out": { 'foo': 'bar' }, "in": [ '---', 'foo: bar', '...' ], "name": 'Regression: one_hash1' }, { "out": { "foo": 'bar', "this": None }, "in": [ '---', 'foo: bar', 'this: ~', '...' ], "name": 'Regression: one_hash2' }, { "out": { 'foo': [ 'bar', None, 'baz' ] }, "in": [ '---', 'foo:', ' - bar', ' - ~', ' - baz', '...' ], "name": 'Regression: array_in_hash' }, { "out": { "bar": { 'foo': 'bar' }, "foo": None }, "in": [ '---', 'foo: ~', 'bar:', ' foo: bar', '...' ], "name": 'Regression: hash_in_hash' }, { "out": [ { "foo": None, "this": 'that' }, 'foo', None, { "foo": 'bar', "this": 'that' } ], "in": [ '---', '-', ' foo: ~', ' this: that', '- foo', '- ~', '-', ' foo: bar', ' this: that', '...' ], "name": 'Regression: hash_in_array' }, { "out": ['foo'], "in": [ '---', '- \'foo\'', '...' ], "name": 'Regression: single_quote1' }, { "out": [' '], "in": [ '---', '- \' \'', '...' ], "name": 'Regression: single_spaces' }, { "out": [''], "in": [ '---', '- \'\'', '...' ], "name": 'Regression: single_null' }, { "out": ' ', "in": [ '--- " "', '...' ], "name": 'Regression: only_spaces' }, { "out": [ None, { "foo": 'bar', "this": 'that' }, 'baz' ], "in": [ '---', '- ~', '- foo: bar', ' this: that', '- baz', '...' 
], "name": 'Regression: inline_nested_hash' }, { "name": "Unprintables", "in": [ "- \"\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\a\\x08\\t\\n\\v\\f\\r\\x0e\\x0f\"", "- \"\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\e\\x1c\\x1d\\x1e\\x1f\"", "- \" !\\\"#$%&'()*+,-./\"", "- 0123456789:;<=>?", "- '\@ABCDEFGHIJKLMNO'", "- 'PQRSTUVWXYZ[\\]^_'", "- '`abcdefghijklmno'", r"- 'pqrstuvwxyz{|}~\177'", "- \\200\\201\\202\\203\\204\\205\\206\\207\\210\\211\\212\\213\\214\\215\\216\\217", "- \\220\\221\\222\\223\\224\\225\\226\\227\\230\\231\\232\\233\\234\\235\\236\\237", "- \\240\\241\\242\\243\\244\\245\\246\\247\\250\\251\\252\\253\\254\\255\\256\\257", "- \\260\\261\\262\\263\\264\\265\\266\\267\\270\\271\\272\\273\\274\\275\\276\\277", "- \\300\\301\\302\\303\\304\\305\\306\\307\\310\\311\\312\\313\\314\\315\\316\\317", "- \\320\\321\\322\\323\\324\\325\\326\\327\\330\\331\\332\\333\\334\\335\\336\\337", "- \\340\\341\\342\\343\\344\\345\\346\\347\\350\\351\\352\\353\\354\\355\\356\\357", "- \\360\\361\\362\\363\\364\\365\\366\\367\\370\\371\\372\\373\\374\\375\\376\\377", "..." 
], "out": [ "\0\1\2\3\4\5\6\a\b\t\n\13\f\r\16\17", "\20\21\22\23\24\25\26\27\30\31\32\33\34\35\36\37", " !\"#$%&'()*+,-./", "0123456789:;<=>?", "\@ABCDEFGHIJKLMNO", "PQRSTUVWXYZ[\\]^_", "`abcdefghijklmno", r"pqrstuvwxyz{|}~\177", r"\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217", r"\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237", r"\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257", r"\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277", r"\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317", r"\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337", r"\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357", r"\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377" ] }, { "name": 'Quoted hash keys', "in": [ '---', ' "quoted": Magic!', ' "\\n\\t": newline, tab', '...', ], "out": { "quoted": 'Magic!', "\n\t": 'newline, tab', }, }, ] class TestReader(unittest.TestCase): # IGNORE:C0111 pass test.generate_testsuite(test_data_list, TestReader, yamlish.load) if __name__ == "__main__": unittest.main()
mit
GISeHealth/qgis-versioning
test/issue437_test.py
3
3445
#!/usr/bin/python
# Regression test for issue #437: exercise checkout / commit / update /
# conflict-resolution on two working copies of a versioned PostGIS database.
# NOTE: Python 2 script (print statements, pyspatialite).
import versioning_base
from pyspatialite import dbapi2
import psycopg2
import os
import shutil

test_data_dir = os.path.dirname(os.path.realpath(__file__))
tmp_dir = "/tmp"

# create the test database
os.system("dropdb epanet_test_db")
os.system("createdb epanet_test_db")
os.system("psql epanet_test_db -c 'CREATE EXTENSION postgis'")
os.system("psql epanet_test_db -f "+test_data_dir+"/issue437_test_db.sql")

# try the update
# Two independent working copies checked out from the same trunk revision.
wc = [tmp_dir+"/issue437_wc0.sqlite", tmp_dir+"/issue437_wc1.sqlite"]
for f in wc:
    if os.path.isfile(f):
        os.remove(f)
    versioning_base.checkout("dbname=epanet_test_db",
        ['epanet_trunk_rev_head.junctions', 'epanet_trunk_rev_head.pipes'], f)

scur = []
for f in wc:
    scur.append(versioning_base.Db( dbapi2.connect( f ) ))

# wc0 adds two pipes and commits; wc1 then updates to see them.
scur[0].execute("INSERT INTO pipes_view(id, start_node, end_node, GEOMETRY) VALUES ('2','1','2',GeomFromText('LINESTRING(1 1,0 1)',2154))")
scur[0].execute("INSERT INTO pipes_view(id, start_node, end_node, GEOMETRY) VALUES ('3','1','2',GeomFromText('LINESTRING(1 -1,0 1)',2154))")
scur[0].commit()

versioning_base.commit( wc[0], 'commit 1 wc0', "dbname=epanet_test_db")
versioning_base.update( wc[1], "dbname=epanet_test_db" )

# Conflicting edits: both working copies modify `length` locally.
scur[0].execute("UPDATE pipes_view SET length = 1")
scur[0].commit()
scur[1].execute("UPDATE pipes_view SET length = 2")
scur[1].execute("UPDATE pipes_view SET length = 3")
scur[1].commit()

versioning_base.commit( wc[0], "commit 2 wc0", "dbname=epanet_test_db" )
scur[0].execute("SELECT OGC_FID,length,trunk_rev_begin,trunk_rev_end,trunk_parent,trunk_child FROM pipes")
print '################'
for r in scur[0].fetchall():
    print r

# wc0 makes further edits (update + delete) and commits again.
scur[0].execute("UPDATE pipes_view SET length = 2")
scur[0].execute("DELETE FROM pipes_view WHERE OGC_FID = 6")
scur[0].commit()
versioning_base.commit( wc[0], "commit 3 wc0", "dbname=epanet_test_db" )

scur[0].execute("SELECT OGC_FID,length,trunk_rev_begin,trunk_rev_end,trunk_parent,trunk_child FROM pipes")
print '################'
for r in scur[0].fetchall():
    print r

# Updating wc1 must now surface conflicts between its local edits and
# the committed wc0 changes.
versioning_base.update( wc[1], "dbname=epanet_test_db" )
scur[1].execute("SELECT OGC_FID,length,trunk_rev_begin,trunk_rev_end,trunk_parent,trunk_child FROM pipes_diff")
print '################ diff'
for r in scur[1].fetchall():
    print r

scur[1].execute("SELECT conflict_id FROM pipes_conflicts")
assert( len(scur[1].fetchall()) == 6 ) # there must be conflicts

scur[1].execute("SELECT conflict_id,origin,action,OGC_FID,trunk_parent,trunk_child FROM pipes_conflicts")
print '################'
for r in scur[1].fetchall():
    print r

# Resolving a conflict by deleting one side removes the whole pair.
scur[1].execute("DELETE FROM pipes_conflicts WHERE origin='theirs' AND conflict_id=1")
scur[1].commit()
scur[1].execute("SELECT conflict_id FROM pipes_conflicts")
assert( len(scur[1].fetchall()) == 4 ) # there must be two removed entries

scur[1].execute("SELECT conflict_id,origin,action,OGC_FID,trunk_parent,trunk_child FROM pipes_conflicts")
print '################'
for r in scur[1].fetchall():
    print r

# Resolve the remaining conflicts (keep "mine" for FID 11, "theirs" rest).
scur[1].execute("DELETE FROM pipes_conflicts WHERE origin='mine' AND OGC_FID = 11")
scur[1].execute("DELETE FROM pipes_conflicts WHERE origin='theirs'")
scur[1].commit()
scur[1].execute("SELECT conflict_id FROM pipes_conflicts")
assert( len(scur[1].fetchall()) == 0 ) # there must be no conflict

scur[1].execute("SELECT OGC_FID,length,trunk_rev_begin,trunk_rev_end,trunk_parent,trunk_child FROM pipes")
print '################'
for r in scur[1].fetchall():
    print r
gpl-2.0
Brother-Simon/AutobahnTestSuite
autobahntestsuite/autobahntestsuite/case/case9_4_1.py
4
2283
###############################################################################
##
##  Copyright 2011 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################

from case import Case


class Case9_4_1(Case):
    """Fragmentation limit case: echo a 4M binary message sent in 64-byte
    fragments and verify the echoed payload length."""

    DESCRIPTION = """Send fragmented binary message message with message payload of length 4 * 2**20 (4M). Sent out in fragments of 64."""

    EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""

    def init(self):
        # 4 MiB payload in 64-byte fragments => 65536 WebSocket frames.
        self.DATALEN = 4 * 2**20
        self.FRAGSIZE = 64
        self.PAYLOAD = "\xfe" * self.DATALEN
        self.WAITSECS = 100
        self.reportTime = True

    def onOpen(self):
        # Wirelogging 65k frames would be prohibitively slow; enable it only
        # once the echoed message has arrived (see onMessage).
        self.p.createWirelog = False
        self.behavior = Case.FAILED
        self.result = "Did not receive message within %d seconds." % self.WAITSECS
        self.expectedClose = {"closedByMe": True,
                              "closeCode": [self.p.CLOSE_STATUS_CODE_NORMAL],
                              "requireClean": True}
        self.p.sendMessage(payload=self.PAYLOAD,
                           isBinary=True,
                           fragmentSize=self.FRAGSIZE)
        self.p.closeAfter(self.WAITSECS)

    def onMessage(self, payload, isBinary):
        if not isBinary:
            # BUG FIX: this branch runs when a *text* message was echoed back;
            # the original failure message wrongly said "but got binary."
            self.result = "Expected binary message with payload, but got text."
        else:
            if len(payload) != self.DATALEN:
                self.result = "Expected binary message with payload of length %d, but got %d." % (self.DATALEN, len(payload))
            else:
                ## FIXME : check actual content
                ##
                self.behavior = Case.OK
                self.result = "Received binary message of length %d." % len(payload)
        self.p.createWirelog = True
        self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
apache-2.0
lordkman/burnman
burnman/minerals/SLB_2011.py
3
60813
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences # Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU # GPL v2 or later. """ SLB_2011 Minerals from Stixrude & Lithgow-Bertelloni 2011 and references therein File autogenerated using SLBdata_to_burnman.py """ from __future__ import absolute_import from ..mineral import Mineral from ..solidsolution import SolidSolution from ..solutionmodel import * from ..processchemistry import dictionarize_formula, formula_mass ''' SOLID SOLUTIONS from inv251010 of HeFESTo ''' class c2c_pyroxene(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'C2/c pyroxene' self.solution_type = 'ideal' self.endmembers = [ [hp_clinoenstatite(), '[Mg]2Si2O6'], [hp_clinoferrosilite(), '[Fe]2Si2O6']] SolidSolution.__init__(self, molar_fractions=molar_fractions) class ca_ferrite_structured_phase(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'calcium ferrite structured phase' self.solution_type = 'ideal' self.endmembers = [[mg_ca_ferrite(), '[Mg]Al[Al]O4'], [ fe_ca_ferrite(), '[Fe]Al[Al]O4'], [na_ca_ferrite(), '[Na]Al[Si]O4']] SolidSolution.__init__(self, molar_fractions=molar_fractions) class clinopyroxene(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'clinopyroxene' self.solution_type = 'symmetric' self.endmembers = [[diopside(), '[Ca][Mg][Si]2O6'], [hedenbergite(), '[Ca][Fe][Si]2O6'], [ clinoenstatite(), '[Mg][Mg][Si]2O6'], [ca_tschermaks(), '[Ca][Al][Si1/2Al1/2]2O6'], [jadeite(), '[Na][Al][Si]2O6']] self.energy_interaction = [ [0., 24.74e3, 26.e3, 24.3e3], [24.74e3, 0., 0.e3], [60.53136e3, 0.0], [10.e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class garnet(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'garnet' self.solution_type = 'symmetric' self.endmembers = [[pyrope(), '[Mg]3[Al][Al]Si3O12'], [almandine(), '[Fe]3[Al][Al]Si3O12'], [ grossular(), 
'[Ca]3[Al][Al]Si3O12'], [mg_majorite(), '[Mg]3[Mg][Si]Si3O12'], [jd_majorite(), '[Na2/3Al1/3]3[Al][Si]Si3O12']] self.energy_interaction = [ [0.0, 30.e3, 21.20278e3, 0.0], [0.0, 0.0, 0.0], [57.77596e3, 0.0], [0.0]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class akimotoite(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'akimotoite/ilmenite' self.solution_type = 'symmetric' self.endmembers = [[mg_akimotoite(), '[Mg][Si]O3'], [ fe_akimotoite(), '[Fe][Si]O3'], [corundum(), '[Al][Al]O3']] self.energy_interaction = [[0.0, 66.e3], [66.e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class ferropericlase(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'magnesiowustite/ferropericlase' self.solution_type = 'symmetric' self.endmembers = [[periclase(), '[Mg]O'], [wuestite(), '[Fe]O']] self.energy_interaction = [[13.e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class mg_fe_olivine(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'olivine' self.solution_type = 'symmetric' self.endmembers = [[ forsterite(), '[Mg]2SiO4'], [fayalite(), '[Fe]2SiO4']] self.energy_interaction = [[7.81322e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class orthopyroxene(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'orthopyroxene' self.solution_type = 'symmetric' self.endmembers = [[enstatite(), '[Mg][Mg][Si]SiO6'], [ferrosilite(), '[Fe][Fe][Si]SiO6'], [ mg_tschermaks(), '[Mg][Al][Al]SiO6'], [ortho_diopside(), '[Ca][Mg][Si]SiO6']] self.energy_interaction = [ [0.0, 0.0, 32.11352e3], [0.0, 0.0], [48.35316e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class plagioclase(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'plagioclase' self.solution_type = 'symmetric' self.endmembers = [ [anorthite(), '[Ca][Al]2Si2O8'], [albite(), '[Na][Al1/2Si1/2]2Si2O8']] self.energy_interaction = [[26.0e3]] 
SolidSolution.__init__(self, molar_fractions=molar_fractions) class post_perovskite(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'post-perovskite/bridgmanite' self.solution_type = 'symmetric' self.endmembers = [[mg_post_perovskite(), '[Mg][Si]O3'], [ fe_post_perovskite(), '[Fe][Si]O3'], [al_post_perovskite(), '[Al][Al]O3']] self.energy_interaction = [[0.0, 60.0e3], [0.0]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class mg_fe_perovskite(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'magnesium silicate perovskite/bridgmanite' self.solution_type = 'symmetric' self.endmembers = [[mg_perovskite(), '[Mg][Si]O3'], [ fe_perovskite(), '[Fe][Si]O3'], [al_perovskite(), '[Al][Al]O3']] self.energy_interaction = [[0.0, 116.0e3], [0.0]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class mg_fe_ringwoodite(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'ringwoodite' self.solution_type = 'symmetric' self.endmembers = [ [mg_ringwoodite(), '[Mg]2SiO4'], [fe_ringwoodite(), '[Fe]2SiO4']] self.energy_interaction = [[9.34084e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class mg_fe_aluminous_spinel(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'spinel-hercynite binary, fixed order' self.solution_type = 'symmetric' self.endmembers = [[spinel(), '[Mg3/4Al1/4]4[Al7/8Mg1/8]8O16'], [ hercynite(), '[Fe3/4Al1/4]4[Al7/8Fe1/8]8O16']] self.energy_interaction = [[5.87646e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) class mg_fe_wadsleyite(SolidSolution): def __init__(self, molar_fractions=None): self.name = 'wadsleyite' self.solution_type = 'symmetric' self.endmembers = [ [mg_wadsleyite(), '[Mg]2SiO4'], [fe_wadsleyite(), '[Fe]2SiO4']] self.energy_interaction = [[16.74718e3]] SolidSolution.__init__(self, molar_fractions=molar_fractions) """ ENDMEMBERS """ class anorthite (Mineral): def __init__(self): formula = 'CaAl2Si2O8' 
formula = dictionarize_formula(formula) self.params = { 'name': 'Anorthite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -4014619.0, 'V_0': 0.00010061, 'K_0': 84089150000.0, 'Kprime_0': 4.0, 'Debye_0': 752.3911, 'grueneisen_0': 0.39241, 'q_0': 1.0, 'G_0': 39900000000.0, 'Gprime_0': 1.09134, 'eta_s_0': 1.6254, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 5000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 2.0, 'err_grueneisen_0': 0.05, 'err_q_0': 1.0, 'err_G_0': 3000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class albite (Mineral): def __init__(self): formula = 'NaAlSi3O8' formula = dictionarize_formula(formula) self.params = { 'name': 'Albite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -3718799.0, 'V_0': 0.000100452, 'K_0': 59761620000.0, 'Kprime_0': 4.0, 'Debye_0': 713.7824, 'grueneisen_0': 0.56704, 'q_0': 1.0, 'G_0': 36000000000.0, 'Gprime_0': 1.3855, 'eta_s_0': 1.04208, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 5000.0, 'err_V_0': 0.0, 'err_K_0': 5000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 13.0, 'err_grueneisen_0': 0.03, 'err_q_0': 1.0, 'err_G_0': 5000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class spinel (Mineral): def __init__(self): formula = 'Mg4Al8O16' formula = dictionarize_formula(formula) self.params = { 'name': 'Spinel', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -8667568.0, 'V_0': 0.000159048, 'K_0': 1.969428e+11, 'Kprime_0': 5.68282, 'Debye_0': 842.8104, 'grueneisen_0': 1.02283, 'q_0': 2.71208, 'G_0': 1.085e+11, 'Gprime_0': 0.37303, 'eta_s_0': 2.66282, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 43.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 32000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 
'err_K_prime_0': 0.2, 'err_Debye_0': 33.0, 'err_grueneisen_0': 0.04, 'err_q_0': 0.6, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 0.6} Mineral.__init__(self) class hercynite (Mineral): def __init__(self): formula = 'Fe4Al8O16' formula = dictionarize_formula(formula) self.params = { 'name': 'Hercynite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -7324009.0, 'V_0': 0.000163372, 'K_0': 2.088965e+11, 'Kprime_0': 5.68282, 'Debye_0': 763.231, 'grueneisen_0': 1.21719, 'q_0': 2.71208, 'G_0': 84500000000.0, 'Gprime_0': 0.37303, 'eta_s_0': 2.768, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 97.28, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 35000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 32.0, 'err_grueneisen_0': 0.07, 'err_q_0': 1.0, 'err_G_0': 13000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class forsterite (Mineral): def __init__(self): formula = 'Mg2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Forsterite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2055403.0, 'V_0': 4.3603e-05, 'K_0': 1.279555e+11, 'Kprime_0': 4.21796, 'Debye_0': 809.1703, 'grueneisen_0': 0.99282, 'q_0': 2.10672, 'G_0': 81599990000.0, 'Gprime_0': 1.46257, 'eta_s_0': 2.29972, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 1.0, 'err_grueneisen_0': 0.03, 'err_q_0': 0.2, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.1} Mineral.__init__(self) class fayalite (Mineral): def __init__(self): formula = 'Fe2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Fayalite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1370519.0, 'V_0': 4.629e-05, 'K_0': 1.349622e+11, 'Kprime_0': 4.21796, 'Debye_0': 
618.7007, 'grueneisen_0': 1.06023, 'q_0': 3.6466, 'G_0': 50899990000.0, 'Gprime_0': 1.46257, 'eta_s_0': 1.02497, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 26.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 2.0, 'err_grueneisen_0': 0.07, 'err_q_0': 1.0, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 0.6} Mineral.__init__(self) class mg_wadsleyite (Mineral): def __init__(self): formula = 'Mg2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Wadsleyite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2027837.0, 'V_0': 4.0515e-05, 'K_0': 1.686948e+11, 'Kprime_0': 4.3229, 'Debye_0': 843.4973, 'grueneisen_0': 1.2061, 'q_0': 2.0188, 'G_0': 1.12e+11, 'Gprime_0': 1.44424, 'eta_s_0': 2.63683, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 3000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 7.0, 'err_grueneisen_0': 0.09, 'err_q_0': 1.0, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.2, 'err_eta_s_0': 0.4} Mineral.__init__(self) class fe_wadsleyite (Mineral): def __init__(self): formula = 'Fe2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Wadsleyite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1364668.0, 'V_0': 4.28e-05, 'K_0': 1.68591e+11, 'Kprime_0': 4.3229, 'Debye_0': 665.4492, 'grueneisen_0': 1.2061, 'q_0': 2.0188, 'G_0': 72000000000.0, 'Gprime_0': 1.44424, 'eta_s_0': 1.04017, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 26.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 7000.0, 'err_V_0': 0.0, 'err_K_0': 13000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 21.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 12000000000.0, 
'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class mg_ringwoodite (Mineral): def __init__(self): formula = 'Mg2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Ringwoodite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2017557.0, 'V_0': 3.9493e-05, 'K_0': 1.849009e+11, 'Kprime_0': 4.22035, 'Debye_0': 877.7094, 'grueneisen_0': 1.10791, 'q_0': 2.3914, 'G_0': 1.23e+11, 'Gprime_0': 1.35412, 'eta_s_0': 2.30461, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 8.0, 'err_grueneisen_0': 0.1, 'err_q_0': 0.4, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.5} Mineral.__init__(self) class fe_ringwoodite (Mineral): def __init__(self): formula = 'Fe2SiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Ringwoodite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1362772.0, 'V_0': 4.186e-05, 'K_0': 2.13412e+11, 'Kprime_0': 4.22035, 'Debye_0': 677.7177, 'grueneisen_0': 1.27193, 'q_0': 2.3914, 'G_0': 92000000000.0, 'Gprime_0': 1.35412, 'eta_s_0': 1.77249, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 26.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 7000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 8.0, 'err_grueneisen_0': 0.23, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class enstatite (Mineral): def __init__(self): formula = 'Mg2Si2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Enstatite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2913596.0, 'V_0': 6.2676e-05, 'K_0': 1.070768e+11, 'Kprime_0': 7.02751, 'Debye_0': 812.1848, 'grueneisen_0': 0.78479, 'q_0': 3.43846, 'G_0': 76800000000.0, 'Gprime_0': 1.54596, 'eta_s_0': 
2.50453, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.4, 'err_Debye_0': 4.0, 'err_grueneisen_0': 0.04, 'err_q_0': 0.4, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.1} Mineral.__init__(self) class ferrosilite (Mineral): def __init__(self): formula = 'Fe2Si2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Ferrosilite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2225718.0, 'V_0': 6.5941e-05, 'K_0': 1.005386e+11, 'Kprime_0': 7.02751, 'Debye_0': 674.4769, 'grueneisen_0': 0.71889, 'q_0': 3.43846, 'G_0': 52000000000.0, 'Gprime_0': 1.54596, 'eta_s_0': 1.07706, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 26.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 4000000000.0, 'err_K_prime_0': 0.5, 'err_Debye_0': 10.0, 'err_grueneisen_0': 0.08, 'err_q_0': 1.0, 'err_G_0': 5000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class mg_tschermaks (Mineral): def __init__(self): formula = 'MgAl2SiO6' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Tschermaks', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -3002470.0, 'V_0': 5.914e-05, 'K_0': 1.070768e+11, 'Kprime_0': 7.02751, 'Debye_0': 783.8404, 'grueneisen_0': 0.78479, 'q_0': 3.43846, 'G_0': 95950860000.0, 'Gprime_0': 1.54596, 'eta_s_0': 2.49099, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 9000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 24.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class ortho_diopside (Mineral): def __init__(self): formula = 'CaMgSi2O6' formula = dictionarize_formula(formula) 
self.params = { 'name': 'Ortho_Diopside', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -3015827.0, 'V_0': 6.8054e-05, 'K_0': 1.070768e+11, 'Kprime_0': 7.02751, 'Debye_0': 744.6988, 'grueneisen_0': 0.78479, 'q_0': 3.43846, 'G_0': 58458950000.0, 'Gprime_0': 1.54596, 'eta_s_0': 1.36161, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 9.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class diopside (Mineral): def __init__(self): formula = 'CaMgSi2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Diopside', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -3029531.0, 'V_0': 6.6039e-05, 'K_0': 1.122413e+11, 'Kprime_0': 5.23885, 'Debye_0': 781.6146, 'grueneisen_0': 0.95873, 'q_0': 1.52852, 'G_0': 67000000000.0, 'Gprime_0': 1.37293, 'eta_s_0': 1.57351, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 5000000000.0, 'err_K_prime_0': 1.8, 'err_Debye_0': 3.0, 'err_grueneisen_0': 0.05, 'err_q_0': 2.0, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class hedenbergite (Mineral): def __init__(self): formula = 'CaFeSi2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Hedenbergite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2677330.0, 'V_0': 6.7867e-05, 'K_0': 1.192555e+11, 'Kprime_0': 5.23885, 'Debye_0': 701.5851, 'grueneisen_0': 0.93516, 'q_0': 1.52852, 'G_0': 61000000000.0, 'Gprime_0': 1.17647, 'eta_s_0': 1.5703, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 45000.0, 'err_V_0': 0.0, 'err_K_0': 4000000000.0, 'err_K_prime_0': 
1.0, 'err_Debye_0': 2.0, 'err_grueneisen_0': 0.06, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class clinoenstatite (Mineral): def __init__(self): formula = 'Mg2Si2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Clinoenstatite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2905918.0, 'V_0': 6.25e-05, 'K_0': 1.122413e+11, 'Kprime_0': 5.23885, 'Debye_0': 805.0547, 'grueneisen_0': 0.95873, 'q_0': 1.52852, 'G_0': 79496860000.0, 'Gprime_0': 1.62901, 'eta_s_0': 1.69074, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 10.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class ca_tschermaks (Mineral): def __init__(self): formula = 'CaAl2SiO6' formula = dictionarize_formula(formula) self.params = { 'name': 'Ca_Tschermaks', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -3120253.0, 'V_0': 6.3574e-05, 'K_0': 1.122413e+11, 'Kprime_0': 5.23885, 'Debye_0': 803.6626, 'grueneisen_0': 0.78126, 'q_0': 1.52852, 'G_0': 75160660000.0, 'Gprime_0': 1.54016, 'eta_s_0': 1.9672, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 11.525, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 5000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 5.0, 'err_grueneisen_0': 0.0, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class jadeite (Mineral): def __init__(self): formula = 'NaAlSi2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'Jadeite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2855192.0, 'V_0': 6.0508e-05, 'K_0': 1.422873e+11, 'Kprime_0': 5.23885, 'Debye_0': 820.7623, 
'grueneisen_0': 0.903, 'q_0': 0.39234, 'G_0': 85000000000.0, 'Gprime_0': 1.37398, 'eta_s_0': 2.18453, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 12.0, 'err_grueneisen_0': 0.08, 'err_q_0': 1.4, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class hp_clinoenstatite (Mineral): def __init__(self): formula = 'Mg2Si2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'HP_Clinoenstatite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2905788.0, 'V_0': 6.076e-05, 'K_0': 1.160254e+11, 'Kprime_0': 6.23685, 'Debye_0': 824.4439, 'grueneisen_0': 1.12473, 'q_0': 0.20401, 'G_0': 87927170000.0, 'Gprime_0': 1.84119, 'eta_s_0': 2.14181, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.3, 'err_Debye_0': 7.0, 'err_grueneisen_0': 0.05, 'err_q_0': 0.5, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.5} Mineral.__init__(self) class hp_clinoferrosilite (Mineral): def __init__(self): formula = 'Fe2Si2O6' formula = dictionarize_formula(formula) self.params = { 'name': 'HP_Clinoferrosilite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2222183.0, 'V_0': 6.385413e-05, 'K_0': 1.160254e+11, 'Kprime_0': 6.23685, 'Debye_0': 691.564, 'grueneisen_0': 1.12473, 'q_0': 0.20401, 'G_0': 70623090000.0, 'Gprime_0': 1.84119, 'eta_s_0': 0.79216, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 26.76, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 11.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class 
ca_perovskite (Mineral): def __init__(self): formula = 'CaSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Ca_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1463358.0, 'V_0': 2.745e-05, 'K_0': 2.36e+11, 'Kprime_0': 3.9, 'Debye_0': 795.779, 'grueneisen_0': 1.88839, 'q_0': 0.89769, 'G_0': 1.568315e+11, 'Gprime_0': 2.22713, 'eta_s_0': 1.28818, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 8000.0, 'err_V_0': 0.0, 'err_K_0': 4000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 44.0, 'err_grueneisen_0': 0.07, 'err_q_0': 1.6, 'err_G_0': 12000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class mg_akimotoite (Mineral): def __init__(self): formula = 'MgSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Akimotoite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1410850.0, 'V_0': 2.6354e-05, 'K_0': 2.10706e+11, 'Kprime_0': 5.62088, 'Debye_0': 935.9778, 'grueneisen_0': 1.18984, 'q_0': 2.34514, 'G_0': 1.32e+11, 'Gprime_0': 1.57889, 'eta_s_0': 2.80782, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 4000000000.0, 'err_K_prime_0': 0.8, 'err_Debye_0': 12.0, 'err_grueneisen_0': 0.13, 'err_q_0': 0.8, 'err_G_0': 8000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class fe_akimotoite (Mineral): def __init__(self): formula = 'FeSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Akimotoite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1067598.0, 'V_0': 2.6854e-05, 'K_0': 2.10706e+11, 'Kprime_0': 5.62088, 'Debye_0': 887.8709, 'grueneisen_0': 1.18984, 'q_0': 2.34514, 'G_0': 1.523046e+11, 'Gprime_0': 1.57889, 'eta_s_0': 3.5716, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] 
self.uncertainties = { 'err_F_0': 21000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 120.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class corundum (Mineral): def __init__(self): formula = 'AlAlO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Corundum', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1582454.0, 'V_0': 2.5577e-05, 'K_0': 2.525457e+11, 'Kprime_0': 4.33728, 'Debye_0': 932.5696, 'grueneisen_0': 1.32442, 'q_0': 1.30316, 'G_0': 1.632e+11, 'Gprime_0': 1.64174, 'eta_s_0': 2.8316, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 5000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 3.0, 'err_grueneisen_0': 0.04, 'err_q_0': 0.2, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.2} Mineral.__init__(self) class pyrope (Mineral): def __init__(self): formula = 'Mg3Al2Si3O12' formula = dictionarize_formula(formula) self.params = { 'name': 'Pyrope', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -5936538.0, 'V_0': 0.00011308, 'K_0': 1.702396e+11, 'Kprime_0': 4.11067, 'Debye_0': 823.2102, 'grueneisen_0': 1.01424, 'q_0': 1.42169, 'G_0': 93699990000.0, 'Gprime_0': 1.35756, 'eta_s_0': 0.98186, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 10000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.3, 'err_Debye_0': 4.0, 'err_grueneisen_0': 0.06, 'err_q_0': 0.5, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.2, 'err_eta_s_0': 0.3} Mineral.__init__(self) class almandine (Mineral): def __init__(self): formula = 'Fe3Al2Si3O12' formula = dictionarize_formula(formula) self.params = { 'name': 'Almandine', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -4935516.0, 'V_0': 0.00011543, 'K_0': 1.738963e+11, 'Kprime_0': 4.91341, 'Debye_0': 741.356, 
'grueneisen_0': 1.06495, 'q_0': 1.42169, 'G_0': 96000000000.0, 'Gprime_0': 1.40927, 'eta_s_0': 2.09292, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 40.14, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 29000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 5.0, 'err_grueneisen_0': 0.06, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class grossular (Mineral): def __init__(self): formula = 'Ca3Al2Si3O12' formula = dictionarize_formula(formula) self.params = { 'name': 'Grossular', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -6277935.0, 'V_0': 0.00012512, 'K_0': 1.670622e+11, 'Kprime_0': 3.91544, 'Debye_0': 822.743, 'grueneisen_0': 1.05404, 'q_0': 1.88887, 'G_0': 1.09e+11, 'Gprime_0': 1.16274, 'eta_s_0': 2.38418, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 11000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 2.0, 'err_grueneisen_0': 0.06, 'err_q_0': 0.2, 'err_G_0': 4000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.1} Mineral.__init__(self) class mg_majorite (Mineral): def __init__(self): formula = 'Mg4Si4O12' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Majorite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -5691614.0, 'V_0': 0.000114324, 'K_0': 1.651183e+11, 'Kprime_0': 4.21183, 'Debye_0': 822.458, 'grueneisen_0': 0.97682, 'q_0': 1.53581, 'G_0': 84999990000.0, 'Gprime_0': 1.42969, 'eta_s_0': 1.0178, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 10000.0, 'err_V_0': 0.0, 'err_K_0': 3000000000.0, 'err_K_prime_0': 0.3, 'err_Debye_0': 4.0, 'err_grueneisen_0': 0.07, 'err_q_0': 0.5, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.2, 'err_eta_s_0': 0.3} Mineral.__init__(self) class jd_majorite (Mineral): def 
__init__(self): formula = 'Na2Al2Si4O12' formula = dictionarize_formula(formula) self.params = { 'name': 'Jd_Majorite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -5518542.0, 'V_0': 0.00011094, 'K_0': 1.770772e+11, 'Kprime_0': 4.11067, 'Debye_0': 895.914, 'grueneisen_0': 1.01424, 'q_0': 1.42169, 'G_0': 1.25e+11, 'Gprime_0': 1.35756, 'eta_s_0': 3.30517, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 14000.0, 'err_V_0': 0.0, 'err_K_0': 7000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 18.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 4000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class quartz (Mineral): def __init__(self): formula = 'SiO2' formula = dictionarize_formula(formula) self.params = { 'name': 'Quartz', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -858853.4, 'V_0': 2.367003e-05, 'K_0': 49547430000.0, 'Kprime_0': 4.33155, 'Debye_0': 816.3307, 'grueneisen_0': -0.00296, 'q_0': 1.0, 'G_0': 44856170000.0, 'Gprime_0': 0.95315, 'eta_s_0': 2.36469, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['landau', {'Tc_0': 847.0, 'S_D': 5.164, 'V_D': 1.222e-06}]] self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 31.0, 'err_grueneisen_0': 0.05, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class coesite (Mineral): def __init__(self): formula = 'SiO2' formula = dictionarize_formula(formula) self.params = { 'name': 'Coesite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -855068.5, 'V_0': 2.0657e-05, 'K_0': 1.135856e+11, 'Kprime_0': 4.0, 'Debye_0': 852.4267, 'grueneisen_0': 0.39157, 'q_0': 1.0, 'G_0': 61600010000.0, 'Gprime_0': 1.24734, 'eta_s_0': 2.39793, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 
'err_K_0': 1000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 9.0, 'err_grueneisen_0': 0.05, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class stishovite (Mineral): def __init__(self): formula = 'SiO2' formula = dictionarize_formula(formula) self.params = { 'name': 'Stishovite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -818984.6, 'V_0': 1.4017e-05, 'K_0': 3.143352e+11, 'Kprime_0': 3.75122, 'Debye_0': 1107.824, 'grueneisen_0': 1.37466, 'q_0': 2.83517, 'G_0': 2.2e+11, 'Gprime_0': 1.93334, 'eta_s_0': 4.60904, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['landau', {'Tc_0': -4250.0, 'S_D': 0.012, 'V_D': 1e-09}]] self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 8000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 13.0, 'err_grueneisen_0': 0.17, 'err_q_0': 2.2, 'err_G_0': 12000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class seifertite (Mineral): def __init__(self): formula = 'SiO2' formula = dictionarize_formula(formula) self.params = { 'name': 'Seifertite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -794335.4, 'V_0': 1.367e-05, 'K_0': 3.275843e+11, 'Kprime_0': 4.01553, 'Debye_0': 1140.772, 'grueneisen_0': 1.37466, 'q_0': 2.83517, 'G_0': 2.274532e+11, 'Gprime_0': 1.76965, 'eta_s_0': 4.97108, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 2000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 16.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class mg_perovskite (Mineral): def __init__(self): formula = 'MgSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1368283.0, 'V_0': 2.4445e-05, 'K_0': 2.505264e+11, 'Kprime_0': 4.14, 
'Debye_0': 905.9412, 'grueneisen_0': 1.56508, 'q_0': 1.10945, 'G_0': 1.729e+11, 'Gprime_0': 1.69037, 'eta_s_0': 2.56536, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 3000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 5.0, 'err_grueneisen_0': 0.05, 'err_q_0': 0.3, 'err_G_0': 2000000000.0, 'err_Gprime_0': 0.0, 'err_eta_s_0': 0.3} Mineral.__init__(self) class fe_perovskite (Mineral): def __init__(self): formula = 'FeSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1040920.0, 'V_0': 2.5485e-05, 'K_0': 2.721152e+11, 'Kprime_0': 4.14, 'Debye_0': 870.8122, 'grueneisen_0': 1.56508, 'q_0': 1.10945, 'G_0': 1.326849e+11, 'Gprime_0': 1.37485, 'eta_s_0': 2.29211, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 6000.0, 'err_V_0': 0.0, 'err_K_0': 40000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 26.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 40000000000.0, 'err_Gprime_0': 0.0, 'err_eta_s_0': 1.0} Mineral.__init__(self) class al_perovskite (Mineral): def __init__(self): formula = 'AlAlO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Al_perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1533878.0, 'V_0': 2.4944e-05, 'K_0': 2.582e+11, 'Kprime_0': 4.14, 'Debye_0': 886.4601, 'grueneisen_0': 1.56508, 'q_0': 1.10945, 'G_0': 1.713116e+11, 'Gprime_0': 1.49706, 'eta_s_0': 2.47126, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 2000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 0.5, 'err_Debye_0': 7.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.5} Mineral.__init__(self) class 
mg_post_perovskite (Mineral): def __init__(self): formula = 'MgSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Post_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1348641.0, 'V_0': 2.4419e-05, 'K_0': 2.312e+11, 'Kprime_0': 4.0, 'Debye_0': 855.8173, 'grueneisen_0': 1.89155, 'q_0': 1.09081, 'G_0': 1.50167e+11, 'Gprime_0': 1.97874, 'eta_s_0': 1.16704, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 7.0, 'err_grueneisen_0': 0.03, 'err_q_0': 0.1, 'err_G_0': 4000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.2} Mineral.__init__(self) class fe_post_perovskite (Mineral): def __init__(self): formula = 'FeSiO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Post_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -981806.9, 'V_0': 2.5459e-05, 'K_0': 2.312e+11, 'Kprime_0': 4.0, 'Debye_0': 781.3465, 'grueneisen_0': 1.89155, 'q_0': 1.09081, 'G_0': 1.295e+11, 'Gprime_0': 1.44675, 'eta_s_0': 1.36382, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 21000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 52.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 5000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class al_post_perovskite (Mineral): def __init__(self): formula = 'AlAlO3' formula = dictionarize_formula(formula) self.params = { 'name': 'Al_Post_Perovskite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1377582.0, 'V_0': 2.3847e-05, 'K_0': 2.49e+11, 'Kprime_0': 4.0, 'Debye_0': 762.1951, 'grueneisen_0': 1.64573, 'q_0': 1.09081, 'G_0': 91965310000.0, 'Gprime_0': 1.81603, 'eta_s_0': 2.83762, 'n': sum(formula.values()), 'molar_mass': 
formula_mass(formula)} self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 20000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 9.0, 'err_grueneisen_0': 0.02, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.2} Mineral.__init__(self) class periclase (Mineral): def __init__(self): formula = 'MgO' formula = dictionarize_formula(formula) self.params = { 'name': 'Periclase', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -569444.6, 'V_0': 1.1244e-05, 'K_0': 1.613836e+11, 'Kprime_0': 3.84045, 'Debye_0': 767.0977, 'grueneisen_0': 1.36127, 'q_0': 1.7217, 'G_0': 1.309e+11, 'Gprime_0': 2.1438, 'eta_s_0': 2.81765, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 0.0, 'err_V_0': 0.0, 'err_K_0': 3000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 9.0, 'err_grueneisen_0': 0.05, 'err_q_0': 0.2, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 0.2} Mineral.__init__(self) class wuestite (Mineral): def __init__(self): formula = 'FeO' formula = dictionarize_formula(formula) self.params = { 'name': 'Wuestite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -242146.0, 'V_0': 1.2264e-05, 'K_0': 1.794442e+11, 'Kprime_0': 4.9376, 'Debye_0': 454.1592, 'grueneisen_0': 1.53047, 'q_0': 1.7217, 'G_0': 59000000000.0, 'Gprime_0': 1.44673, 'eta_s_0': -0.05731, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 1000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.2, 'err_Debye_0': 21.0, 'err_grueneisen_0': 0.13, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class mg_ca_ferrite (Mineral): def __init__(self): formula = 'MgAl2O4' formula = dictionarize_formula(formula) self.params = { 'name': 'Mg_Ca_Ferrite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': 
-2122169.0, 'V_0': 3.6177e-05, 'K_0': 2.106663e+11, 'Kprime_0': 4.0528, 'Debye_0': 838.6291, 'grueneisen_0': 1.31156, 'q_0': 1.0, 'G_0': 1.29826e+11, 'Gprime_0': 1.75878, 'eta_s_0': 2.1073, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 16.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class fe_ca_ferrite (Mineral): def __init__(self): formula = 'FeAl2O4' formula = dictionarize_formula(formula) self.params = { 'name': 'Fe_Ca_Ferrite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1790284.0, 'V_0': 3.7258e-05, 'K_0': 2.106663e+11, 'Kprime_0': 4.0528, 'Debye_0': 804.1986, 'grueneisen_0': 1.31156, 'q_0': 1.0, 'G_0': 1.535236e+11, 'Gprime_0': 1.75878, 'eta_s_0': 3.0268, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.property_modifiers = [ ['linear', {'delta_E': 0.0, 'delta_S': 13.38, 'delta_V': 0.0}]] self.uncertainties = { 'err_F_0': 25000.0, 'err_V_0': 0.0, 'err_K_0': 10000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 69.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class na_ca_ferrite (Mineral): def __init__(self): formula = 'NaAlSiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Na_Ca_Ferrite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1844129.0, 'V_0': 3.627e-05, 'K_0': 1.613385e+11, 'Kprime_0': 4.32479, 'Debye_0': 812.4769, 'grueneisen_0': 0.69428, 'q_0': 1.0, 'G_0': 1.220049e+11, 'Gprime_0': 2.07687, 'eta_s_0': 2.79016, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 11000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.1, 'err_Debye_0': 51.0, 'err_grueneisen_0': 0.3, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 
'err_Gprime_0': 0.1, 'err_eta_s_0': 1.0} Mineral.__init__(self) class kyanite (Mineral): def __init__(self): formula = 'Al2SiO5' formula = dictionarize_formula(formula) self.params = { 'name': 'Kyanite', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -2446058.0, 'V_0': 4.4227e-05, 'K_0': 1.6e+11, 'Kprime_0': 4.0, 'Debye_0': 943.1665, 'grueneisen_0': 0.9255, 'q_0': 1.0, 'G_0': 1.204033e+11, 'Gprime_0': 1.7308, 'eta_s_0': 2.96665, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 4000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 0.0, 'err_Debye_0': 8.0, 'err_grueneisen_0': 0.07, 'err_q_0': 1.0, 'err_G_0': 10000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) class nepheline (Mineral): def __init__(self): formula = 'NaAlSiO4' formula = dictionarize_formula(formula) self.params = { 'name': 'Nepheline', 'formula': formula, 'equation_of_state': 'slb3', 'F_0': -1992104.0, 'V_0': 5.46684e-05, 'K_0': 53077990000.0, 'Kprime_0': 4.0, 'Debye_0': 700.9422, 'grueneisen_0': 0.69428, 'q_0': 1.0, 'G_0': 30700000000.0, 'Gprime_0': 1.33031, 'eta_s_0': 0.6291, 'n': sum(formula.values()), 'molar_mass': formula_mass(formula)} self.uncertainties = { 'err_F_0': 3000.0, 'err_V_0': 0.0, 'err_K_0': 1000000000.0, 'err_K_prime_0': 1.0, 'err_Debye_0': 13.0, 'err_grueneisen_0': 0.03, 'err_q_0': 1.0, 'err_G_0': 1000000000.0, 'err_Gprime_0': 0.5, 'err_eta_s_0': 1.0} Mineral.__init__(self) ''' Mineral aliases ''' # Feldspars ab = albite an = anorthite # LP Spinels sp = spinel hc = hercynite # Olivine polymorphs fo = forsterite fa = fayalite mgwa = mg_wadsleyite fewa = fe_wadsleyite mgri = mg_ringwoodite feri = fe_ringwoodite # Orthopyroxenes en = enstatite fs = ferrosilite mgts = mg_tschermaks odi = ortho_diopside # Clinopyroxenes di = diopside he = hedenbergite cen = clinoenstatite cats = ca_tschermaks jd = jadeite mgc2 = hp_clinoenstatite fec2 = hp_clinoferrosilite hpcen = hp_clinoenstatite hpcfs = 
hp_clinoferrosilite # Perovskites mgpv = mg_perovskite mg_bridgmanite = mg_perovskite fepv = fe_perovskite fe_bridgmanite = fe_perovskite alpv = al_perovskite capv = ca_perovskite # Ilmenite group mgil = mg_akimotoite feil = fe_akimotoite co = corundum # Garnet group py = pyrope al = almandine gr = grossular mgmj = mg_majorite jdmj = jd_majorite # Quartz polymorphs qtz = quartz coes = coesite st = stishovite seif = seifertite # Post perovskites mppv = mg_post_perovskite fppv = fe_post_perovskite appv = al_post_perovskite # Magnesiowuestite pe = periclase wu = wuestite # Calcium ferrite structured phases mgcf = mg_ca_ferrite fecf = fe_ca_ferrite nacf = na_ca_ferrite # Al2SiO5 polymorphs ky = kyanite # Nepheline group neph = nepheline # Solid solution aliases c2c = c2c_pyroxene cf = ca_ferrite_structured_phase cpx = clinopyroxene gt = garnet il = akimotoite ilmenite_group = akimotoite mw = ferropericlase magnesiowuestite = ferropericlase ol = mg_fe_olivine opx = orthopyroxene plag = plagioclase ppv = post_perovskite pv = mg_fe_perovskite mg_fe_bridgmanite = mg_fe_perovskite mg_fe_silicate_perovskite = mg_fe_perovskite ri = mg_fe_ringwoodite spinel_group = mg_fe_aluminous_spinel wa = mg_fe_wadsleyite spinelloid_III = mg_fe_wadsleyite
gpl-2.0
spring01/libPSI
lib/python/grendel/interface/legacy_xml.py
1
5800
""" Legacy support for parsing results from Grendel++ data.xml files """ from __future__ import print_function from collections import defaultdict import sys from xml.etree import ElementTree from grendel import sanity_checking_enabled from grendel.chemistry.molecular_properties import Energy, MolecularProperty from grendel.chemistry.molecule_dict import MoleculeDict from grendel.chemistry.molecule import MoleculeStub from grendel.interface.computation_details import ComputationDetails from grendel.interface.result_getter import ResultGetter from grendel.util.metaprogramming import ReadOnlyAttribute from grendel.util.overloading import listify_args from grendel.util.strings import indented from grendel.util.units import * from grendel.util.units.unit import DistanceUnit class InvalidLegacyXMLFileError(ValueError): pass class PropertyUnavailableError(ValueError): pass class XMLResultGetterError(ValueError): pass class LegacyXMLResultGetter(ResultGetter): ############## # Attributes # ############## files = ReadOnlyAttribute('files') properties = None properties_for_molecules = None ################## # Initialization # ################## def __init__(self, comparison_representation, *files): self.started = True self._files = listify_args(*files) self.properties = MoleculeDict(comparison_representation, default=lambda: []) self.properties_for_molecules = defaultdict(lambda: []) for file in files: self._parse_file(file) ################### # Private Methods # ################### def _parse_file(self, file): def _get_at_least_one(parent, tag, dispnum): ret_val = parent.findall(tag) if sanity_checking_enabled: if len(ret_val) == 0: raise InvalidLegacyXMLFileError("missing {} section " "for displacement number {}".format(tag, dispnum)) return ret_val def _get_exactly_one(parent, tag, dispnum): ret_val = _get_at_least_one(parent, tag, dispnum) if sanity_checking_enabled: if len(ret_val) > 1: raise InvalidLegacyXMLFileError("multiple {} sections " "for displacement number 
{}".format(tag, dispnum)) return ret_val[0] #========================================# etr = ElementTree.parse(file) for disp in etr.iter('displacement'): disp_number = disp.get('number', '<unnumbered>') # Get the molecule part mol_sect = _get_exactly_one(disp, 'molecule', disp_number) # Get the XYZ section xyz_sect = _get_exactly_one(mol_sect, 'xyz', disp_number) if 'units' in xyz_sect.keys(): unitstr = xyz_sect.get('units') units = eval(unitstr.title(), globals()) else: units = DistanceUnit.default energy_el = _get_at_least_one(disp, 'energy', disp_number) # for now just use the "molecular" energy energy_el = [e for e in energy_el if e.get('type', '') == 'molecular'] if len(energy_el) == 0: raise InvalidLegacyXMLFileError("missing energy with type='molecular' " "for displacement number {}".format(disp_number)) elif len(energy_el) > 1: raise InvalidLegacyXMLFileError("multiple energy elements with type='molecular' " "for displacement number {}".format(disp_number)) energy_el = energy_el[0] if 'units' in energy_el.keys(): unitstr = energy_el.get('units') energy_units = eval(unitstr.title(), globals()) else: energy_units = Hartrees energy_val = float(energy_el.get('value')) * energy_units mol_stub = MoleculeStub(xyz_sect.text, units=units) energy = Energy(mol_stub, units=energy_units, details=ComputationDetails(type='molecular')) energy.value = energy_val self.properties[mol_stub].append(energy) def can_get_property_for_molecule(self, molecule, property, details=None): return self.has_property_for_molecule(molecule, property, details) def has_property_for_molecule(self, molecule, property, details=None, verbose=True): if molecule in self.properties: props = self.properties[molecule] for p in props: pcopy = copy(p) pcopy.molecule = molecule self.properties_for_molecules[molecule].append(pcopy) if MolecularProperty.is_same_property(property, p): if ComputationDetails.is_compatible_details(details, p.details): return True return False def 
get_property_for_molecule(self, molecule, property, details=None): for p in self.properties_for_molecules[molecule]: if MolecularProperty.is_same_property(property, p): if ComputationDetails.is_compatible_details(details, p.details): return p props = self.properties[molecule] for p in props: pcopy = copy(p) pcopy.molecule = molecule self.properties_for_molecules[molecule].append(pcopy) if MolecularProperty.is_same_property(property, p): if ComputationDetails.is_compatible_details(details, p.details): return pcopy raise PropertyUnavailableError ################### # Private Methods # ###################
gpl-2.0
Denisolt/IEEE-NYIT-MA
local/lib/python2.7/site-packages/PIL/MspImagePlugin.py
8
5580
# # The Python Imaging Library. # # MSP file handling # # This is the format used by the Paint program in Windows 1 and 2. # # History: # 95-09-05 fl Created # 97-01-03 fl Read/write MSP images # 17-02-21 es Fixed RLE interpretation # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1995-97. # Copyright (c) Eric Soroos 2017. # # See the README file for information on usage and redistribution. # # More info on this format: https://archive.org/details/gg243631 # Page 313: # Figure 205. Windows Paint Version 1: "DanM" Format # Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 # # See also: http://www.fileformat.info/format/mspaint/egff.htm from . import Image, ImageFile from ._binary import i16le as i16, o16le as o16, i8 import struct, io __version__ = "0.1" # # read MSP files def _accept(prefix): return prefix[:4] in [b"DanM", b"LinS"] ## # Image plugin for Windows MSP images. This plugin supports both # uncompressed (Windows 1.0). class MspImageFile(ImageFile.ImageFile): format = "MSP" format_description = "Windows Paint" def _open(self): # Header s = self.fp.read(32) if s[:4] not in [b"DanM", b"LinS"]: raise SyntaxError("not an MSP file") # Header checksum checksum = 0 for i in range(0, 32, 2): checksum = checksum ^ i16(s[i:i+2]) if checksum != 0: raise SyntaxError("bad MSP checksum") self.mode = "1" self.size = i16(s[4:]), i16(s[6:]) if s[:4] == b"DanM": self.tile = [("raw", (0, 0)+self.size, 32, ("1", 0, 1))] else: self.tile = [("MSP", (0, 0)+self.size, 32, None)] class MspDecoder(ImageFile.PyDecoder): # The algo for the MSP decoder is from # http://www.fileformat.info/format/mspaint/egff.htm # cc-by-attribution -- That page references is taken from the # Encyclopedia of Graphics File Formats and is licensed by # O'Reilly under the Creative Common/Attribution license # # For RLE encoded files, the 32byte header is followed by a scan # line map, encoded as one 16bit word of encoded byte length per # line. 
# # NOTE: the encoded length of the line can be 0. This was not # handled in the previous version of this encoder, and there's no # mention of how to handle it in the documentation. From the few # examples I've seen, I've assumed that it is a fill of the # background color, in this case, white. # # # Pseudocode of the decoder: # Read a BYTE value as the RunType # If the RunType value is zero # Read next byte as the RunCount # Read the next byte as the RunValue # Write the RunValue byte RunCount times # If the RunType value is non-zero # Use this value as the RunCount # Read and write the next RunCount bytes literally # # e.g.: # 0x00 03 ff 05 00 01 02 03 04 # would yield the bytes: # 0xff ff ff 00 01 02 03 04 # # which are then interpreted as a bit packed mode '1' image _pulls_fd = True def decode(self, buffer): img = io.BytesIO() blank_line = bytearray((0xff,)*((self.state.xsize+7)//8)) try: self.fd.seek(32) rowmap = struct.unpack_from("<%dH" % (self.state.ysize), self.fd.read(self.state.ysize*2)) except struct.error: raise IOError("Truncated MSP file in row map") for x, rowlen in enumerate(rowmap): try: if rowlen == 0: img.write(blank_line) continue row = self.fd.read(rowlen) if len(row) != rowlen: raise IOError("Truncated MSP file, expected %d bytes on row %s", (rowlen, x)) idx = 0 while idx < rowlen: runtype = i8(row[idx]) idx += 1 if runtype == 0: (runcount, runval) = struct.unpack("Bc", row[idx:idx+2]) img.write(runval * runcount) idx += 2 else: runcount = runtype img.write(row[idx:idx+runcount]) idx += runcount except struct.error: raise IOError("Corrupted MSP file in row %d" %x) self.set_as_raw(img.getvalue(), ("1", 0, 1)) return 0,0 Image.register_decoder('MSP', MspDecoder) # # write MSP files (uncompressed only) def _save(im, fp, filename): if im.mode != "1": raise IOError("cannot write mode %s as MSP" % im.mode) # create MSP header header = [0] * 16 header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 header[2], header[3] = im.size header[4], 
header[5] = 1, 1 header[6], header[7] = 1, 1 header[8], header[9] = im.size checksum = 0 for h in header: checksum = checksum ^ h header[12] = checksum # FIXME: is this the right field? # header for h in header: fp.write(o16(h)) # image body ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 32, ("1", 0, 1))]) # # registry Image.register_open(MspImageFile.format, MspImageFile, _accept) Image.register_save(MspImageFile.format, _save) Image.register_extension(MspImageFile.format, ".msp")
gpl-3.0
chaosmaker/pyload
module/plugins/internal/SimpleCrypter.py
2
3541
# -*- coding: utf-8 -*- """ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. @author: zoidberg """ import re from module.plugins.Crypter import Crypter from module.utils import html_unescape class SimpleCrypter(Crypter): __name__ = "SimpleCrypter" __version__ = "0.06" __pattern__ = None __type__ = "crypter" __description__ = """Base crypter plugin""" __author_name__ = ("stickell", "zoidberg") __author_mail__ = ("l.stickell@yahoo.it", "zoidberg@mujmail.cz") """ These patterns should be defined by each crypter: LINK_PATTERN: group(1) must be a download link example: <div class="link"><a href="(http://speedload.org/\w+) TITLE_PATTERN: (optional) the group defined by 'title' should be the title example: <title>Files of: (?P<title>[^<]+) folder</title> If it's impossible to extract the links using the LINK_PATTERN only you can override the getLinks method. 
If the links are disposed on multiple pages you need to define a pattern: PAGES_PATTERN: the group defined by 'pages' must be the total number of pages and a function: loadPage(self, page_n): must return the html of the page number 'page_n' """ def decrypt(self, pyfile): self.html = self.load(pyfile.url, decode=True) package_name, folder_name = self.getPackageNameAndFolder() self.package_links = self.getLinks() if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'): self.handleMultiPages() self.logDebug('Package has %d links' % len(self.package_links)) if self.package_links: self.packages = [(package_name, self.package_links, folder_name)] else: self.fail('Could not extract any links') def getLinks(self): """ Returns the links extracted from self.html You should override this only if it's impossible to extract links using only the LINK_PATTERN. """ return re.findall(self.LINK_PATTERN, self.html) def getPackageNameAndFolder(self): if hasattr(self, 'TITLE_PATTERN'): m = re.search(self.TITLE_PATTERN, self.html) if m: name = folder = html_unescape(m.group('title').strip()) self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder)) return name, folder name = self.pyfile.package().name folder = self.pyfile.package().folder self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder)) return name, folder def handleMultiPages(self): pages = re.search(self.PAGES_PATTERN, self.html) if pages: pages = int(pages.group('pages')) else: pages = 1 for p in range(2, pages + 1): self.html = self.loadPage(p) self.package_links += self.getLinks()
gpl-3.0
avloss/serving
tensorflow_serving/example/inception_saved_model.py
2
7908
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== #!/usr/bin/env python2.7 """Export inception model given existing training checkpoints. The model is exported as SavedModel with proper signatures that can be loaded by standard tensorflow_model_server. """ import os.path # This is a placeholder for a Google-internal import. import tensorflow as tf from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils from tensorflow.python.util import compat from inception import inception_model tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/inception_train', """Directory where to read training checkpoints.""") tf.app.flags.DEFINE_string('output_dir', '/tmp/inception_output', """Directory where to export inference model.""") tf.app.flags.DEFINE_integer('model_version', 1, """Version number of the model.""") tf.app.flags.DEFINE_integer('image_size', 299, """Needs to provide same value as in training.""") FLAGS = tf.app.flags.FLAGS NUM_CLASSES = 1000 NUM_TOP_CLASSES = 5 WORKING_DIR = os.path.dirname(os.path.realpath(__file__)) SYNSET_FILE = os.path.join(WORKING_DIR, 'imagenet_lsvrc_2015_synsets.txt') METADATA_FILE = 
os.path.join(WORKING_DIR, 'imagenet_metadata.txt') def export(): # Create index->synset mapping synsets = [] with open(SYNSET_FILE) as f: synsets = f.read().splitlines() # Create synset->metadata mapping texts = {} with open(METADATA_FILE) as f: for line in f.read().splitlines(): parts = line.split('\t') assert len(parts) == 2 texts[parts[0]] = parts[1] with tf.Graph().as_default(): # Build inference model. # Please refer to Tensorflow inception model for details. # Input transformation. serialized_tf_example = tf.placeholder(tf.string, name='tf_example') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(serialized_tf_example, feature_configs) jpegs = tf_example['image/encoded'] images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32) # Run inference. logits, _ = inception_model.inference(images, NUM_CLASSES + 1) # Transform output to topK result. values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES) # Create a constant string Tensor where the i'th element is # the human readable class description for the i'th index. # Note that the 0th index is an unused background class # (see inception model definition code). class_descriptions = ['unused background'] for s in synsets: class_descriptions.append(texts[s]) class_tensor = tf.constant(class_descriptions) classes = tf.contrib.lookup.index_to_string( tf.to_int64(indices), mapping=class_tensor) # Restore variables from training checkpoint. variable_averages = tf.train.ExponentialMovingAverage( inception_model.MOVING_AVERAGE_DECAY) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) with tf.Session() as sess: # Restore variables from training checkpoints. 
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) # Assuming model_checkpoint_path looks something like: # /my-favorite-path/imagenet_train/model.ckpt-0, # extract global_step from it. global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] print 'Successfully loaded model from %s at step=%s.' % ( ckpt.model_checkpoint_path, global_step) else: print 'No checkpoint file found at %s' % FLAGS.checkpoint_dir return # Export inference model. output_path = os.path.join( compat.as_bytes(FLAGS.output_dir), compat.as_bytes(str(FLAGS.model_version))) print 'Exporting trained model to', output_path builder = saved_model_builder.SavedModelBuilder(output_path) # Build the signature_def_map. classify_inputs_tensor_info = utils.build_tensor_info( serialized_tf_example) classes_output_tensor_info = utils.build_tensor_info(classes) scores_output_tensor_info = utils.build_tensor_info(values) classification_signature = signature_def_utils.build_signature_def( inputs={ signature_constants.CLASSIFY_INPUTS: classify_inputs_tensor_info }, outputs={ signature_constants.CLASSIFY_OUTPUT_CLASSES: classes_output_tensor_info, signature_constants.CLASSIFY_OUTPUT_SCORES: scores_output_tensor_info }, method_name=signature_constants.CLASSIFY_METHOD_NAME) predict_inputs_tensor_info = utils.build_tensor_info(jpegs) prediction_signature = signature_def_utils.build_signature_def( inputs={'images': predict_inputs_tensor_info}, outputs={ 'classes': classes_output_tensor_info, 'scores': scores_output_tensor_info }, method_name=signature_constants.PREDICT_METHOD_NAME) legacy_init_op = tf.group( tf.initialize_all_tables(), name='legacy_init_op') builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], signature_def_map={ 'predict_images': prediction_signature, signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: classification_signature, }, legacy_init_op=legacy_init_op) 
builder.save() print 'Successfully exported model to %s' % FLAGS.output_dir def preprocess_image(image_buffer): """Preprocess JPEG encoded bytes to 3D float Tensor.""" # Decode the string as an RGB JPEG. # Note that the resulting image contains an unknown height and width # that is set dynamically by decode_jpeg. In other words, the height # and width of image is unknown at compile-time. image = tf.image.decode_jpeg(image_buffer, channels=3) # After this point, all image pixels reside in [0,1) # until the very end, when they're rescaled to (-1, 1). The various # adjust_* ops all require this range for dtype float. image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the image with an area containing 87.5% of # the original image. image = tf.image.central_crop(image, central_fraction=0.875) # Resize the image to the original height and width. image = tf.expand_dims(image, 0) image = tf.image.resize_bilinear( image, [FLAGS.image_size, FLAGS.image_size], align_corners=False) image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def main(unused_argv=None): export() if __name__ == '__main__': tf.app.run()
apache-2.0
cyandterry/Python-Study
Ninja/Leetcode/123_Best_Time_to_Buy_and_Sell_Stock_III.py
2
1138
""" Say you have an array for which the ith element is the price of a given stock on day i. Design an algorithm to find the maximum profit. You may complete at most two transactions. Note: You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again). """ class Solution: # @param prices, a list of integer # @return an integer def maxProfit(self, prices): N = len(prices) if N <= 1: return 0 dp_1 = [0 for i in range(N)] dp_2 = [0 for i in range(N)] min_price = prices[0] i = 1 while i < N: min_price = min(min_price, prices[i]) dp_1[i] = max(dp_1[i-1], prices[i]-min_price) i+= 1 max_price = prices[-1] i = N-2 while i >= 0: max_price = max(max_price, prices[i]) dp_2[i] = max(dp_2[i+1], max_price-prices[i]) i -= 1 res = 0 for i in range(N): res = max(res, dp_1[i] + dp_2[i]) return res # Very similart to trapping rain water, from left to right then right to left
mit
dims/nova
nova/tests/unit/virt/libvirt/fakelibvirt.py
9
41011
# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import uuid import fixtures from lxml import etree import six from nova.compute import arch from nova.virt.libvirt import config as vconfig # Allow passing None to the various connect methods # (i.e. allow the client to rely on default URLs) allow_default_uri_connection = True # Has libvirt connection been used at least once connection_used = False def _reset(): global allow_default_uri_connection allow_default_uri_connection = True # virDomainState VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 # NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h VIR_DOMAIN_XML_SECURE = 1 VIR_DOMAIN_XML_INACTIVE = 2 VIR_DOMAIN_XML_UPDATE_CPU = 4 VIR_DOMAIN_XML_MIGRATABLE = 8 VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1 VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2 VIR_DOMAIN_BLOCK_REBASE_COPY = 8 VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1 VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 VIR_DOMAIN_EVENT_DEFINED = 0 VIR_DOMAIN_EVENT_UNDEFINED = 1 VIR_DOMAIN_EVENT_STARTED = 2 VIR_DOMAIN_EVENT_SUSPENDED = 3 VIR_DOMAIN_EVENT_RESUMED = 4 VIR_DOMAIN_EVENT_STOPPED = 5 VIR_DOMAIN_EVENT_SHUTDOWN = 6 VIR_DOMAIN_EVENT_PMSUSPENDED = 7 VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1 VIR_DOMAIN_AFFECT_CURRENT = 0 VIR_DOMAIN_AFFECT_LIVE = 1 VIR_DOMAIN_AFFECT_CONFIG = 2 VIR_CPU_COMPARE_ERROR = -1 
VIR_CPU_COMPARE_INCOMPATIBLE = 0 VIR_CPU_COMPARE_IDENTICAL = 1 VIR_CPU_COMPARE_SUPERSET = 2 VIR_CRED_USERNAME = 1 VIR_CRED_AUTHNAME = 2 VIR_CRED_LANGUAGE = 3 VIR_CRED_CNONCE = 4 VIR_CRED_PASSPHRASE = 5 VIR_CRED_ECHOPROMPT = 6 VIR_CRED_NOECHOPROMPT = 7 VIR_CRED_REALM = 8 VIR_CRED_EXTERNAL = 9 VIR_MIGRATE_LIVE = 1 VIR_MIGRATE_PEER2PEER = 2 VIR_MIGRATE_TUNNELLED = 4 VIR_MIGRATE_PERSIST_DEST = 8 VIR_MIGRATE_UNDEFINE_SOURCE = 16 VIR_MIGRATE_NON_SHARED_INC = 128 VIR_NODE_CPU_STATS_ALL_CPUS = -1 VIR_DOMAIN_START_PAUSED = 1 # libvirtError enums # (Intentionally different from what's in libvirt. We do this to check, # that consumers of the library are using the symbolic names rather than # hardcoding the numerical values) VIR_FROM_QEMU = 100 VIR_FROM_DOMAIN = 200 VIR_FROM_NWFILTER = 330 VIR_FROM_REMOTE = 340 VIR_FROM_RPC = 345 VIR_FROM_NODEDEV = 666 VIR_ERR_INVALID_ARG = 8 VIR_ERR_NO_SUPPORT = 3 VIR_ERR_XML_DETAIL = 350 VIR_ERR_NO_DOMAIN = 420 VIR_ERR_OPERATION_FAILED = 510 VIR_ERR_OPERATION_INVALID = 55 VIR_ERR_OPERATION_TIMEOUT = 68 VIR_ERR_NO_NWFILTER = 620 VIR_ERR_SYSTEM_ERROR = 900 VIR_ERR_INTERNAL_ERROR = 950 VIR_ERR_CONFIG_UNSUPPORTED = 951 VIR_ERR_NO_NODE_DEVICE = 667 VIR_ERR_NO_SECRET = 66 # Readonly VIR_CONNECT_RO = 1 # virConnectBaselineCPU flags VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1 # snapshotCreateXML flags VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4 VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16 VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 # blockCommit flags VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4 # blockRebase flags VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8 VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1 VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2 # secret type VIR_SECRET_USAGE_TYPE_NONE = 0 VIR_SECRET_USAGE_TYPE_VOLUME = 1 VIR_SECRET_USAGE_TYPE_CEPH = 2 VIR_SECRET_USAGE_TYPE_ISCSI = 3 # Libvirt version FAKE_LIBVIRT_VERSION = 10002 class HostInfo(object): def __init__(self, arch=arch.X86_64, kB_mem=4096, cpus=2, cpu_mhz=800, cpu_nodes=1, 
cpu_sockets=1, cpu_cores=2, cpu_threads=1, cpu_model="Penryn", cpu_vendor="Intel", numa_topology='', cpu_disabled=None): """Create a new Host Info object :param arch: (string) indicating the CPU arch (eg 'i686' or whatever else uname -m might return) :param kB_mem: (int) memory size in KBytes :param cpus: (int) the number of active CPUs :param cpu_mhz: (int) expected CPU frequency :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual NUMA topologies or uniform :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1, total number of CPU sockets otherwise :param cpu_cores: (int) number of cores per socket :param cpu_threads: (int) number of threads per core :param cpu_model: CPU model :param cpu_vendor: CPU vendor :param numa_topology: Numa topology :param cpu_disabled: List of disabled cpus """ self.arch = arch self.kB_mem = kB_mem self.cpus = cpus self.cpu_mhz = cpu_mhz self.cpu_nodes = cpu_nodes self.cpu_cores = cpu_cores self.cpu_threads = cpu_threads self.cpu_sockets = cpu_sockets self.cpu_model = cpu_model self.cpu_vendor = cpu_vendor self.numa_topology = numa_topology self.disabled_cpus_list = cpu_disabled or [] @classmethod def _gen_numa_topology(self, cpu_nodes, cpu_sockets, cpu_cores, cpu_threads, kb_mem, numa_mempages_list=None): topology = vconfig.LibvirtConfigCapsNUMATopology() cpu_count = 0 for cell_count in range(cpu_nodes): cell = vconfig.LibvirtConfigCapsNUMACell() cell.id = cell_count cell.memory = kb_mem / cpu_nodes for socket_count in range(cpu_sockets): for cpu_num in range(cpu_cores * cpu_threads): cpu = vconfig.LibvirtConfigCapsNUMACPU() cpu.id = cpu_count cpu.socket_id = cell_count cpu.core_id = cpu_num // cpu_threads cpu.siblings = set([cpu_threads * (cpu_count // cpu_threads) + thread for thread in range(cpu_threads)]) cell.cpus.append(cpu) cpu_count += 1 # Set mempages per numa cell. if numa_mempages_list is empty # we will set only the default 4K pages. 
if numa_mempages_list: mempages = numa_mempages_list[cell_count] else: mempages = vconfig.LibvirtConfigCapsNUMAPages() mempages.size = 4 mempages.total = cell.memory / mempages.size mempages = [mempages] cell.mempages = mempages topology.cells.append(cell) return topology def get_numa_topology(self): return self.numa_topology VIR_DOMAIN_JOB_NONE = 0 VIR_DOMAIN_JOB_BOUNDED = 1 VIR_DOMAIN_JOB_UNBOUNDED = 2 VIR_DOMAIN_JOB_COMPLETED = 3 VIR_DOMAIN_JOB_FAILED = 4 VIR_DOMAIN_JOB_CANCELLED = 5 def _parse_disk_info(element): disk_info = {} disk_info['type'] = element.get('type', 'file') disk_info['device'] = element.get('device', 'disk') driver = element.find('./driver') if driver is not None: disk_info['driver_name'] = driver.get('name') disk_info['driver_type'] = driver.get('type') source = element.find('./source') if source is not None: disk_info['source'] = source.get('file') if not disk_info['source']: disk_info['source'] = source.get('dev') if not disk_info['source']: disk_info['source'] = source.get('path') target = element.find('./target') if target is not None: disk_info['target_dev'] = target.get('dev') disk_info['target_bus'] = target.get('bus') return disk_info def disable_event_thread(self): """Disable nova libvirt driver event thread. The Nova libvirt driver includes a native thread which monitors the libvirt event channel. In a testing environment this becomes problematic because it means we've got a floating thread calling sleep(1) over the life of the unit test. Seems harmless? It's not, because we sometimes want to test things like retry loops that should have specific sleep paterns. An unlucky firing of the libvirt thread will cause a test failure. """ # because we are patching a method in a class MonkeyPatch doesn't # auto import correctly. Import explicitly otherwise the patching # may silently fail. 
import nova.virt.libvirt.host # noqa def evloop(*args, **kwargs): pass self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.Host._init_events', evloop)) class libvirtError(Exception): """This class was copied and slightly modified from `libvirt-python:libvirt-override.py`. Since a test environment will use the real `libvirt-python` version of `libvirtError` if it's installed and not this fake, we need to maintain strict compatibility with the original class, including `__init__` args and instance-attributes. To create a libvirtError instance you should: # Create an unsupported error exception exc = libvirtError('my message') exc.err = (libvirt.VIR_ERR_NO_SUPPORT,) self.err is a tuple of form: (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) Alternatively, you can use the `make_libvirtError` convenience function to allow you to specify these attributes in one shot. """ def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None): Exception.__init__(self, defmsg) self.err = None def get_error_code(self): if self.err is None: return None return self.err[0] def get_error_domain(self): if self.err is None: return None return self.err[1] def get_error_message(self): if self.err is None: return None return self.err[2] def get_error_level(self): if self.err is None: return None return self.err[3] def get_str1(self): if self.err is None: return None return self.err[4] def get_str2(self): if self.err is None: return None return self.err[5] def get_str3(self): if self.err is None: return None return self.err[6] def get_int1(self): if self.err is None: return None return self.err[7] def get_int2(self): if self.err is None: return None return self.err[8] class NWFilter(object): def __init__(self, connection, xml): self._connection = connection self._xml = xml self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def undefine(self): 
self._connection._remove_filter(self) class NodeDevice(object): def __init__(self, connection, xml=None): self._connection = connection self._xml = xml if xml is not None: self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def attach(self): pass def dettach(self): pass def reset(self): pass class Domain(object): def __init__(self, connection, xml, running=False, transient=False): self._connection = connection if running: connection._mark_running(self) self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF self._transient = transient self._def = self._parse_definition(xml) self._has_saved_state = False self._snapshots = {} self._id = self._connection._id_counter def _parse_definition(self, xml): try: tree = etree.fromstring(xml) except etree.ParseError: raise make_libvirtError( libvirtError, "Invalid XML.", error_code=VIR_ERR_XML_DETAIL, error_domain=VIR_FROM_DOMAIN) definition = {} name = tree.find('./name') if name is not None: definition['name'] = name.text uuid_elem = tree.find('./uuid') if uuid_elem is not None: definition['uuid'] = uuid_elem.text else: definition['uuid'] = str(uuid.uuid4()) vcpu = tree.find('./vcpu') if vcpu is not None: definition['vcpu'] = int(vcpu.text) memory = tree.find('./memory') if memory is not None: definition['memory'] = int(memory.text) os = {} os_type = tree.find('./os/type') if os_type is not None: os['type'] = os_type.text os['arch'] = os_type.get('arch', self._connection.host_info.arch) os_kernel = tree.find('./os/kernel') if os_kernel is not None: os['kernel'] = os_kernel.text os_initrd = tree.find('./os/initrd') if os_initrd is not None: os['initrd'] = os_initrd.text os_cmdline = tree.find('./os/cmdline') if os_cmdline is not None: os['cmdline'] = os_cmdline.text os_boot = tree.find('./os/boot') if os_boot is not None: os['boot_dev'] = os_boot.get('dev') definition['os'] = os features = {} acpi = tree.find('./features/acpi') if acpi is 
not None: features['acpi'] = True definition['features'] = features devices = {} device_nodes = tree.find('./devices') if device_nodes is not None: disks_info = [] disks = device_nodes.findall('./disk') for disk in disks: disks_info += [_parse_disk_info(disk)] devices['disks'] = disks_info nics_info = [] nics = device_nodes.findall('./interface') for nic in nics: nic_info = {} nic_info['type'] = nic.get('type') mac = nic.find('./mac') if mac is not None: nic_info['mac'] = mac.get('address') source = nic.find('./source') if source is not None: if nic_info['type'] == 'network': nic_info['source'] = source.get('network') elif nic_info['type'] == 'bridge': nic_info['source'] = source.get('bridge') nics_info += [nic_info] devices['nics'] = nics_info definition['devices'] = devices return definition def create(self): self.createWithFlags(0) def createWithFlags(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) self._has_saved_state = False def isActive(self): return int(self._state == VIR_DOMAIN_RUNNING) def undefine(self): self._connection._undefine(self) def isPersistent(self): return True def undefineFlags(self, flags): self.undefine() if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE: if self.hasManagedSaveImage(0): self.managedSaveRemove() def destroy(self): self._state = VIR_DOMAIN_SHUTOFF self._connection._mark_not_running(self) def ID(self): return self._id def name(self): return self._def['name'] def UUIDString(self): return self._def['uuid'] def interfaceStats(self, device): return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3] def blockStats(self, device): return [2, 10000242400, 234, 2343424234, 34] def suspend(self): self._state = VIR_DOMAIN_PAUSED def shutdown(self): self._state = VIR_DOMAIN_SHUTDOWN self._connection._mark_not_running(self) def reset(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) def 
info(self): return [self._state, int(self._def['memory']), int(self._def['memory']), self._def['vcpu'], 123456789] def migrateToURI(self, desturi, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateToURI3(self, dconnuri, params, logical_sum): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateSetMaxDowntime(self, downtime): pass def attachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True self._def['devices']['disks'] += [disk_info] return True def attachDeviceFlags(self, xml, flags): if (flags & VIR_DOMAIN_AFFECT_LIVE and self._state != VIR_DOMAIN_RUNNING): raise make_libvirtError( libvirtError, "AFFECT_LIVE only allowed for running domains!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) self.attachDevice(xml) def detachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True return disk_info in self._def['devices']['disks'] def detachDeviceFlags(self, xml, flags): self.detachDevice(xml) def setUserPassword(self, user, password, flags=0): pass def XMLDesc(self, flags): disks = '' for disk in self._def['devices']['disks']: disks += '''<disk type='%(type)s' device='%(device)s'> <driver name='%(driver_name)s' type='%(driver_type)s'/> <source file='%(source)s'/> <target dev='%(target_dev)s' bus='%(target_bus)s'/> <address type='drive' controller='0' bus='0' unit='0'/> </disk>''' % disk nics = '' for nic in self._def['devices']['nics']: nics += '''<interface type='%(type)s'> <mac address='%(mac)s'/> <source 
%(type)s='%(source)s'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </interface>''' % nic return '''<domain type='kvm'> <name>%(name)s</name> <uuid>%(uuid)s</uuid> <memory>%(memory)s</memory> <currentMemory>%(memory)s</currentMemory> <vcpu>%(vcpu)s</vcpu> <os> <type arch='%(arch)s' machine='pc-0.12'>hvm</type> <boot dev='hd'/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='localtime'/> <on_poweroff>destroy</on_poweroff> <on_reboot>restart</on_reboot> <on_crash>restart</on_crash> <devices> <emulator>/usr/bin/kvm</emulator> %(disks)s <controller type='ide' index='0'> <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/> </controller> %(nics)s <serial type='file'> <source path='dummy.log'/> <target port='0'/> </serial> <serial type='pty'> <source pty='/dev/pts/27'/> <target port='1'/> </serial> <serial type='tcp'> <source host="-1" service="-1" mode="bind"/> </serial> <console type='file'> <source path='dummy.log'/> <target port='0'/> </console> <input type='tablet' bus='usb'/> <input type='mouse' bus='ps2'/> <graphics type='vnc' port='-1' autoport='yes'/> <graphics type='spice' port='-1' autoport='yes'/> <video> <model type='cirrus' vram='9216' heads='1'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/> </video> <memballoon model='virtio'> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </memballoon> </devices> </domain>''' % {'name': self._def['name'], 'uuid': self._def['uuid'], 'memory': self._def['memory'], 'vcpu': self._def['vcpu'], 'arch': self._def['os']['arch'], 'disks': disks, 'nics': nics} def managedSave(self, flags): self._connection._mark_not_running(self) self._has_saved_state = True def managedSaveRemove(self, flags): self._has_saved_state = False def hasManagedSaveImage(self, flags): return int(self._has_saved_state) def resume(self): self._state = VIR_DOMAIN_RUNNING def snapshotCreateXML(self, xml, flags): tree = 
etree.fromstring(xml) name = tree.find('./name').text snapshot = DomainSnapshot(name, self) self._snapshots[name] = snapshot return snapshot def vcpus(self): vcpus = ([], []) for i in range(0, self._def['vcpu']): vcpus[0].append((i, 1, 120405, i)) vcpus[1].append((True, True, True, True)) return vcpus def memoryStats(self): return {} def maxMemory(self): return self._def['memory'] def blockJobInfo(self, disk, flags): return {} def blockJobAbort(self, disk, flags): pass def blockResize(self, disk, size): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): if (not base) and (flags and VIR_DOMAIN_BLOCK_REBASE_RELATIVE): raise make_libvirtError( libvirtError, 'flag VIR_DOMAIN_BLOCK_REBASE_RELATIVE is ' 'valid only with non-null base', error_code=VIR_ERR_INVALID_ARG, error_domain=VIR_FROM_QEMU) return 0 def blockCommit(self, disk, base, top, flags): return 0 def jobInfo(self): # NOTE(danms): This is an array of 12 integers, so just report # something to avoid an IndexError if we look at this return [0] * 12 def jobStats(self, flags=0): return {} def injectNMI(self, flags=0): return 0 def abortJob(self): pass def fsFreeze(self): pass def fsThaw(self): pass class DomainSnapshot(object): def __init__(self, name, domain): self._name = name self._domain = domain def delete(self, flags): del self._domain._snapshots[self._name] class Connection(object): def __init__(self, uri=None, readonly=False, version=FAKE_LIBVIRT_VERSION, hv_version=1001000, host_info=None): if not uri or uri == '': if allow_default_uri_connection: uri = 'qemu:///session' else: raise ValueError("URI was None, but fake libvirt is " "configured to not accept this.") uri_whitelist = ['qemu:///system', 'qemu:///session', 'lxc:///', # from LibvirtDriver._uri() 'xen:///', # from LibvirtDriver._uri() 'uml:///system', 'test:///default', 'parallels:///system'] if uri not in uri_whitelist: raise make_libvirtError( libvirtError, "libvirt error: no connection driver " "available for No connection for URI 
%s" % uri, error_code=5, error_domain=0) self.readonly = readonly self._uri = uri self._vms = {} self._running_vms = {} self._id_counter = 1 # libvirt reserves 0 for the hypervisor. self._nwfilters = {} self._nodedevs = {} self._event_callbacks = {} self.fakeLibVersion = version self.fakeVersion = hv_version self.host_info = host_info or HostInfo() def _add_filter(self, nwfilter): self._nwfilters[nwfilter._name] = nwfilter def _remove_filter(self, nwfilter): del self._nwfilters[nwfilter._name] def _add_nodedev(self, nodedev): self._nodedevs[nodedev._name] = nodedev def _remove_nodedev(self, nodedev): del self._nodedevs[nodedev._name] def _mark_running(self, dom): self._running_vms[self._id_counter] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0) self._id_counter += 1 def _mark_not_running(self, dom): if dom._transient: self._undefine(dom) dom._id = -1 for (k, v) in six.iteritems(self._running_vms): if v == dom: del self._running_vms[k] self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0) return def _undefine(self, dom): del self._vms[dom.name()] if not dom._transient: self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0) def getInfo(self): return [self.host_info.arch, self.host_info.kB_mem, self.host_info.cpus, self.host_info.cpu_mhz, self.host_info.cpu_nodes, self.host_info.cpu_sockets, self.host_info.cpu_cores, self.host_info.cpu_threads] def numOfDomains(self): return len(self._running_vms) def listDomainsID(self): return list(self._running_vms.keys()) def lookupByID(self, id): if id in self._running_vms: return self._running_vms[id] raise make_libvirtError( libvirtError, 'Domain not found: no domain with matching id %d' % id, error_code=VIR_ERR_NO_DOMAIN, error_domain=VIR_FROM_QEMU) def lookupByName(self, name): if name in self._vms: return self._vms[name] raise make_libvirtError( libvirtError, 'Domain not found: no domain with matching name "%s"' % name, error_code=VIR_ERR_NO_DOMAIN, error_domain=VIR_FROM_QEMU) def listAllDomains(self, 
flags): vms = [] for vm in self._vms: if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE: if vm.state != VIR_DOMAIN_SHUTOFF: vms.append(vm) if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE: if vm.state == VIR_DOMAIN_SHUTOFF: vms.append(vm) return vms def _emit_lifecycle(self, dom, event, detail): if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks: return cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE] callback = cbinfo[0] opaque = cbinfo[1] callback(self, dom, event, detail, opaque) def defineXML(self, xml): dom = Domain(connection=self, running=False, transient=False, xml=xml) self._vms[dom.name()] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0) return dom def createXML(self, xml, flags): dom = Domain(connection=self, running=True, transient=True, xml=xml) self._vms[dom.name()] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0) return dom def getType(self): if self._uri == 'qemu:///system': return 'QEMU' def getLibVersion(self): return self.fakeLibVersion def getVersion(self): return self.fakeVersion def getHostname(self): return 'compute1' def domainEventRegisterAny(self, dom, eventid, callback, opaque): self._event_callbacks[eventid] = [callback, opaque] def registerCloseCallback(self, cb, opaque): pass def getCPUMap(self): """Return calculated CPU map from HostInfo, by default showing 2 online CPUs. 
""" active_cpus = self.host_info.cpus total_cpus = active_cpus + len(self.host_info.disabled_cpus_list) cpu_map = [True if cpu_num not in self.host_info.disabled_cpus_list else False for cpu_num in range(total_cpus)] return (total_cpus, cpu_map, active_cpus) def getCapabilities(self): """Return spoofed capabilities.""" numa_topology = self.host_info.get_numa_topology() if isinstance(numa_topology, vconfig.LibvirtConfigCapsNUMATopology): numa_topology = numa_topology.to_xml() return '''<capabilities> <host> <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid> <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/> <feature name='xtpr'/> <feature name='tm2'/> <feature name='est'/> <feature name='vmx'/> <feature name='ds_cpl'/> <feature name='monitor'/> <feature name='pbe'/> <feature name='tm'/> <feature name='ht'/> <feature name='ss'/> <feature name='acpi'/> <feature name='ds'/> <feature name='vme'/> </cpu> <migration_features> <live/> <uri_transports> <uri_transport>tcp</uri_transport> </uri_transports> </migration_features> %(topology)s <secmodel> <model>apparmor</model> <doi>0</doi> </secmodel> </host> <guest> <os_type>hvm</os_type> <arch name='i686'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> <domain type='qemu'> </domain> <domain type='kvm'> <emulator>/usr/bin/kvm</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> </domain> </arch> <features> <cpuselection/> <deviceboot/> <pae/> <nonpae/> <acpi default='on' toggle='yes'/> <apic default='on' toggle='no'/> </features> </guest> <guest> <os_type>hvm</os_type> 
<arch name='x86_64'> <wordsize>64</wordsize> <emulator>/usr/bin/qemu-system-x86_64</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> <domain type='qemu'> </domain> <domain type='kvm'> <emulator>/usr/bin/kvm</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> </domain> </arch> <features> <cpuselection/> <deviceboot/> <acpi default='on' toggle='yes'/> <apic default='on' toggle='no'/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='armv7l'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-arm</emulator> <machine>integratorcp</machine> <machine>vexpress-a9</machine> <machine>syborg</machine> <machine>musicpal</machine> <machine>mainstone</machine> <machine>n800</machine> <machine>n810</machine> <machine>n900</machine> <machine>cheetah</machine> <machine>sx1</machine> <machine>sx1-v1</machine> <machine>beagle</machine> <machine>beaglexm</machine> <machine>tosa</machine> <machine>akita</machine> <machine>spitz</machine> <machine>borzoi</machine> <machine>terrier</machine> <machine>connex</machine> <machine>verdex</machine> <machine>lm3s811evb</machine> <machine>lm3s6965evb</machine> <machine>realview-eb</machine> <machine>realview-eb-mpcore</machine> <machine>realview-pb-a8</machine> <machine>realview-pbx-a9</machine> <machine>versatilepb</machine> <machine>versatileab</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='mips'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-mips</emulator> <machine>malta</machine> <machine>mipssim</machine> <machine>magnum</machine> <machine>pica61</machine> <machine>mips</machine> <domain type='qemu'> 
</domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='mipsel'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-mipsel</emulator> <machine>malta</machine> <machine>mipssim</machine> <machine>magnum</machine> <machine>pica61</machine> <machine>mips</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='sparc'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-sparc</emulator> <machine>SS-5</machine> <machine>leon3_generic</machine> <machine>SS-10</machine> <machine>SS-600MP</machine> <machine>SS-20</machine> <machine>Voyager</machine> <machine>LX</machine> <machine>SS-4</machine> <machine>SPARCClassic</machine> <machine>SPARCbook</machine> <machine>SS-1000</machine> <machine>SS-2000</machine> <machine>SS-2</machine> <domain type='qemu'> </domain> </arch> </guest> <guest> <os_type>hvm</os_type> <arch name='ppc'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-ppc</emulator> <machine>g3beige</machine> <machine>virtex-ml507</machine> <machine>mpc8544ds</machine> <machine canonical='bamboo-0.13'>bamboo</machine> <machine>bamboo-0.13</machine> <machine>bamboo-0.12</machine> <machine>ref405ep</machine> <machine>taihu</machine> <machine>mac99</machine> <machine>prep</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> </capabilities>''' % {'sockets': self.host_info.cpu_sockets, 'cores': self.host_info.cpu_cores, 'threads': self.host_info.cpu_threads, 'topology': numa_topology} def compareCPU(self, xml, flags): tree = etree.fromstring(xml) arch_node = tree.find('./arch') if arch_node is not None: if arch_node.text not in [arch.X86_64, arch.I686]: return VIR_CPU_COMPARE_INCOMPATIBLE model_node = tree.find('./model') if model_node is not None: if model_node.text != self.host_info.cpu_model: return VIR_CPU_COMPARE_INCOMPATIBLE vendor_node = tree.find('./vendor') if vendor_node is not 
None: if vendor_node.text != self.host_info.cpu_vendor: return VIR_CPU_COMPARE_INCOMPATIBLE # The rest of the stuff libvirt implements is rather complicated # and I don't think it adds much value to replicate it here. return VIR_CPU_COMPARE_IDENTICAL def getCPUStats(self, cpuNum, flag): if cpuNum < 2: return {'kernel': 5664160000000, 'idle': 1592705190000000, 'user': 26728850000000, 'iowait': 6121490000000} else: raise make_libvirtError( libvirtError, "invalid argument: Invalid cpu number", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def nwfilterLookupByName(self, name): try: return self._nwfilters[name] except KeyError: raise make_libvirtError( libvirtError, "no nwfilter with matching name %s" % name, error_code=VIR_ERR_NO_NWFILTER, error_domain=VIR_FROM_NWFILTER) def nwfilterDefineXML(self, xml): nwfilter = NWFilter(self, xml) self._add_filter(nwfilter) def nodeDeviceLookupByName(self, name): try: return self._nodedevs[name] except KeyError: raise make_libvirtError( libvirtError, "no nodedev with matching name %s" % name, error_code=VIR_ERR_NO_NODE_DEVICE, error_domain=VIR_FROM_NODEDEV) def listDefinedDomains(self): return [] def listDevices(self, cap, flags): return [] def baselineCPU(self, cpu, flag): """Add new libvirt API.""" return """<cpu mode='custom' match='exact'> <model>Penryn</model> <vendor>Intel</vendor> <feature name='xtpr'/> <feature name='tm2'/> <feature name='est'/> <feature name='vmx'/> <feature name='ds_cpl'/> <feature name='monitor'/> <feature name='pbe'/> <feature name='tm'/> <feature name='ht'/> <feature name='ss'/> <feature name='acpi'/> <feature name='ds'/> <feature name='vme'/> <feature policy='require' name='aes'/> </cpu>""" def secretLookupByUsage(self, usage_type_obj, usage_id): pass def secretDefineXML(self, xml): pass def openAuth(uri, auth, flags=0): if type(auth) != list: raise Exception("Expected a list for 'auth' parameter") if type(auth[0]) != list: raise Exception("Expected a function in 'auth[0]' parameter") 
    # (continuation of openAuth) -- the second slot of `auth` is the
    # credential callback; mirror real libvirt's openAuth() contract by
    # rejecting anything that is not callable.
    if not callable(auth[1]):
        raise Exception("Expected a function in 'auth[1]' parameter")

    # flags == VIR_CONNECT_RO yields a read-only connection, as in libvirt.
    return Connection(uri, (flags == VIR_CONNECT_RO))


def virEventRunDefaultImpl():
    # One fake event-loop iteration.  Real libvirt blocks until an event
    # fires; sleeping keeps callers' polling loops paced roughly the same.
    time.sleep(1)


def virEventRegisterDefaultImpl():
    # `connection_used` is a module-level flag set elsewhere in this file
    # when a Connection is first created; real libvirt likewise requires
    # event-loop registration to happen before any connection is opened.
    if connection_used:
        raise Exception("virEventRegisterDefaultImpl() must be "
                        "called before connection is used.")


def registerErrorHandler(handler, ctxt):
    # No-op: the fake never emits asynchronous libvirt errors, so there is
    # nothing to route to the handler.
    pass


def make_libvirtError(error_class, msg, error_code=None,
                      error_domain=None, error_message=None,
                      error_level=None, str1=None,
                      str2=None, str3=None,
                      int1=None, int2=None):
    """Convenience function for creating `libvirtError` exceptions which
    allow you to specify arguments in constructor without having to
    manipulate the `err` tuple directly.

    We need to pass in `error_class` to this function because it may be
    `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on
    whether `libvirt-python` is installed.
    """
    exc = error_class(msg)
    # The 9-tuple layout matches libvirt's virError structure:
    # (code, domain, message, level, str1, str2, str3, int1, int2).
    exc.err = (error_code, error_domain, error_message, error_level,
               str1, str2, str3, int1, int2)
    return exc


# Public aliases matching the names exposed by the real `libvirt` module,
# so test code can refer to e.g. fakelibvirt.virConnect interchangeably.
virDomain = Domain

virNodeDevice = NodeDevice

virConnect = Connection


class FakeLibvirtFixture(fixtures.Fixture):
    """Performs global setup/stubbing for all libvirt tests.
    """
    def setUp(self):
        super(FakeLibvirtFixture, self).setUp()
        # Stop the driver's native libvirt event-monitor thread; its 1s
        # sleep loop otherwise interferes with tests that assert on sleep
        # patterns (see disable_event_thread's docstring).
        disable_event_thread(self)
apache-2.0
petrutlucian94/nova_dev
nova/tests/api/openstack/compute/contrib/test_volumes.py
4
34779
# Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for the os-volumes / os-volumes_boot compute API extensions."""

import datetime

from lxml import etree
from oslo.config import cfg
import webob
from webob import exc

from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
        assisted_snaps
from nova.api.openstack.compute.contrib import volumes
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder

CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')

# Well-known UUIDs used across the tests; FAKE_UUID_A/B are the volumes the
# fake BDM list below reports as attached, FAKE_UUID_C/D are never attached.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'

IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'


def fake_get_instance(self, context, instance_id, want_objects=False):
    # Minimal instance stub: only the uuid is consulted by the code under
    # test.
    return {'uuid': instance_id}


def fake_get_volume(self, context, id):
    return {'id': 'woot'}


def fake_attach_volume(self, context, instance, volume_id, device):
    pass


def fake_detach_volume(self, context, instance, volume):
    pass


def fake_swap_volume(self, context, instance, old_volume_id, new_volume_id):
    pass


def fake_create_snapshot(self, context, volume, name, description):
    return {'id': 123,
            'volume_id': 'fakeVolId',
            'status': 'available',
            'volume_size': 123,
            'created_at': '2013-01-01 00:00:01',
            'display_name': 'myVolumeName',
            'display_description': 'myVolumeDescription'}


def fake_delete_snapshot(self, context, snapshot_id):
    pass


def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
                                        delete_info):
    pass


def fake_compute_volume_snapshot_create(self, context, volume_id,
                                        create_info):
    pass


def fake_get_instance_bdms(self, context, instance):
    # Two fake block-device mappings so tests can look up attachments by
    # volume id (FAKE_UUID_A and FAKE_UUID_B).
    return [{'id': 1,
             'instance_uuid': instance['uuid'],
             'device_name': '/dev/fake0',
             'delete_on_termination': 'False',
             'virtual_name': 'MyNamesVirtual',
             'snapshot_id': None,
             'volume_id': FAKE_UUID_A,
             'volume_size': 1},
            {'id': 2,
             'instance_uuid': instance['uuid'],
             'device_name': '/dev/fake1',
             'delete_on_termination': 'False',
             'virtual_name': 'MyNamesVirtual',
             'snapshot_id': None,
             'volume_id': FAKE_UUID_B,
             'volume_size': 1}]


def fake_volume_actions_to_locked_server(self, context, instance, volume_id,
                                         device=None):
    # NOTE(review): this function was previously defined twice -- once taking
    # a `volume` argument (for detach-style stubs) and once taking
    # `volume_id`/`device` (for attach-style stubs); the second definition
    # silently shadowed the first (flake8 F811).  A single definition with an
    # optional `device` keyword serves both call patterns: detach callers
    # pass three positional arguments, attach callers additionally pass
    # `device`.
    raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])


class BootFromVolumeTest(test.TestCase):
    """Tests for booting servers from a volume via os-volumes_boot."""

    def setUp(self):
        super(BootFromVolumeTest, self).setUp()
        self.stubs.Set(compute_api.API, 'create',
                       self._get_fake_compute_api_create())
        fakes.stub_out_nw_api(self.stubs)
        # Captured by the fake create() so tests can assert on what block
        # device mapping (and which BDM format) reached the compute API.
        self._block_device_mapping_seen = None
        self._legacy_bdm_seen = True
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes',
                                    'Block_device_mapping_v2_boot'])

    def _get_fake_compute_api_create(self):
        # Returns a stand-in for compute_api.API.create that records the
        # block_device_mapping/legacy_bdm kwargs and returns one fake
        # instance dict.
        def _fake_compute_api_create(cls, context, instance_type,
                                     image_href, **kwargs):
            self._block_device_mapping_seen = kwargs.get(
                'block_device_mapping')
            self._legacy_bdm_seen = kwargs.get('legacy_bdm')

            inst_type = flavors.get_flavor_by_flavor_id(2)
            resv_id = None
            return ([{'id': 1,
                      'display_name': 'test_server',
                      'uuid': FAKE_UUID,
                      'instance_type': dict(inst_type),
                      'access_ip_v4': '1.2.3.4',
                      'access_ip_v6': 'fead::1234',
                      'image_ref': IMAGE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
                      'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
                      'progress': 0,
                      'fixed_ips': []
                      }], resv_id)
        return _fake_compute_api_create

    def test_create_root_volume(self):
        # Legacy (v1) block_device_mapping format.
        body = dict(server=dict(
                name='test_server', imageRef=IMAGE_UUID,
                flavorRef=2, min_count=1, max_count=1,
                block_device_mapping=[dict(
                        volume_id=1,
                        device_name='/dev/vda',
                        virtual='root',
                        delete_on_termination=False,
                        )]
                ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        self.assertEqual(len(self._block_device_mapping_seen), 1)
        self.assertTrue(self._legacy_bdm_seen)
        self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')

    def test_create_root_volume_bdm_v2(self):
        # New (v2) block_device_mapping_v2 format; legacy_bdm must be False.
        body = dict(server=dict(
                name='test_server', imageRef=IMAGE_UUID,
                flavorRef=2, min_count=1, max_count=1,
                block_device_mapping_v2=[dict(
                        source_type='volume',
                        uuid=1,
                        device_name='/dev/vda',
                        boot_index=0,
                        delete_on_termination=False,
                        )]
                ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id']) self.assertEqual(CONF.password_length, len(server['adminPass'])) self.assertEqual(len(self._block_device_mapping_seen), 1) self.assertFalse(self._legacy_bdm_seen) self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1) self.assertEqual(self._block_device_mapping_seen[0]['boot_index'], 0) self.assertEqual(self._block_device_mapping_seen[0]['device_name'], '/dev/vda') class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete) self.stubs.Set(cinder.API, "get", fakes.stub_volume_get) self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all) self.flags( osapi_compute_extension=[ 'nova.api.openstack.compute.contrib.select_extensions'], osapi_compute_ext_list=['Volumes']) self.context = context.get_admin_context() self.app = fakes.wsgi_app(init_only=('os-volumes',)) def test_volume_create(self): self.stubs.Set(cinder.API, "create", fakes.stub_volume_create) vol = {"size": 100, "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1"} body = {"volume": vol} req = webob.Request.blank('/v2/fake/os-volumes') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers['content-type'] = 'application/json' resp = req.get_response(self.app) self.assertEqual(resp.status_int, 200) resp_dict = jsonutils.loads(resp.body) self.assertIn('volume', resp_dict) self.assertEqual(resp_dict['volume']['size'], vol['size']) self.assertEqual(resp_dict['volume']['displayName'], vol['display_name']) self.assertEqual(resp_dict['volume']['displayDescription'], vol['display_description']) self.assertEqual(resp_dict['volume']['availabilityZone'], vol['availability_zone']) def test_volume_create_bad(self): def fake_volume_create(self, context, size, name, description, snapshot, **param): raise 
exception.InvalidInput(reason="bad request data") self.stubs.Set(cinder.API, "create", fake_volume_create) vol = {"size": '#$?', "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1"} body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/fake/os-volumes') self.assertRaises(webob.exc.HTTPBadRequest, volumes.VolumeController().create, req, body) def test_volume_index(self): req = webob.Request.blank('/v2/fake/os-volumes') resp = req.get_response(self.app) self.assertEqual(resp.status_int, 200) def test_volume_detail(self): req = webob.Request.blank('/v2/fake/os-volumes/detail') resp = req.get_response(self.app) self.assertEqual(resp.status_int, 200) def test_volume_show(self): req = webob.Request.blank('/v2/fake/os-volumes/123') resp = req.get_response(self.app) self.assertEqual(resp.status_int, 200) def test_volume_show_no_volume(self): self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound) req = webob.Request.blank('/v2/fake/os-volumes/456') resp = req.get_response(self.app) self.assertEqual(resp.status_int, 404) def test_volume_delete(self): req = webob.Request.blank('/v2/fake/os-volumes/123') req.method = 'DELETE' resp = req.get_response(self.app) self.assertEqual(resp.status_int, 202) def test_volume_delete_no_volume(self): self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound) req = webob.Request.blank('/v2/fake/os-volumes/456') req.method = 'DELETE' resp = req.get_response(self.app) self.assertEqual(resp.status_int, 404) class VolumeAttachTests(test.TestCase): def setUp(self): super(VolumeAttachTests, self).setUp() self.stubs.Set(compute_api.API, 'get_instance_bdms', fake_get_instance_bdms) self.stubs.Set(compute_api.API, 'get', fake_get_instance) self.stubs.Set(cinder.API, 'get', fake_get_volume) self.context = context.get_admin_context() self.expected_show = {'volumeAttachment': {'device': '/dev/fake0', 'serverId': FAKE_UUID, 'id': FAKE_UUID_A, 'volumeId': FAKE_UUID_A }} 
self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.attachments = volumes.VolumeAttachmentController(self.ext_mgr) def test_show(self): req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A) self.assertEqual(self.expected_show, result) def test_detach(self): self.stubs.Set(compute_api.API, 'detach_volume', fake_detach_volume) req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid') req.method = 'DELETE' req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A) self.assertEqual('202 Accepted', result.status) def test_detach_vol_not_found(self): self.stubs.Set(compute_api.API, 'detach_volume', fake_detach_volume) req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid') req.method = 'DELETE' req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPNotFound, self.attachments.delete, req, FAKE_UUID, FAKE_UUID_C) def test_detach_volume_from_locked_server(self): def fake_detach_volume_from_locked_server(self, context, instance, volume): raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) self.stubs.Set(compute_api.API, 'detach_volume', fake_detach_volume_from_locked_server) req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid') req.method = 'DELETE' req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete, req, FAKE_UUID, FAKE_UUID_A) def test_attach_volume(self): self.stubs.Set(compute_api.API, 'attach_volume', fake_attach_volume) body = {'volumeAttachment': {'volumeId': FAKE_UUID_A, 'device': '/dev/fake'}} req = 
webob.Request.blank('/v2/servers/id/os-volume_attachments') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context result = self.attachments.create(req, FAKE_UUID, body) self.assertEqual(result['volumeAttachment']['id'], '00000000-aaaa-aaaa-aaaa-000000000000') def test_attach_volume_to_locked_server(self): def fake_attach_volume_to_locked_server(self, context, instance, volume_id, device=None): raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) self.stubs.Set(compute_api.API, 'attach_volume', fake_attach_volume_to_locked_server) body = {'volumeAttachment': {'volumeId': FAKE_UUID_A, 'device': '/dev/fake'}} req = webob.Request.blank('/v2/servers/id/os-volume_attachments') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(webob.exc.HTTPConflict, self.attachments.create, req, FAKE_UUID, body) def test_attach_volume_bad_id(self): self.stubs.Set(compute_api.API, 'attach_volume', fake_attach_volume) body = { 'volumeAttachment': { 'device': None, 'volumeId': 'TESTVOLUME', } } req = webob.Request.blank('/v2/servers/id/os-volume_attachments') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create, req, FAKE_UUID, body) def _test_swap(self, uuid=FAKE_UUID_A, fake_func=None): fake_func = fake_func or fake_swap_volume self.stubs.Set(compute_api.API, 'swap_volume', fake_func) body = {'volumeAttachment': {'volumeId': FAKE_UUID_B, 'device': '/dev/fake'}} req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid') req.method = 'PUT' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context return self.attachments.update(req, FAKE_UUID, uuid, 
body) def test_swap_volume_for_locked_server(self): self.ext_mgr.extensions['os-volume-attachment-update'] = True def fake_swap_volume_for_locked_server(self, context, instance, old_volume, new_volume): raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) self.ext_mgr.extensions['os-volume-attachment-update'] = True self.assertRaises(webob.exc.HTTPConflict, self._test_swap, fake_func=fake_swap_volume_for_locked_server) def test_swap_volume_no_extension(self): self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap) def test_swap_volume(self): self.ext_mgr.extensions['os-volume-attachment-update'] = True result = self._test_swap() self.assertEqual('202 Accepted', result.status) def test_swap_volume_no_attachment(self): self.ext_mgr.extensions['os-volume-attachment-update'] = True self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C) class VolumeSerializerTest(test.TestCase): def _verify_volume_attachment(self, attach, tree): for attr in ('id', 'volumeId', 'serverId', 'device'): self.assertEqual(str(attach[attr]), tree.get(attr)) def _verify_volume(self, vol, tree): self.assertEqual(tree.tag, 'volume') for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt', 'displayName', 'displayDescription', 'volumeType', 'snapshotId'): self.assertEqual(str(vol[attr]), tree.get(attr)) for child in tree: self.assertIn(child.tag, ('attachments', 'metadata')) if child.tag == 'attachments': self.assertEqual(1, len(child)) self.assertEqual('attachment', child[0].tag) self._verify_volume_attachment(vol['attachments'][0], child[0]) elif child.tag == 'metadata': not_seen = set(vol['metadata'].keys()) for gr_child in child: self.assertIn(gr_child.get("key"), not_seen) self.assertEqual(str(vol['metadata'][gr_child.get("key")]), gr_child.text) not_seen.remove(gr_child.get("key")) self.assertEqual(0, len(not_seen)) def test_attach_show_create_serializer(self): serializer = volumes.VolumeAttachmentTemplate() raw_attach = dict( id='vol_id', 
volumeId='vol_id', serverId='instance_uuid', device='/foo') text = serializer.serialize(dict(volumeAttachment=raw_attach)) tree = etree.fromstring(text) self.assertEqual('volumeAttachment', tree.tag) self._verify_volume_attachment(raw_attach, tree) def test_attach_index_serializer(self): serializer = volumes.VolumeAttachmentsTemplate() raw_attaches = [dict( id='vol_id1', volumeId='vol_id1', serverId='instance1_uuid', device='/foo1'), dict( id='vol_id2', volumeId='vol_id2', serverId='instance2_uuid', device='/foo2')] text = serializer.serialize(dict(volumeAttachments=raw_attaches)) tree = etree.fromstring(text) self.assertEqual('volumeAttachments', tree.tag) self.assertEqual(len(raw_attaches), len(tree)) for idx, child in enumerate(tree): self.assertEqual('volumeAttachment', child.tag) self._verify_volume_attachment(raw_attaches[idx], child) def test_volume_show_create_serializer(self): serializer = volumes.VolumeTemplate() raw_volume = dict( id='vol_id', status='vol_status', size=1024, availabilityZone='vol_availability', createdAt=timeutils.utcnow(), attachments=[dict( id='vol_id', volumeId='vol_id', serverId='instance_uuid', device='/foo')], displayName='vol_name', displayDescription='vol_desc', volumeType='vol_type', snapshotId='snap_id', metadata=dict( foo='bar', baz='quux', ), ) text = serializer.serialize(dict(volume=raw_volume)) tree = etree.fromstring(text) self._verify_volume(raw_volume, tree) def test_volume_index_detail_serializer(self): serializer = volumes.VolumesTemplate() raw_volumes = [dict( id='vol1_id', status='vol1_status', size=1024, availabilityZone='vol1_availability', createdAt=timeutils.utcnow(), attachments=[dict( id='vol1_id', volumeId='vol1_id', serverId='instance_uuid', device='/foo1')], displayName='vol1_name', displayDescription='vol1_desc', volumeType='vol1_type', snapshotId='snap1_id', metadata=dict( foo='vol1_foo', bar='vol1_bar', ), ), dict( id='vol2_id', status='vol2_status', size=1024, availabilityZone='vol2_availability', 
createdAt=timeutils.utcnow(), attachments=[dict( id='vol2_id', volumeId='vol2_id', serverId='instance_uuid', device='/foo2')], displayName='vol2_name', displayDescription='vol2_desc', volumeType='vol2_type', snapshotId='snap2_id', metadata=dict( foo='vol2_foo', bar='vol2_bar', ), )] text = serializer.serialize(dict(volumes=raw_volumes)) tree = etree.fromstring(text) self.assertEqual('volumes', tree.tag) self.assertEqual(len(raw_volumes), len(tree)) for idx, child in enumerate(tree): self._verify_volume(raw_volumes[idx], child) class TestVolumeCreateRequestXMLDeserializer(test.TestCase): def setUp(self): super(TestVolumeCreateRequestXMLDeserializer, self).setUp() self.deserializer = volumes.CreateDeserializer() def test_minimal_volume(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1"></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", }, } self.assertEqual(request['body'], expected) def test_display_name(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1" display_name="Volume-xml"></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "display_name": "Volume-xml", }, } self.assertEqual(request['body'], expected) def test_display_description(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1" display_name="Volume-xml" display_description="description"></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "display_name": "Volume-xml", "display_description": "description", }, } self.assertEqual(request['body'], expected) def test_volume_type(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>""" request = 
self.deserializer.deserialize(self_request) expected = { "volume": { "display_name": "Volume-xml", "size": "1", "display_name": "Volume-xml", "display_description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", }, } self.assertEqual(request['body'], expected) def test_availability_zone(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" availability_zone="us-east1"></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "display_name": "Volume-xml", "display_description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", "availability_zone": "us-east1", }, } self.assertEqual(request['body'], expected) def test_metadata(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" display_name="Volume-xml" size="1"> <metadata><meta key="Type">work</meta></metadata></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "display_name": "Volume-xml", "size": "1", "metadata": { "Type": "work", }, }, } self.assertEqual(request['body'], expected) def test_full_volume(self): self_request = """ <volume xmlns="http://docs.openstack.org/compute/api/v1.1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" availability_zone="us-east1"> <metadata><meta key="Type">work</meta></metadata></volume>""" request = self.deserializer.deserialize(self_request) expected = { "volume": { "size": "1", "display_name": "Volume-xml", "display_description": "description", "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", "availability_zone": "us-east1", "metadata": { "Type": "work", }, }, } self.maxDiff = None self.assertEqual(request['body'], expected) class CommonUnprocessableEntityTestCase(object): resource = None 
entity_name = None controller_cls = None kwargs = {} """ Tests of places we throw 422 Unprocessable Entity from """ def setUp(self): super(CommonUnprocessableEntityTestCase, self).setUp() self.controller = self.controller_cls() def _unprocessable_create(self, body): req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource) req.method = 'POST' kwargs = self.kwargs.copy() kwargs['body'] = body self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, **kwargs) def test_create_no_body(self): self._unprocessable_create(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._unprocessable_create(body=body) def test_create_malformed_entity(self): body = {self.entity_name: 'string'} self._unprocessable_create(body=body) class UnprocessableVolumeTestCase(CommonUnprocessableEntityTestCase, test.TestCase): resource = 'os-volumes' entity_name = 'volume' controller_cls = volumes.VolumeController class UnprocessableAttachmentTestCase(CommonUnprocessableEntityTestCase, test.TestCase): resource = 'servers/' + FAKE_UUID + '/os-volume_attachments' entity_name = 'volumeAttachment' controller_cls = volumes.VolumeAttachmentController kwargs = {'server_id': FAKE_UUID} class UnprocessableSnapshotTestCase(CommonUnprocessableEntityTestCase, test.TestCase): resource = 'os-snapshots' entity_name = 'snapshot' controller_cls = volumes.SnapshotController class CreateSnapshotTestCase(test.TestCase): def setUp(self): super(CreateSnapshotTestCase, self).setUp() self.controller = volumes.SnapshotController() self.stubs.Set(cinder.API, 'get', fake_get_volume) self.stubs.Set(cinder.API, 'create_snapshot_force', fake_create_snapshot) self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot) self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots') self.req.method = 'POST' self.body = {'snapshot': {'volume_id': 1}} def test_force_true(self): self.body['snapshot']['force'] = 'True' self.controller.create(self.req, body=self.body) def 
test_force_false(self): self.body['snapshot']['force'] = 'f' self.controller.create(self.req, body=self.body) def test_force_invalid(self): self.body['snapshot']['force'] = 'foo' self.assertRaises(exception.InvalidParameterValue, self.controller.create, self.req, body=self.body) class DeleteSnapshotTestCase(test.TestCase): def setUp(self): super(DeleteSnapshotTestCase, self).setUp() self.controller = volumes.SnapshotController() self.stubs.Set(cinder.API, 'get', fake_get_volume) self.stubs.Set(cinder.API, 'create_snapshot_force', fake_create_snapshot) self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot) self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot) self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots') def test_normal_delete(self): self.req.method = 'POST' self.body = {'snapshot': {'volume_id': 1}} result = self.controller.create(self.req, body=self.body) self.req.method = 'DELETE' result = self.controller.delete(self.req, result['snapshot']['id']) self.assertEqual(result.status_int, 202) class AssistedSnapshotCreateTestCase(test.TestCase): def setUp(self): super(AssistedSnapshotCreateTestCase, self).setUp() self.controller = assisted_snaps.AssistedVolumeSnapshotsController() self.stubs.Set(compute_api.API, 'volume_snapshot_create', fake_compute_volume_snapshot_create) def test_assisted_create(self): req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots') body = {'snapshot': {'volume_id': 1, 'create_info': {}}} req.method = 'POST' self.controller.create(req, body=body) def test_assisted_create_missing_create_info(self): req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots') body = {'snapshot': {'volume_id': 1}} req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) class AssistedSnapshotDeleteTestCase(test.TestCase): def setUp(self): super(AssistedSnapshotDeleteTestCase, self).setUp() self.controller = 
assisted_snaps.AssistedVolumeSnapshotsController() self.stubs.Set(compute_api.API, 'volume_snapshot_delete', fake_compute_volume_snapshot_delete) def test_assisted_delete(self): params = { 'delete_info': jsonutils.dumps({'volume_id': 1}), } req = fakes.HTTPRequest.blank( '/v2/fake/os-assisted-volume-snapshots?%s' % '&'.join(['%s=%s' % (k, v) for k, v in params.iteritems()])) req.method = 'DELETE' result = self.controller.delete(req, '5') self.assertEqual(result.status_int, 204) def test_assisted_delete_missing_delete_info(self): req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
apache-2.0
dparshin/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/watchlistrule.py
132
2153
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class WatchListRule:
    """Pairs a watch-list definition with the instructions to carry out
    when that definition is satisfied.

    A complex definition is a '|'-separated list of alternatives; the
    rule matches when any single alternative matches.
    """

    def __init__(self, complex_definition, instructions):
        # Split the alternation into individual definitions up front so
        # match() only has to do membership tests.
        self.definitions_to_match = complex_definition.split('|')
        self._instructions = instructions

    def match(self, matching_definitions):
        """Return True if any of this rule's definitions is present in
        matching_definitions."""
        return any(definition in matching_definitions
                   for definition in self.definitions_to_match)

    def instructions(self):
        """Return the instructions associated with this rule."""
        return self._instructions

    def remove_instruction(self, instruction):
        """Remove a single instruction from this rule (raises ValueError
        if it is not present)."""
        self._instructions.remove(instruction)
bsd-3-clause
hephaex/a10c
tools/perf/python/twatch.py
1565
1316
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
arbylee/python-warrior
pythonwarrior/tests/test_turn.py
1
1336
import mock
import unittest

from pythonwarrior.turn import Turn
from pythonwarrior.abilities.feel import Feel


class TestTurnWithActions(unittest.TestCase):
    # A Turn built with action abilities (trailing-underscore keys) must
    # record exactly one action, with its arguments, per turn.
    def setUp(self):
        self.turn = Turn({'walk_': None, 'attack_': None})

    def test_should_have_no_action_performed_at_first(self):
        self.assertIsNone(self.turn.action)

    def test_should_be_able_to_perform_action_and_recall_it(self):
        self.turn.walk_()
        self.assertEqual(self.turn.action, ['walk_'])

    def test_should_include_arguments_passed_to_action(self):
        self.turn.walk_('forward')
        self.assertEqual(self.turn.action, ['walk_', 'forward'])

    def test_should_not_be_able_to_call_multiple_actions_per_turn(self):
        self.turn.walk_('forward')
        self.assertRaises(Exception, self.turn.attack_)


class TestTurnWithSenses(unittest.TestCase):
    # Senses (no trailing underscore) are delegated to the ability's
    # perform() and may be called freely, unlike actions.
    def setUp(self):
        # Patch Feel.space so constructing Feel needs no real unit/space.
        with mock.patch('pythonwarrior.abilities.feel.Feel.space',
                        mock.Mock()):
            self.feel = Feel(mock.Mock())
        self.turn = Turn({'feel': self.feel})

    def test_call_sense_with_any_argument_and_return_expected_results(self):
        self.assertEqual(self.turn.feel(), self.feel.perform())
        self.assertEqual(self.turn.feel('backward'),
                         self.feel.perform('backward'))
mit
plumbum/Espruino
scripts/get_board_info.py
7
1268
#!/usr/bin/python # This file is part of Espruino, a JavaScript interpreter for Microcontrollers # # Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk> # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # ---------------------------------------------------------------------------------------- # Simple script to extract the user-friendly name of the board from boards/BOARDNAME.py # Used when auto-generating the website # ---------------------------------------------------------------------------------------- import subprocess; import re; import json; import sys; import os; import importlib; scriptdir = os.path.dirname(os.path.realpath(__file__)) basedir = scriptdir+"/../" sys.path.append(basedir+"scripts"); sys.path.append(basedir+"boards"); import common; import pinutils; # ----------------------------------------------------------------------------------------- # Now scan AF file if len(sys.argv)!=3: print "ERROR, USAGE: get_board_info.py BOARD_NAME 'board.info.foo'" exit(1) boardname = sys.argv[1] # import the board def board = importlib.import_module(boardname) print eval(sys.argv[2]);
mpl-2.0
wimoverwater/Sick-Beard
sickbeard/helpers.py
1
11101
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

# Assorted helper functions shared across Sick Beard (Python 2).

import StringIO, zlib, gzip
import os.path, os, glob
import urllib, urllib2
import re

import sickbeard

from sickbeard.exceptions import *
from sickbeard import logger, classes
from sickbeard.common import *
from sickbeard import db
from sickbeard import encodingKludge as ek

from lib.tvdb_api import tvdb_api, tvdb_exceptions

import xml.etree.cElementTree as etree

urllib._urlopener = classes.SickBeardURLopener()


def indentXML(elem, level=0):
    '''
    Does our pretty printing, makes Matt very happy
    '''
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indentXML(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        # Strip out the newlines from text
        if elem.text:
            elem.text = elem.text.replace('\n', ' ')
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i


def replaceExtension(file, newExt):
    '''
    Replace the extension of *file* with *newExt*; files with no stem
    (dotfiles) or no extension are returned unchanged.

    >>> replaceExtension('foo.avi', 'mkv')
    'foo.mkv'
    >>> replaceExtension('.vimrc', 'arglebargle')
    '.vimrc'
    >>> replaceExtension('a.b.c', 'd')
    'a.b.d'
    >>> replaceExtension('', 'a')
    ''
    >>> replaceExtension('foo.bar', '')
    'foo.'
    '''
    sepFile = file.rpartition(".")
    if sepFile[0] == "":
        return file
    else:
        return sepFile[0] + "." + newExt


def isMediaFile (file):
    '''Return True if the filename has a known media extension and is
    not a sample file.'''
    # ignore samples
    if re.search('(^|[\W_])sample\d*[\W_]', file):
        return False

    sepFile = file.rpartition(".")
    if sepFile[2].lower() in mediaExtensions:
        return True
    else:
        return False


def sanitizeFileName (name):
    '''
    >>> sanitizeFileName('a/b/c')
    'a-b-c'
    >>> sanitizeFileName('abc')
    'abc'
    >>> sanitizeFileName('a"b')
    'ab'
    '''
    # Path separators and wildcards become dashes; other reserved
    # filesystem characters are dropped entirely.
    for x in "\\/*":
        name = name.replace(x, "-")
    for x in ":\"<>|?":
        name = name.replace(x, "")
    return name


def getURL (url, headers=[]):
    """
    Returns a byte-string retrieved from the url provider.
    """
    # NOTE(review): the mutable default for 'headers' is shared across
    # calls; it is only iterated here, so it is harmless as written.
    opener = urllib2.build_opener()
    opener.addheaders = [('User-Agent', USER_AGENT), ('Accept-Encoding', 'gzip,deflate')]
    for cur_header in headers:
        opener.addheaders.append(cur_header)

    usock = opener.open(url)
    url = usock.geturl()
    encoding = usock.info().get("Content-Encoding")

    # Transparently decompress gzip/deflate responses.
    if encoding in ('gzip', 'x-gzip', 'deflate'):
        content = usock.read()
        if encoding == 'deflate':
            data = StringIO.StringIO(zlib.decompress(content))
        else:
            data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(content))
        result = data.read()
    else:
        result = usock.read()
    usock.close()
    return result


def findCertainShow (showList, tvdbid):
    '''Return the single show in showList with the given tvdbid, or None;
    raises MultipleShowObjectsException on duplicates.'''
    results = filter(lambda x: x.tvdbid == tvdbid, showList)
    if len(results) == 0:
        return None
    elif len(results) > 1:
        raise MultipleShowObjectsException()
    else:
        return results[0]


def findCertainTVRageShow (showList, tvrid):
    '''Return the single show in showList with the given tvrid, or None
    (a tvrid of 0 never matches); raises MultipleShowObjectsException
    on duplicates.'''
    if tvrid == 0:
        return None

    results = filter(lambda x: x.tvrid == tvrid, showList)
    if len(results) == 0:
        return None
    elif len(results) > 1:
        raise MultipleShowObjectsException()
    else:
        return results[0]


def makeDir (dir):
    '''Create dir (and parents) if it does not exist; return False if
    creation fails.'''
    if not ek.ek(os.path.isdir, dir):
        try:
            ek.ek(os.makedirs, dir)
        except OSError:
            return False
    return True


def makeShowNFO(showID, showDir):
    '''Fetch show data from TVDB and write a tvshow.nfo into showDir.
    Returns True on success, False on incomplete data; re-raises TVDB
    lookup errors.'''
    logger.log(u"Making NFO for show "+str(showID)+" in dir "+showDir, logger.DEBUG)

    if not makeDir(showDir):
        logger.log(u"Unable to create show dir, can't make NFO", logger.ERROR)
        return False

    t = tvdb_api.Tvdb(actors=True, **sickbeard.TVDB_API_PARMS)

    try:
        myShow = t[int(showID)]
    except tvdb_exceptions.tvdb_shownotfound:
        logger.log(u"Unable to find show with id " + str(showID) + " on tvdb, skipping it", logger.ERROR)
        raise
    except tvdb_exceptions.tvdb_error:
        logger.log(u"TVDB is down, can't use its data to add this show", logger.ERROR)
        raise

    # check for title and id
    try:
        if myShow["seriesname"] == None or myShow["seriesname"] == "" or myShow["id"] == None or myShow["id"] == "":
            logger.log(u"Incomplete info for show with id " + str(showID) + " on tvdb, skipping it", logger.ERROR)
            return False
    except tvdb_exceptions.tvdb_attributenotfound:
        logger.log(u"Incomplete info for show with id " + str(showID) + " on tvdb, skipping it", logger.ERROR)
        return False

    tvNode = buildNFOXML(myShow)
    # Make it purdy
    indentXML( tvNode )
    nfo = etree.ElementTree( tvNode )

    logger.log(u"Writing NFO to "+os.path.join(showDir, "tvshow.nfo"), logger.DEBUG)
    nfo_filename = os.path.join(showDir, "tvshow.nfo").encode('utf-8')
    nfo_fh = open(nfo_filename, 'w')
    nfo.write( nfo_fh, encoding="utf-8" )

    return True


def buildNFOXML(myShow):
    '''
    Build an etree.Element of the root node of an NFO file with the
    data from `myShow`, a TVDB show object.

    >>> from collections import defaultdict
    >>> from xml.etree.cElementTree import tostring
    >>> show = defaultdict(lambda: None, _actors=[])
    >>> tostring(buildNFOXML(show))
    '<tvshow xsd="http://www.w3.org/2001/XMLSchema" xsi="http://www.w3.org/2001/XMLSchema-instance"><title /><rating /><plot /><episodeguide><url /></episodeguide><mpaa /><id /><genre /><premiered /><studio /></tvshow>'
    >>> show['seriesname'] = 'Peaches'
    >>> tostring(buildNFOXML(show))
    '<tvshow xsd="http://www.w3.org/2001/XMLSchema" xsi="http://www.w3.org/2001/XMLSchema-instance"><title>Peaches</title><rating /><plot /><episodeguide><url /></episodeguide><mpaa /><id /><genre /><premiered /><studio /></tvshow>'
    >>> show['contentrating'] = 'PG'
    >>> tostring(buildNFOXML(show))
    '<tvshow xsd="http://www.w3.org/2001/XMLSchema" xsi="http://www.w3.org/2001/XMLSchema-instance"><title>Peaches</title><rating /><plot /><episodeguide><url /></episodeguide><mpaa>PG</mpaa><id /><genre /><premiered /><studio /></tvshow>'
    >>> show['genre'] = 'Fruit|Edibles'
    >>> tostring(buildNFOXML(show))
    '<tvshow xsd="http://www.w3.org/2001/XMLSchema" xsi="http://www.w3.org/2001/XMLSchema-instance"><title>Peaches</title><rating /><plot /><episodeguide><url /></episodeguide><mpaa>PG</mpaa><id /><genre>Fruit / Edibles</genre><premiered /><studio /></tvshow>'
    '''
    tvNode = etree.Element( "tvshow" )
    for ns in XML_NSMAP.keys():
        tvNode.set(ns, XML_NSMAP[ns])

    title = etree.SubElement( tvNode, "title" )
    if myShow["seriesname"] != None:
        title.text = myShow["seriesname"]

    rating = etree.SubElement( tvNode, "rating" )
    if myShow["rating"] != None:
        rating.text = myShow["rating"]

    plot = etree.SubElement( tvNode, "plot" )
    if myShow["overview"] != None:
        plot.text = myShow["overview"]

    episodeguide = etree.SubElement( tvNode, "episodeguide" )
    episodeguideurl = etree.SubElement( episodeguide, "url" )
    if myShow["id"] != None:
        showurl = sickbeard.TVDB_BASE_URL + '/series/' + myShow["id"] + '/all/en.zip'
        episodeguideurl.text = showurl

    mpaa = etree.SubElement( tvNode, "mpaa" )
    if myShow["contentrating"] != None:
        mpaa.text = myShow["contentrating"]

    tvdbid = etree.SubElement( tvNode, "id" )
    if myShow["id"] != None:
        tvdbid.text = myShow["id"]

    genre = etree.SubElement( tvNode, "genre" )
    if myShow["genre"] != None:
        # TVDB pipe-separated genres become " / "-separated text.
        genre.text = " / ".join([x for x in myShow["genre"].split('|') if x != ''])

    premiered = etree.SubElement( tvNode, "premiered" )
    if myShow["firstaired"] != None:
        premiered.text = myShow["firstaired"]

    studio = etree.SubElement( tvNode, "studio" )
    if myShow["network"] != None:
        studio.text = myShow["network"]

    for actor in myShow['_actors']:
        cur_actor = etree.SubElement( tvNode, "actor" )
        cur_actor_name = etree.SubElement( cur_actor, "name" )
        cur_actor_name.text = actor['name']
        cur_actor_role = etree.SubElement( cur_actor, "role" )
        cur_actor_role_text = actor['role']
        if cur_actor_role_text != None:
            cur_actor_role.text = cur_actor_role_text
        cur_actor_thumb = etree.SubElement( cur_actor, "thumb" )
        cur_actor_thumb_text = actor['image']
        if cur_actor_thumb_text != None:
            cur_actor_thumb.text = cur_actor_thumb_text

    return tvNode


def searchDBForShow(regShowName):
    '''Look up a show by name in the local DB; returns a
    (tvdb_id, show_name) tuple or None. Falls back to stripping a
    trailing year and matching on startyear.'''
    showNames = set([regShowName+'%', regShowName.replace(' ','_')+'%'])

    # if tvdb fails then try looking it up in the db
    myDB = db.DBConnection()

    yearRegex = "(.*?)\s*([(]?)(\d{4})(?(2)[)]?).*"

    for showName in showNames:
        sqlResults = myDB.select("SELECT * FROM tv_shows WHERE show_name LIKE ? OR tvr_name LIKE ?", [showName, showName])

        if len(sqlResults) == 1:
            return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"])

        else:
            # if we didn't get exactly one result then try again with the year stripped off if possible
            match = re.match(yearRegex, showName)
            if match:
                logger.log(u"Unable to match original name but trying to manually strip and specify show year", logger.DEBUG)
                sqlResults = myDB.select("SELECT * FROM tv_shows WHERE (show_name LIKE ? OR tvr_name LIKE ?) AND startyear = ?", [match.group(1)+'%', match.group(1)+'%', match.group(3)])

            if len(sqlResults) == 0:
                logger.log(u"Unable to match a record in the DB for "+showName, logger.DEBUG)
                continue
            elif len(sqlResults) > 1:
                logger.log(u"Multiple results for "+showName+" in the DB, unable to match show name", logger.DEBUG)
                continue
            else:
                return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"])

    return None


def sizeof_fmt(num):
    '''
    >>> sizeof_fmt(2)
    '2.0 bytes'
    >>> sizeof_fmt(1024)
    '1.0 KB'
    >>> sizeof_fmt(2048)
    '2.0 KB'
    >>> sizeof_fmt(2**20)
    '1.0 MB'
    >>> sizeof_fmt(1234567)
    '1.2 MB'
    '''
    for x in ['bytes','KB','MB','GB','TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, x)
        num /= 1024.0


def listMediaFiles(dir):
    '''Recursively collect media files under dir, skipping hidden
    directories and any directory named 'Extras'.'''
    if not dir or not ek.ek(os.path.isdir, dir):
        return []

    files = []
    for curFile in ek.ek(os.listdir, dir):
        fullCurFile = ek.ek(os.path.join, dir, curFile)

        # if it's a dir do it recursively
        if ek.ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
            files += listMediaFiles(fullCurFile)

        elif isMediaFile(curFile):
            files.append(fullCurFile)

    return files

if __name__ == '__main__':
    import doctest
    doctest.testmod()

# vim: noet
gpl-3.0
Enucatl/pypes
pypes/plugins/name_changer.py
1
1474
"""Change the keys in the packet through a dictionary""" import logging import pypes.component log = logging.getLogger(__name__) class NameChanger(pypes.component.Component): """ The run method will get the keys from the input packet and set them back in the packet with the new key specified by the value in the init dictionary. """ # defines the type of component we're creating. __metatype__ = 'TRANSFORMER' def __init__(self, dictionary): # initialize parent class pypes.component.Component.__init__(self) # Optionally add/remove component ports # self.remove_output('out') self._dictionary = dictionary # log successful initialization message log.debug('Component Initialized: %s', self.__class__.__name__) def run(self): # Define our components entry point while True: packet = self.receive("in") if packet is None: self.yield_ctrl() continue try: for key, value in self._dictionary.items(): data = packet.get(key) packet.delete(key) packet.set(value, data) except: log.error('Component Failed: %s', name, exc_info=True) self.send("out", packet) # yield the CPU, allowing another component to run self.yield_ctrl()
apache-2.0
arrti/myF2E
handler/topic.py
1
35861
#!/usr/bin/env python # coding=utf-8 # # Copyright 2012 F2E.im # Do have a faith in what you're doing. # Make your life a story worth telling. import uuid import hashlib from PIL import Image import StringIO import time import json import re import urllib2 import tornado.web import lib.jsonp import pprint import math import datetime import glob from base import * from lib.variables import * from form.topic import * from lib.variables import gen_random from lib.xss import XssCleaner from lib.utils import find_mentions class IndexHandler(BaseHandler): def get(self, template_variables = {}): user_info = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info user_id = None if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); user_id = user_info['uid'] template_variables["status_counter"] = { "users": self.user_model.get_all_users_count(), "nodes": self.node_model.get_all_nodes_count(), "topics": self.topic_model.get_all_topics_count(), "replies": self.reply_model.get_all_replies_count(), } blocked_topic_list = self.blocked_model.get_blocked_topic_id(user_id) if not blocked_topic_list: blocked_topic = [-1,-2]# no blocked, all invalid topic id > 0, this will block no topic else: blocked_topic = [-1]#to ensure that sql "in ()" have at least 2 items, only 1 item in tuple will be (22,), the ',' is wrong in sql for row in blocked_topic_list: blocked_topic.append(row['involved_topic_id']) blocked_topic = re.sub('L', '', str(tuple(blocked_topic)))#int type from mysql convert python long type like 22L, so use re remove 'L' blocked_user_list = 
self.blocked_model.get_blocked_user_id(user_id) if not blocked_user_list: blocked_user = [-1,-2]# no blocked, all invalid user id > 0, this will block no user else: blocked_user = [-1] for row in blocked_user_list: blocked_user.append(row['involved_user_id']) blocked_user = re.sub('L', '', str(tuple(blocked_user))) template_variables["topics"] = self.topic_model.get_all_not_blocked_topics(current_page = page, blocked_user = blocked_user, blocked_topic = blocked_topic) template_variables["planes"] = self.plane_model.get_all_planes_with_nodes() template_variables["hot_nodes"] = self.node_model.get_all_hot_nodes() template_variables["active_page"] = "topic" template_variables["gen_random"] = gen_random self.render("topic/topics.html", **template_variables) class NodeTopicsHandler(BaseHandler): def get(self, node_slug, template_variables = {}): user_info = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["topics"] = self.topic_model.get_all_topics_by_node_slug(current_page = page, node_slug = node_slug) template_variables["node"] = self.node_model.get_node_by_node_slug(node_slug) template_variables["active_page"] = "topic" template_variables["gen_random"] = gen_random self.render("topic/node_topics.html", **template_variables) class ViewHandler(BaseHandler): def get(self, topic_id, template_variables = {}): user_info = self.current_user page = int(self.get_argument("p", "1")) user_info = self.get_current_user() template_variables["user_info"] = user_info user_id = None if(user_info): 
template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["topic_favorited"] = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, user_info["uid"]); user_id = user_info['uid'] template_variables["gen_random"] = gen_random template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id) template_variables["appends"] = self.append_model.get_topic_append_by_topic_id(topic_id) # check reply count and cal current_page if `p` not given reply_num = 106 reply_count = template_variables["topic"]["reply_count"] reply_last_page = (reply_count / reply_num + (reply_count % reply_num and 1)) or 1 page = int(self.get_argument("p", reply_last_page)) template_variables["reply_num"] = reply_num template_variables["current_page"] = page blocked_user_list = self.blocked_model.get_blocked_user_id(user_id) if not blocked_user_list: blocked_user = [-1,-2]# no blocked, all invalid user id > 0, this will block no user else: blocked_user = [-1] for row in blocked_user_list: blocked_user.append(row['involved_user_id']) blocked_user = re.sub('L', '', str(tuple(blocked_user))) template_variables["replies"] = self.reply_model.get_all_not_blocked_replies_by_topic_id(topic_id, current_page = page, num = reply_num, blocked_user = blocked_user) template_variables["active_page"] = "topic" # update topic reply_count and hits self.topic_model.update_topic_by_topic_id(topic_id, { "reply_count": template_variables["replies"]["page"]["total"], "hits": (template_variables["topic"]["hits"] or 0) + 1, }) self.render("topic/view.html", **template_variables) @tornado.web.authenticated def post(self, template_variables = {}): 
template_variables = {} # validate the fields form = ReplyForm(self) if not form.validate(): self.get(form.tid.data, {"errors": form.errors}) return # continue while validate succeed topic_info = self.topic_model.get_topic_by_topic_id(form.tid.data) replied_info = self.reply_model.get_user_last_reply_by_topic_id(self.current_user["uid"], form.tid.data) if(not topic_info): template_variables["errors"] = {} template_variables["errors"]["invalid_topic_info"] = [u"要回复的帖子不存在"] self.get(form.tid.data, template_variables) return if(replied_info): #last_replied_fingerprint = hashlib.sha1(str(replied_info.topic_id) + str(replied_info.author_id) + replied_info.content).hexdigest() #new_replied_fingerprint = hashlib.sha1(str(form.tid.data) + str(self.current_user["uid"]) + form.content.data).hexdigest() last_replied_fingerprint = hashlib_sha1(str(replied_info.topic_id) + str(replied_info.author_id) + replied_info.content) new_replied_fingerprint = hashlib_sha1(str(form.tid.data) + str(self.current_user["uid"]) + form.content.data) if last_replied_fingerprint == new_replied_fingerprint: template_variables["errors"] = {} template_variables["errors"]["duplicated_reply"] = [u"回复重复提交"] self.get(form.tid.data, template_variables) return reply_info = { "author_id": self.current_user["uid"], "topic_id": form.tid.data, # "content": XssCleaner().strip(form.content.data), "content": form.content.data, "created": time.strftime('%Y-%m-%d %H:%M:%S'), } reply_id = self.reply_model.add_new_reply(reply_info) # update topic last_replied_by and last_replied_time self.topic_model.update_topic_by_topic_id(form.tid.data, { "last_replied_by": self.current_user["uid"], "last_replied_time": time.strftime('%Y-%m-%d %H:%M:%S'), "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'), }) # create reply notification if not self.current_user["uid"] == topic_info["author_id"]: self.notification_model.add_new_notification({ "trigger_user_id": self.current_user["uid"], "involved_type": 1, # 0: mention, 1: reply 
"involved_user_id": topic_info["author_id"], "involved_topic_id": form.tid.data, "content": form.content.data, "status": 0, "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'), }) # create @username notification for username in set(find_mentions(form.content.data)): mentioned_user = self.user_model.get_user_by_username(username) if not mentioned_user: continue if mentioned_user["uid"] == self.current_user["uid"]: continue if mentioned_user["uid"] == topic_info["author_id"]: continue self.notification_model.add_new_notification({ "trigger_user_id": self.current_user["uid"], "involved_type": 0, # 0: mention, 1: reply "involved_user_id": mentioned_user["uid"], "involved_topic_id": form.tid.data, "content": form.content.data, "status": 0, "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'), }) # update reputation of topic author if not self.current_user["uid"] == topic_info["author_id"] and not replied_info: topic_time_diff = datetime.datetime.now() - topic_info["created"] reputation = topic_info["author_reputation"] or 0 reputation = reputation + 2 * math.log(self.current_user["reputation"] or 0 + topic_time_diff.days + 10, 10) self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) # self.get(form.tid.data) self.redirect("/t/%s#reply%s" % (form.tid.data, topic_info["reply_count"] + 1)) class CreateHandler(BaseHandler): @tornado.web.authenticated def get(self, node_slug = None, template_variables = {}): user_info = self.current_user template_variables["user_info"] = user_info template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["gen_random"] = gen_random 
template_variables["node_slug"] = node_slug template_variables["active_page"] = "topic" self.render("topic/create.html", **template_variables) @tornado.web.authenticated def post(self, node_slug = None, template_variables = {}): template_variables = {} # validate the fields form = CreateForm(self) if not form.validate(): self.get(node_slug, {"errors": form.errors}) return # continue while validate succeed node = self.node_model.get_node_by_node_slug(node_slug) last_created = self.topic_model.get_user_last_created_topic(self.current_user["uid"]) if last_created: #last_created_fingerprint = hashlib.sha1(last_created.title + last_created.content + str(last_created.node_id)).hexdigest() #new_created_fingerprint = hashlib.sha1(form.title.data + form.content.data + str(node["id"])).hexdigest() last_created_fingerprint = hashlib_sha1(last_created.title + last_created.content + str(last_created.node_id)) new_created_fingerprint = hashlib_sha1(form.title.data + form.content.data + str(node["id"])) if last_created_fingerprint == new_created_fingerprint: template_variables["errors"] = {} template_variables["errors"]["duplicated_topic"] = [u"帖子重复提交"] self.get(node_slug, template_variables) return topic_info = { "author_id": self.current_user["uid"], "title": form.title.data, # "content": XssCleaner().strip(form.content.data), "content": form.content.data, "node_id": node["id"], "created": time.strftime('%Y-%m-%d %H:%M:%S'), "reply_count": 0, "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'), } reply_id = self.topic_model.add_new_topic(topic_info) # update reputation of topic author reputation = self.current_user["reputation"] or 0 reputation = reputation - 5 reputation = 0 if reputation < 0 else reputation self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) self.redirect("/") class EditHandler(BaseHandler): @tornado.web.authenticated def get(self, topic_id, template_variables = {}): user_info = self.current_user 
template_variables["user_info"] = user_info template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id) template_variables["gen_random"] = gen_random template_variables["active_page"] = "topic" self.render("topic/edit.html", **template_variables) @tornado.web.authenticated def post(self, topic_id, template_variables = {}): template_variables = {} # validate the fields form = CreateForm(self) if not form.validate(): self.get(topic_id, {"errors": form.errors}) return # continue while validate succeed topic_info = self.topic_model.get_topic_by_topic_id(topic_id) if(not topic_info["author_id"] == self.current_user["uid"]): template_variables["errors"] = {} template_variables["errors"]["invalid_permission"] = [u"没有权限修改该主题"] self.get(topic_id, template_variables) return update_topic_info = { "title": form.title.data, # "content": XssCleaner().strip(form.content.data), "content": form.content.data, "updated": time.strftime('%Y-%m-%d %H:%M:%S'), "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'), } reply_id = self.topic_model.update_topic_by_topic_id(topic_id, update_topic_info) # update reputation of topic author reputation = topic_info["author_reputation"] or 0 reputation = reputation - 2 reputation = 0 if reputation < 0 else reputation self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) self.redirect("/t/%s" % topic_id) class AppendHandler(BaseHandler): @tornado.web.authenticated def get(self, topic_id, template_variables = {}): user_info = self.current_user template_variables["user_info"] = user_info 
template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["topic"] = self.topic_model.get_topic_by_topic_id(topic_id) template_variables["appends"] = self.append_model.get_topic_append_by_topic_id(topic_id) template_variables["gen_random"] = gen_random template_variables["active_page"] = "topic" self.render("topic/append.html", **template_variables) @tornado.web.authenticated def post(self, topic_id, template_variables = {}): template_variables = {} # validate the fields form = AppendForm(self) if not form.validate(): self.get(topic_id, {"errors": form.errors}) return # continue while validate succeed topic_info = self.topic_model.get_topic_by_topic_id(topic_id) if(not topic_info["author_id"] == self.current_user["uid"]): template_variables["errors"] = {} template_variables["errors"]["invalid_permission"] = [u"没有权限为该主题追加说明"] self.get(topic_id, template_variables) return append_content = '\n--- \n' append_content = append_content + form.content.data append_info = { "topic_id": topic_id, "author_id": self.current_user["uid"], "content": form.content.data, "created": time.strftime('%Y-%m-%d %H:%M:%S'), } update_topic_info = { "updated": time.strftime('%Y-%m-%d %H:%M:%S'), "last_touched": time.strftime('%Y-%m-%d %H:%M:%S'), } reply_id = self.topic_model.update_topic_by_topic_id(topic_id, update_topic_info) append_id = self.append_model.add_new_append(append_info) # update reputation of topic author reputation = topic_info["author_reputation"] or 0 reputation = reputation - 2 reputation = 0 if reputation < 0 else reputation self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) 
self.redirect("/t/%s" % topic_id) class ProfileHandler(BaseHandler): def get(self, user, template_variables = {}): if(re.match(r'^\d+$', user)): user_info = self.user_model.get_user_by_uid(user) else: user_info = self.user_model.get_user_by_username(user) if not user_info: self.write_error(404) return current_user = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } if(current_user): template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"]); ''' if user_info["github"]: github_repos = self.mc.get(str("%s_github_repos" % user_info["github"])) or json.JSONDecoder().decode(urllib2.urlopen('https://api.github.com/users/%s/repos' % user_info["github"]).read()) self.mc.set(str("%s_github_repos" % user_info["github"]), github_repos) template_variables["github_repos"] = github_repos ''' template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page = page) template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page = page) template_variables["gen_random"] = gen_random template_variables["active_page"] = "_blank" self.render("topic/profile.html", **template_variables) class CardHandler(BaseHandler): def get(self, user, template_variables = {}): if(re.match(r'^\d+$', user)): user_info = self.user_model.get_user_by_uid(user) else: user_info = self.user_model.get_user_by_username(user) if not user_info: self.write_error(404) return current_user = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info template_variables["gen_random"] = gen_random 
template_variables["active_page"] = "_blank" self.render("topic/card.html", **template_variables) class VoteHandler(BaseHandler): def get(self, template_variables = {}): topic_id = int(self.get_argument("topic_id")) topic_info = self.topic_model.get_topic_by_topic_id(topic_id) if not topic_info: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "topic_not_exist", })) return if self.current_user["uid"] == topic_info["author_id"]: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "can_not_vote_your_topic", })) return if self.vote_model.get_vote_by_topic_id_and_trigger_user_id(topic_id, self.current_user["uid"]): self.write(lib.jsonp.print_JSON({ "success": 0, "message": "already_voted", })) return self.vote_model.add_new_vote({ "trigger_user_id": self.current_user["uid"], "involved_type": 0, # 0: topic, 1: reply "involved_user_id": topic_info["author_id"], "involved_topic_id": topic_id, "status": 0, "occurrence_time": time.strftime('%Y-%m-%d %H:%M:%S'), }) self.write(lib.jsonp.print_JSON({ "success": 1, "message": "thanks_for_your_vote", })) # update reputation of topic author topic_time_diff = datetime.datetime.now() - topic_info["created"] reputation = topic_info["author_reputation"] or 0 reputation = reputation + 2 * math.log(self.current_user["reputation"] or 0 + topic_time_diff.days + 10, 10) self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) class UserTopicsHandler(BaseHandler): def get(self, user, template_variables = {}): if(re.match(r'^\d+$', user)): user_info = self.user_model.get_user_by_uid(user) else: user_info = self.user_model.get_user_by_username(user) current_user = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": 
self.favorite_model.get_user_favorite_count(user_info["uid"]), } if(current_user): template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"]); template_variables["topics"] = self.topic_model.get_user_all_topics(user_info["uid"], current_page = page) template_variables["active_page"] = "topic" template_variables["gen_random"] = gen_random self.render("topic/user_topics.html", **template_variables) class UserRepliesHandler(BaseHandler): def get(self, user, template_variables = {}): if(re.match(r'^\d+$', user)): user_info = self.user_model.get_user_by_uid(user) else: user_info = self.user_model.get_user_by_username(user) current_user = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } if(current_user): template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"]); template_variables["replies"] = self.reply_model.get_user_all_replies(user_info["uid"], current_page = page) template_variables["active_page"] = "topic" template_variables["gen_random"] = gen_random self.render("topic/user_replies.html", **template_variables) class UserFavoritesHandler(BaseHandler): def get(self, user, template_variables = {}): if(re.match(r'^\d+$', user)): user_info = self.user_model.get_user_by_uid(user) else: user_info = self.user_model.get_user_by_username(user) current_user = self.current_user page = int(self.get_argument("p", "1")) template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": 
self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } if(current_user): template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(current_user["uid"]); template_variables["favorites"] = self.favorite_model.get_user_all_favorites(user_info["uid"], current_page = page) template_variables["active_page"] = "topic" template_variables["gen_random"] = gen_random self.render("topic/user_favorites.html", **template_variables) class ReplyEditHandler(BaseHandler): @tornado.web.authenticated def get(self, reply_id, template_variables = {}): user_info = self.current_user template_variables["user_info"] = user_info template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["reply"] = self.reply_model.get_reply_by_reply_id(reply_id) template_variables["gen_random"] = gen_random template_variables["active_page"] = "topic" self.render("topic/reply_edit.html", **template_variables) @tornado.web.authenticated def post(self, reply_id, template_variables = {}): template_variables = {} # validate the fields form = ReplyEditForm(self) if not form.validate(): self.get(reply_id, {"errors": form.errors}) return # continue while validate succeed reply_info = self.reply_model.get_reply_by_reply_id(reply_id) if(not reply_info["author_id"] == self.current_user["uid"]): template_variables["errors"] = {} template_variables["errors"]["invalid_permission"] = [u"没有权限修改该回复"] self.get(reply_id, template_variables) return update_reply_info = { # "content": XssCleaner().strip(form.content.data), "content": form.content.data, 
"updated": time.strftime('%Y-%m-%d %H:%M:%S'), } reply_id = self.reply_model.update_reply_by_reply_id(reply_id, update_reply_info) # update reputation of topic author reputation = self.current_user["reputation"] or 0 reputation = reputation - 2 reputation = 0 if reputation < 0 else reputation self.user_model.set_user_base_info_by_uid(reply_info["author_id"], {"reputation": reputation}) self.redirect("/t/%s" % reply_info["topic_id"]) class FavoriteHandler(BaseHandler): def get(self, template_variables = {}): topic_id = int(self.get_argument("topic_id")) topic_info = self.topic_model.get_topic_by_topic_id(topic_id) if not self.current_user: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "user_not_login", })) return if not topic_info: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "topic_not_exist", })) return if self.current_user["uid"] == topic_info["author_id"]: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "can_not_favorite_your_topic", })) return if self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"]): self.write(lib.jsonp.print_JSON({ "success": 0, "message": "already_favorited", })) return self.favorite_model.add_new_favorite({ "owner_user_id": self.current_user["uid"], "involved_type": 0, # 0: topic, 1: reply "involved_topic_id": topic_id, "created": time.strftime('%Y-%m-%d %H:%M:%S'), }) self.write(lib.jsonp.print_JSON({ "success": 1, "message": "favorite_success", })) # update reputation of topic author topic_time_diff = datetime.datetime.now() - topic_info["created"] reputation = topic_info["author_reputation"] or 0 reputation = reputation + 2 * math.log(self.current_user["reputation"] or 0 + topic_time_diff.days + 10, 10) self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) class CancelFavoriteHandler(BaseHandler): def get(self, template_variables = {}): topic_id = int(self.get_argument("topic_id")) topic_info = 
self.topic_model.get_topic_by_topic_id(topic_id) favorite_info = None if not self.current_user: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "user_not_login", })) return if not topic_info: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "topic_not_exist", })) return favorite_info = self.favorite_model.get_favorite_by_topic_id_and_owner_user_id(topic_id, self.current_user["uid"]) if not favorite_info: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "not_been_favorited", })) return self.favorite_model.cancel_exist_favorite_by_id(favorite_info["id"]) self.write(lib.jsonp.print_JSON({ "success": 1, "message": "cancel_favorite_success", })) # update reputation of topic author topic_time_diff = datetime.datetime.now() - topic_info["created"] reputation = topic_info["author_reputation"] or 0 reputation = reputation + 2 * math.log(self.current_user["reputation"] or 0 + topic_time_diff.days + 10, 10) self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation}) class BlockTopicHandler(BaseHandler): def get(self, template_variables = {}): topic_id = int(self.get_argument("topic_id")) topic_info = self.topic_model.get_topic_by_topic_id(topic_id) if not self.current_user: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "user_not_login", })) return if not topic_info: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "topic_not_exist", })) return if self.current_user["uid"] == topic_info["author_id"]: self.write(lib.jsonp.print_JSON({ "success": 0, "message": "can_not_block_your_topic", })) return if self.blocked_model.get_user_blocked_topic_by_involved_topic_id_and_trigger_user_id(topic_id, self.current_user["uid"]): self.write(lib.jsonp.print_JSON({ "success": 0, "message": "already_blocked", })) return self.blocked_model.add_new_blocked({ "trigger_user_id": self.current_user["uid"], "involved_topic_id": topic_id, "status": 0,#0: blocked by user; 1: blocked by admin "created": 
time.strftime('%Y-%m-%d %H:%M:%S'), }) self.write(lib.jsonp.print_JSON({ "success": 1, "message": "block_success", })) # update reputation of topic author '''topic_time_diff = datetime.datetime.now() - topic_info["created"] reputation = topic_info["author_reputation"] or 0 reputation = reputation + 2 * math.log(self.current_user["reputation"] or 0 + topic_time_diff.days + 10, 10) self.user_model.set_user_base_info_by_uid(topic_info["author_id"], {"reputation": reputation})''' class MembersHandler(BaseHandler): def get(self, template_variables = {}): user_info = self.current_user template_variables["user_info"] = user_info if(user_info): template_variables["user_info"]["counter"] = { "topics": self.topic_model.get_user_all_topics_count(user_info["uid"]), "replies": self.reply_model.get_user_all_replies_count(user_info["uid"]), "favorites": self.favorite_model.get_user_favorite_count(user_info["uid"]), } template_variables["notifications_count"] = self.notification_model.get_user_unread_notification_count(user_info["uid"]); template_variables["members"] = self.user_model.get_users_by_latest(num = 49) template_variables["active_members"] = self.user_model.get_users_by_last_login(num = 49) template_variables["gen_random"] = gen_random template_variables["active_page"] = "members" self.render("topic/members.html", **template_variables)
bsd-3-clause
martinx/martinxus-foursquared
util/gen_parser.py
262
4392
#!/usr/bin/python

"""Generate a Java parser class for a foursquare API type.

Walks the nodes of an XML example document (path given as argv[1]) via
common.WalkNodesForAttributes and prints the Java source of a pull-style
parser class to stdout.
"""

import datetime
import sys
import textwrap

import common

from xml.dom import pulldom


PARSER = """\
/**
 * Copyright 2009 Joe LaPenna
 */

package com.joelapenna.foursquare.parsers;

import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;

import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Auto-generated: %(timestamp)s
 *
 * @author Joe LaPenna (joe@joelapenna.com)
 * @param <T>
 */
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
    private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
    private static final boolean DEBUG = Foursquare.PARSER_DEBUG;

    @Override
    public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException, FoursquareError, FoursquareParseException {
        parser.require(XmlPullParser.START_TAG, null, null);

        %(type_name)s %(top_node_name)s = new %(type_name)s();

        while (parser.nextTag() == XmlPullParser.START_TAG) {
            String name = parser.getName();
            %(stanzas)s
            } else {
                // Consume something we don't understand.
                if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
                skipSubTree(parser);
            }
        }
        return %(top_node_name)s;
    }
}"""

# Stanza emitted for boolean-typed attributes.
BOOLEAN_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""

# Stanza emitted for group (collection) attributes.
GROUP_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""

# Stanza emitted for complex (nested object) attributes.
COMPLEX_STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""

# Default stanza: plain text attribute.
STANZA = """\
            } else if ("%(name)s".equals(name)) {
                %(top_node_name)s.set%(camel_name)s(parser.nextText());
"""


def main():
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        sys.argv[1])
    GenerateClass(type_name, top_node_name, attributes)


def GenerateClass(type_name, top_node_name, attributes):
    """generate it.

    type_name: the type of object the parser returns
    top_node_name: the name of the object the parser returns.
        per common.WalkNodesForAttributes
    """
    stanzas = []
    # Bug fix: the footer Replacements() call below reads the loop
    # variables `name`/`typ`; give them defaults so an empty attribute
    # dict no longer raises NameError.
    name, typ = top_node_name, ''
    for name in sorted(attributes):
        typ, children = attributes[name]
        replacements = Replacements(top_node_name, name, typ, children)
        if typ == common.BOOLEAN:
            stanzas.append(BOOLEAN_STANZA % replacements)
        elif typ == common.GROUP:
            stanzas.append(GROUP_STANZA % replacements)
        elif typ in common.COMPLEX:
            stanzas.append(COMPLEX_STANZA % replacements)
        else:
            stanzas.append(STANZA % replacements)

    if stanzas:
        # pop off the extraneous } else for the first conditional stanza.
        stanzas[0] = stanzas[0].replace('} else ', '', 1)

    replacements = Replacements(top_node_name, name, typ, [None])
    replacements['stanzas'] = '\n'.join(stanzas).strip()
    print(PARSER % replacements)


def Replacements(top_node_name, name, typ, children):
    """Build the template substitution dict for one attribute stanza."""
    # PascalCase class name derived from the top node, e.g. my_type -> MyType
    type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
    # PascalCase accessor suffix for this attribute, e.g. some_name -> SomeName
    camel_name = ''.join([word.capitalize() for word in name.split('_')])
    # NOTE(review): this yields e.g. "Somename", not camelCase as the old
    # comment claimed; it is unused by the templates, so behavior is kept.
    attribute_name = camel_name.lower().capitalize()
    # mFieldName
    field_name = 'm' + camel_name

    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        sub_parser_camel_case = (camel_name[:-1] + 'Parser')

    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0]
    }


if __name__ == '__main__':
    main()
apache-2.0
santosh26a/Semantic-Data-Validation-using-Python
TenderDate_validation.py
1
2411
# This validates date properties of Tenders.
class TenderDate_validation:
    """Semantic validation of tender date ordering.

    Each rule method records its outcome on the class-level STATE flag
    and also returns that same value, so callers may use either.
    """

    # Class-level flag holding the outcome of the most recent rule check.
    STATE = None

    # It takes arguments such as callForTendersPublicationDate, bidDeadlineDate,
    # contractAwardNoticePublicationDate, and parentContractAwardDate
    def __init__(self, callForTendersPublicationDate, bidDeadlineDate,
                 contractAwardNoticePublicationDate, parentContractAwardDate):
        self.callForTendersPublicationDate = callForTendersPublicationDate
        self.bidDeadlineDate = bidDeadlineDate
        self.contractAwardNoticePublicationDate = contractAwardNoticePublicationDate
        self.parentContractAwardDate = parentContractAwardDate

    def _record(self, later, earlier):
        # A rule passes only when both dates are present and `later`
        # is strictly after `earlier`; the result is stored on STATE.
        TenderDate_validation.STATE = (
            later is not None and earlier is not None and later > earlier
        )
        return TenderDate_validation.STATE

    def Tender_Calling_Date_Contract_Award_Date(self):
        'Rule: validating if Contract_Award_Date (Schema ID: parentContractAwardDate) is later than the Tender_Calling_Date (Schema ID: callForTendersPublicationDate)'
        return self._record(self.parentContractAwardDate,
                            self.callForTendersPublicationDate)

    def Tender_Calling_Date_bidDeadlineDate(self):
        'Rule: validating if bidDeadlineDate (Schema ID: bidDeadlineDate) is later than the Tender_Calling_Date'
        return self._record(self.bidDeadlineDate,
                            self.callForTendersPublicationDate)

    def Tender_Calling_Date_Award_Notice_Date(self):
        'Rule: validating if Award_Notice_Date (Schema ID: contractAwardNoticePublicationDate) is later than the Tender_Calling_Date'
        return self._record(self.contractAwardNoticePublicationDate,
                            self.callForTendersPublicationDate)
mit
chebee7i/twitter
scripts/fisher.py
1
4408
""" Write hashtag Frobenius scores to file. """ import io from operator import itemgetter import time import numpy as np from scipy.stats.mstats import mquantiles import twitterproj from twitterproj.fisher import * from twitterproj.fisher import pipeline db = twitterproj.connect() def hashtag_scores(): N = 5000 lines = frobenius_hashtags(5000) lines = [u','.join(map(unicode, line)) for line in lines] lines.insert(0, u'# hashtag, count, user count, frobenius norm of FIM') filename = 'htscores.csv' with io.open(filename, 'w') as fobj: fobj.write(u'\n'.join(lines)) def scatter(): with open('htscores.csv') as f: lines = list(f.readlines())[1:] counts = [] usercounts = [] scores = [] for line in lines: ht, c, uc, score = line.strip().split(',') counts.append(float(c)) usercounts.append(float(uc)) scores.append(float(score)) import matplotlib.pyplot as plt plt.style.use('ggplot') f, axes = plt.subplots(1,2) axes = list(reversed(axes)) plt.sca(axes[0]) clip = None scat = axes[0].scatter(counts[:clip], usercounts[:clip], c=scores[:clip], s=10, cmap=plt.cm.cool, edgecolors='none', alpha=.2) cb = f.colorbar(scat) axes[0].set_xlabel('Hashtag Count') axes[0].set_ylabel('Hashtag User Count') axes[0].set_xscale('log') axes[0].set_yscale('log') cb.set_label('Frobenius Norm') if clip is not None: axes[0].set_title('Lowest {0} Scores'.format(clip)) axes[1].hist(scores, bins=15) axes[1].set_xlabel('$d$, Frobenius Norm') axes[1].set_ylabel('Bin count of $d$') if clip is not None: axes[1].set_title('Histogram of all scores' ) f.tight_layout() f.savefig('scores.pdf') class Runner(object): def __init__(self, hashtags): self.hashtags = hashtags def __call__(self, k): htags = self.hashtags[:k] counties = frobenius_counties(htags) scores = [x[1] for x in counties] quants = mquantiles(scores) return quants def county_scores(k=None, relative=True, to_csv=True): if k is None: import sys try: k = int(sys.argv[1]) except IndexError: k = 50 N = 5000 lines = frobenius_hashtags(N) hashtags = 
[line[0] for line in lines] htags = hashtags[:k] counties = frobenius_counties(htags, relative=relative) import json d = {} for geoid, score, counts in counties: d[geoid] = score d['min'] = 0 d['max'] = 1 with open('json/grids.counties.bot_filtered.fisherscores.json', 'w') as f: json.dump(d, f) d = {} for geoid, score, counts in counties: d[geoid] = (score, counts) if to_csv: lines = [] for geoid, score, counts in counties: line = [geoid, score] line.extend(counts) line = map(str, line) lines.append(','.join(line)) header = '# geoid,{0}fisher score, [counts]' if relative: header = header.format(' relative ') else: header = header.format(' ') lines.insert(0, header) filename = 'hashtag_fisherscores_relative_n{0}.csv' filename = filename.format(k) with open(filename, 'w') as f: f.write('\n'.join(lines)) return htags, counties, d def county_quants(k=None): if k is None: import sys try: k = int(sys.argv[1]) except IndexError: k = 50 N = 5000 lines = frobenius_hashtags(N) hashtags = [line[0] for line in lines] from multiprocessing import Pool import json p = Pool(22) kvals = range(10, 205, 5) runner = Runner(hashtags) quants = p.map(runner, kvals) quants = map(list, quants) d = [kvals, quants] with open('quants.json', 'w') as f: json.dump(d, f) return kvals, quants def plot_quants(kvals, quants): import matplotlib.pyplot as plt import seaborn quants = np.array(quants) quants = quants.transpose() plt.plot(quants[0], 'o-', label="25th percentile") plt.plot(quants[1], 'o-', label="50th percentile") plt.plot(quants[2], 'o-', label="75th percentile") plt.ylabel('Relative Fisher Score') plt.xlabel('Number of Hashtags') plt.legend(loc='best') plt.savefig('fisherscores.pdf')
unlicense
jaruba/chromium.src
third_party/markupsafe/__init__.py
371
8205
# -*- coding: utf-8 -*- """ markupsafe ~~~~~~~~~~ Implements a Markup string. :copyright: (c) 2010 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import re from markupsafe._compat import text_type, string_types, int_types, \ unichr, PY2 __all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent'] _striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') _entity_re = re.compile(r'&([^;]+);') class Markup(text_type): r"""Marks a string as being safe for inclusion in HTML/XML output without needing to be escaped. This implements the `__html__` interface a couple of frameworks and web applications use. :class:`Markup` is a direct subclass of `unicode` and provides all the methods of `unicode` just that it escapes arguments passed and always returns `Markup`. The `escape` function returns markup objects so that double escaping can't happen. The constructor of the :class:`Markup` class can be used for three different things: When passed an unicode object it's assumed to be safe, when passed an object with an HTML representation (has an `__html__` method) that representation is used, otherwise the object passed is converted into a unicode string and then assumed to be safe: >>> Markup("Hello <em>World</em>!") Markup(u'Hello <em>World</em>!') >>> class Foo(object): ... def __html__(self): ... return '<a href="#">foo</a>' ... 
>>> Markup(Foo()) Markup(u'<a href="#">foo</a>') If you want object passed being always treated as unsafe you can use the :meth:`escape` classmethod to create a :class:`Markup` object: >>> Markup.escape("Hello <em>World</em>!") Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!') Operations on a markup string are markup aware which means that all arguments are passed through the :func:`escape` function: >>> em = Markup("<em>%s</em>") >>> em % "foo & bar" Markup(u'<em>foo &amp; bar</em>') >>> strong = Markup("<strong>%(text)s</strong>") >>> strong % {'text': '<blink>hacker here</blink>'} Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>') >>> Markup("<em>Hello</em> ") + "<foo>" Markup(u'<em>Hello</em> &lt;foo&gt;') """ __slots__ = () def __new__(cls, base=u'', encoding=None, errors='strict'): if hasattr(base, '__html__'): base = base.__html__() if encoding is None: return text_type.__new__(cls, base) return text_type.__new__(cls, base, encoding, errors) def __html__(self): return self def __add__(self, other): if isinstance(other, string_types) or hasattr(other, '__html__'): return self.__class__(super(Markup, self).__add__(self.escape(other))) return NotImplemented def __radd__(self, other): if hasattr(other, '__html__') or isinstance(other, string_types): return self.escape(other).__add__(self) return NotImplemented def __mul__(self, num): if isinstance(num, int_types): return self.__class__(text_type.__mul__(self, num)) return NotImplemented __rmul__ = __mul__ def __mod__(self, arg): if isinstance(arg, tuple): arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg) else: arg = _MarkupEscapeHelper(arg, self.escape) return self.__class__(text_type.__mod__(self, arg)) def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, text_type.__repr__(self) ) def join(self, seq): return self.__class__(text_type.join(self, map(self.escape, seq))) join.__doc__ = text_type.join.__doc__ def split(self, *args, **kwargs): return list(map(self.__class__, 
text_type.split(self, *args, **kwargs))) split.__doc__ = text_type.split.__doc__ def rsplit(self, *args, **kwargs): return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs))) rsplit.__doc__ = text_type.rsplit.__doc__ def splitlines(self, *args, **kwargs): return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs))) splitlines.__doc__ = text_type.splitlines.__doc__ def unescape(self): r"""Unescape markup again into an text_type string. This also resolves known HTML4 and XHTML entities: >>> Markup("Main &raquo; <em>About</em>").unescape() u'Main \xbb <em>About</em>' """ from markupsafe._constants import HTML_ENTITIES def handle_match(m): name = m.group(1) if name in HTML_ENTITIES: return unichr(HTML_ENTITIES[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, text_type(self)) def striptags(self): r"""Unescape markup into an text_type string and strip all tags. This also resolves known HTML4 and XHTML entities. Whitespace is normalized to one: >>> Markup("Main &raquo; <em>About</em>").striptags() u'Main \xbb About' """ stripped = u' '.join(_striptags_re.sub('', self).split()) return Markup(stripped).unescape() @classmethod def escape(cls, s): """Escape the string. Works like :func:`escape` with the difference that for subclasses of :class:`Markup` this function would return the correct subclass. 
""" rv = escape(s) if rv.__class__ is not cls: return cls(rv) return rv def make_wrapper(name): orig = getattr(text_type, name) def func(self, *args, **kwargs): args = _escape_argspec(list(args), enumerate(args), self.escape) #_escape_argspec(kwargs, kwargs.iteritems(), None) return self.__class__(orig(self, *args, **kwargs)) func.__name__ = orig.__name__ func.__doc__ = orig.__doc__ return func for method in '__getitem__', 'capitalize', \ 'title', 'lower', 'upper', 'replace', 'ljust', \ 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \ 'translate', 'expandtabs', 'swapcase', 'zfill': locals()[method] = make_wrapper(method) # new in python 2.5 if hasattr(text_type, 'partition'): def partition(self, sep): return tuple(map(self.__class__, text_type.partition(self, self.escape(sep)))) def rpartition(self, sep): return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep)))) # new in python 2.6 if hasattr(text_type, 'format'): format = make_wrapper('format') # not in python 3 if hasattr(text_type, '__getslice__'): __getslice__ = make_wrapper('__getslice__') del method, make_wrapper def _escape_argspec(obj, iterable, escape): """Helper for various string-wrapped functions.""" for key, value in iterable: if hasattr(value, '__html__') or isinstance(value, string_types): obj[key] = escape(value) return obj class _MarkupEscapeHelper(object): """Helper for Markup.__mod__""" def __init__(self, obj, escape): self.obj = obj self.escape = escape __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape) __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj)) __repr__ = lambda s: str(s.escape(repr(s.obj))) __int__ = lambda s: int(s.obj) __float__ = lambda s: float(s.obj) # we have to import it down here as the speedups and native # modules imports the markup type which is define above. 
# Prefer the compiled C speedups when available; fall back to the
# behaviorally-identical pure-Python implementations otherwise.
try:
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    from markupsafe._native import escape, escape_silent, soft_unicode

if not PY2:
    # On Python 3, also expose `soft_str` as a natively-named alias and
    # add it to the public API.
    soft_str = soft_unicode
    __all__.append('soft_str')
bsd-3-clause
wpjesus/codematch
ietf/release/views.py
1
2562
import os
import re
import json
import datetime
import gzip

from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from django.http import HttpResponse

import changelog


def trac_links(text):
    """Replace Trac-style references in *text* with HTML hyperlinks."""
    # changeset links: "[1234]" -> link to the Trac changeset page
    text = re.sub(r'\[(\d+)\]', r'<a href="https://wiki.tools.ietf.org/tools/ietfdb/changeset/\1">[\1]</a>', text)
    # issue links: "#1234" -> link to the Trac ticket page
    text = re.sub(r'#(\d+)', r'<a href="https://wiki.tools.ietf.org/tools/ietfdb/ticket/\1">#\1</a>', text)
    return text


def release(request, version=None):
    """Render the release-notes page for *version*.

    Falls back to the most recent release when *version* is omitted or
    unknown, and attaches code-coverage details when the corresponding
    artifacts exist on disk.
    """
    entries = {}
    if os.path.exists(settings.CHANGELOG_PATH):
        log_entries = changelog.parse(settings.CHANGELOG_PATH)
    else:
        return HttpResponse("Error: changelog file %s not found" % settings.CHANGELOG_PATH)
    # Thread the entries into a doubly linked list so the template can
    # navigate between adjacent releases (log_entries is newest-first).
    next = None
    for entry in log_entries:
        if next:
            next.prev = entry
        entry.next = next
        next = entry
    entries = dict((entry.version, entry) for entry in log_entries)
    if version == None or version not in entries:
        # Unknown or missing version: show the latest release.
        version = log_entries[0].version
    entries[version].logentry = trac_links(entries[version].logentry.strip('\n'))
    # The code-coverage report is only advertised for the latest release.
    code_coverage_url = None
    code_coverage_time = None
    if os.path.exists(settings.TEST_CODE_COVERAGE_REPORT_FILE) and version == log_entries[0].version:
        code_coverage_url = settings.TEST_CODE_COVERAGE_REPORT_URL
        code_coverage_time = datetime.datetime.fromtimestamp(os.path.getmtime(settings.TEST_CODE_COVERAGE_REPORT_FILE))
    coverage = {}
    if os.path.exists(settings.TEST_COVERAGE_MASTER_FILE):
        # The master coverage file may be stored gzip-compressed.
        if settings.TEST_COVERAGE_MASTER_FILE.endswith(".gz"):
            with gzip.open(settings.TEST_COVERAGE_MASTER_FILE, "rb") as file:
                coverage_data = json.load(file)
        else:
            with open(settings.TEST_COVERAGE_MASTER_FILE) as file:
                coverage_data = json.load(file)
        if version in coverage_data:
            coverage = coverage_data[version]
            # Express coverage ratios as percentages for display.
            for key in coverage:
                if "coverage" in coverage[key]:
                    coverage[key]["percentage"] = coverage[key]["coverage"] * 100
    return render_to_response('release/release.html', {
        'releases': log_entries,
        'version': version,
        'entry': entries[version],
        'coverage': coverage,
        'code_coverage_url': code_coverage_url,
        'code_coverage_time': code_coverage_time,
    }, context_instance=RequestContext(request))
bsd-3-clause
Midafi/scikit-image
skimage/viewer/utils/core.py
18
6556
import warnings import numpy as np from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg import matplotlib as mpl from matplotlib.figure import Figure from matplotlib import _pylab_helpers from matplotlib.colors import LinearSegmentedColormap if has_qt and 'agg' not in mpl.get_backend().lower(): warnings.warn("Recommended matplotlib backend is `Agg` for full " "skimage.viewer functionality.") __all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage', 'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot', 'update_axes_image'] QApp = None def init_qtapp(): """Initialize QAppliction. The QApplication needs to be initialized before creating any QWidgets """ global QApp QApp = QtWidgets.QApplication.instance() if QApp is None: QApp = QtWidgets.QApplication([]) return QApp def is_event_loop_running(app=None): """Return True if event loop is running.""" if app is None: app = init_qtapp() if hasattr(app, '_in_event_loop'): return app._in_event_loop else: return False def start_qtapp(app=None): """Start Qt mainloop""" if app is None: app = init_qtapp() if not is_event_loop_running(app): app._in_event_loop = True app.exec_() app._in_event_loop = False else: app._in_event_loop = True class RequiredAttr(object): """A class attribute that must be set before use.""" instances = dict() def __init__(self, init_val=None): self.instances[self, None] = init_val def __get__(self, obj, objtype): value = self.instances[self, obj] if value is None: raise AttributeError('Required attribute not set') return value def __set__(self, obj, value): self.instances[self, obj] = value class LinearColormap(LinearSegmentedColormap): """LinearSegmentedColormap in which color varies smoothly. This class is a simplification of LinearSegmentedColormap, which doesn't support jumps in color intensities. Parameters ---------- name : str Name of colormap. segmented_data : dict Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values. 
Each color key contains a list of `x`, `y` tuples. `x` must increase monotonically from 0 to 1 and corresponds to input values for a mappable object (e.g. an image). `y` corresponds to the color intensity. """ def __init__(self, name, segmented_data, **kwargs): segmented_data = dict((key, [(x, y, y) for x, y in value]) for key, value in segmented_data.items()) LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs) class ClearColormap(LinearColormap): """Color map that varies linearly from alpha = 0 to 1 """ def __init__(self, rgb, max_alpha=1, name='clear_color'): r, g, b = rgb cg_speq = {'blue': [(0.0, b), (1.0, b)], 'green': [(0.0, g), (1.0, g)], 'red': [(0.0, r), (1.0, r)], 'alpha': [(0.0, 0.0), (1.0, max_alpha)]} LinearColormap.__init__(self, name, cg_speq) class FigureCanvas(FigureCanvasQTAgg): """Canvas for displaying images.""" def __init__(self, figure, **kwargs): self.fig = figure FigureCanvasQTAgg.__init__(self, self.fig) FigureCanvasQTAgg.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) FigureCanvasQTAgg.updateGeometry(self) def resizeEvent(self, event): FigureCanvasQTAgg.resizeEvent(self, event) # Call to `resize_event` missing in FigureManagerQT. # See https://github.com/matplotlib/matplotlib/pull/1585 self.resize_event() def new_canvas(*args, **kwargs): """Return a new figure canvas.""" allnums = _pylab_helpers.Gcf.figs.keys() num = max(allnums) + 1 if allnums else 1 FigureClass = kwargs.pop('FigureClass', Figure) figure = FigureClass(*args, **kwargs) canvas = FigureCanvas(figure) fig_manager = FigureManagerQT(canvas, num) return fig_manager.canvas def new_plot(parent=None, subplot_kw=None, **fig_kw): """Return new figure and axes. Parameters ---------- parent : QtWidget Qt widget that displays the plot objects. If None, you must manually call ``canvas.setParent`` and pass the parent widget. subplot_kw : dict Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``. 
fig_kw : dict Keyword arguments passed ``matplotlib.figure.Figure``. """ if subplot_kw is None: subplot_kw = {} canvas = new_canvas(**fig_kw) canvas.setParent(parent) fig = canvas.figure ax = fig.add_subplot(1, 1, 1, **subplot_kw) return fig, ax def figimage(image, scale=1, dpi=None, **kwargs): """Return figure and axes with figure tightly surrounding image. Unlike pyplot.figimage, this actually plots onto an axes object, which fills the figure. Plotting the image onto an axes allows for subsequent overlays of axes artists. Parameters ---------- image : array image to plot scale : float If scale is 1, the figure and axes have the same dimension as the image. Smaller values of `scale` will shrink the figure. dpi : int Dots per inch for figure. If None, use the default rcParam. """ dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi'] kwargs.setdefault('interpolation', 'nearest') kwargs.setdefault('cmap', 'gray') h, w, d = np.atleast_3d(image).shape figsize = np.array((w, h), dtype=float) / dpi * scale fig, ax = new_plot(figsize=figsize, dpi=dpi) fig.subplots_adjust(left=0, bottom=0, right=1, top=1) ax.set_axis_off() ax.imshow(image, **kwargs) ax.figure.canvas.draw() return fig, ax def update_axes_image(image_axes, image): """Update the image displayed by an image plot. This sets the image plot's array and updates its shape appropriately Parameters ---------- image_axes : `matplotlib.image.AxesImage` Image axes to update. image : array Image array. """ image_axes.set_array(image) # Adjust size if new image shape doesn't match the original h, w = image.shape[:2] image_axes.set_extent((0, w, h, 0))
bsd-3-clause
CLOUGH/info3180-project-2
lib/werkzeug/utils.py
317
22676
# -*- coding: utf-8 -*- """ werkzeug.utils ~~~~~~~~~~~~~~ This module implements various utilities for WSGI applications. Most of them are used by the request and response wrappers but especially for middleware development it makes sense to use them without the wrappers. :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import os import sys import pkgutil try: from html.entities import name2codepoint except ImportError: from htmlentitydefs import name2codepoint from werkzeug._compat import unichr, text_type, string_types, iteritems, \ reraise, PY2 from werkzeug._internal import _DictAccessorProperty, \ _parse_signature, _missing _format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2)) _entity_re = re.compile(r'&([^;]+);') _filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]') _windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL') class cached_property(object): """A decorator that converts a function into a lazy property. The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value:: class Foo(object): @cached_property def foo(self): # calculate something important here return 42 The class has to have a `__dict__` in order for this property to work. """ # implementation detail: this property is implemented as non-data # descriptor. non-data descriptors are only invoked if there is # no entry with the same name in the instance's __dict__. # this allows us to completely get rid of the access function call # overhead. If one choses to invoke __get__ by hand the property # will still work as expected because the lookup logic is replicated # in __get__ for manual invocation. 
def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func def __get__(self, obj, type=None): if obj is None: return self value = obj.__dict__.get(self.__name__, _missing) if value is _missing: value = self.func(obj) obj.__dict__[self.__name__] = value return value class environ_property(_DictAccessorProperty): """Maps request attributes to environment variables. This works not only for the Werzeug request object, but also any other class with an environ attribute: >>> class Test(object): ... environ = {'key': 'value'} ... test = environ_property('key') >>> var = Test() >>> var.test 'value' If you pass it a second value it's used as default if the key does not exist, the third one can be a converter that takes a value and converts it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value is used. If no default value is provided `None` is used. Per default the property is read only. You have to explicitly enable it by passing ``read_only=False`` to the constructor. """ read_only = True def lookup(self, obj): return obj.environ class header_property(_DictAccessorProperty): """Like `environ_property` but for headers.""" def lookup(self, obj): return obj.headers class HTMLBuilder(object): """Helper object for HTML generation. Per default there are two instances of that class. The `html` one, and the `xhtml` one for those two dialects. The class uses keyword parameters and positional parameters to generate small snippets of HTML. Keyword parameters are converted to XML/SGML attributes, positional arguments are used as children. Because Python accepts positional arguments before keyword arguments it's a good idea to use a list with the star-syntax for some children: >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ', ... 
html.a('bar', href='bar.html')]) u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>' This class works around some browser limitations and can not be used for arbitrary SGML/XML generation. For that purpose lxml and similar libraries exist. Calling the builder escapes the string passed: >>> html.p(html("<foo>")) u'<p>&lt;foo&gt;</p>' """ _entity_re = re.compile(r'&([^;]+);') _entities = name2codepoint.copy() _entities['apos'] = 39 _empty_elements = set([ 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param', 'source', 'wbr' ]) _boolean_attributes = set([ 'selected', 'checked', 'compact', 'declare', 'defer', 'disabled', 'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap' ]) _plaintext_elements = set(['textarea']) _c_like_cdata = set(['script', 'style']) def __init__(self, dialect): self._dialect = dialect def __call__(self, s): return escape(s) def __getattr__(self, tag): if tag[:2] == '__': raise AttributeError(tag) def proxy(*children, **arguments): buffer = '<' + tag for key, value in iteritems(arguments): if value is None: continue if key[-1] == '_': key = key[:-1] if key in self._boolean_attributes: if not value: continue if self._dialect == 'xhtml': value = '="' + key + '"' else: value = '' else: value = '="' + escape(value) + '"' buffer += ' ' + key + value if not children and tag in self._empty_elements: if self._dialect == 'xhtml': buffer += ' />' else: buffer += '>' return buffer buffer += '>' children_as_string = ''.join([text_type(x) for x in children if x is not None]) if children_as_string: if tag in self._plaintext_elements: children_as_string = escape(children_as_string) elif tag in self._c_like_cdata and self._dialect == 'xhtml': children_as_string = '/*<![CDATA[*/' + \ children_as_string + '/*]]>*/' buffer += children_as_string + '</' + tag + '>' return buffer return proxy def __repr__(self): return '<%s for %r>' % ( 
self.__class__.__name__, self._dialect ) html = HTMLBuilder('html') xhtml = HTMLBuilder('xhtml') def get_content_type(mimetype, charset): """Return the full content type string with charset for a mimetype. If the mimetype represents text the charset will be appended as charset parameter, otherwise the mimetype is returned unchanged. :param mimetype: the mimetype to be used as content type. :param charset: the charset to be appended in case it was a text mimetype. :return: the content type. """ if mimetype.startswith('text/') or \ mimetype == 'application/xml' or \ (mimetype.startswith('application/') and mimetype.endswith('+xml')): mimetype += '; charset=' + charset return mimetype def format_string(string, context): """String-template format a string: >>> format_string('$foo and ${foo}s', dict(foo=42)) '42 and 42s' This does not do any attribute lookup etc. For more advanced string formattings have a look at the `werkzeug.template` module. :param string: the format string. :param context: a dict with the variables to insert. """ def lookup_arg(match): x = context[match.group(1) or match.group(2)] if not isinstance(x, string_types): x = type(string)(x) return x return _format_re.sub(lookup_arg, string) def secure_filename(filename): r"""Pass it a filename and it will return a secure version of it. This filename can then safely be stored on a regular file system and passed to :func:`os.path.join`. The filename returned is an ASCII only string for maximum portability. On windows system the function also makes sure that the file is not named after one of the special device files. >>> secure_filename("My cool movie.mov") 'My_cool_movie.mov' >>> secure_filename("../../../etc/passwd") 'etc_passwd' >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') 'i_contain_cool_umlauts.txt' The function might return an empty filename. It's your responsibility to ensure that the filename is unique and that you generate random filename if the function returned an empty one. .. 
versionadded:: 0.5 :param filename: the filename to secure """ if isinstance(filename, text_type): from unicodedata import normalize filename = normalize('NFKD', filename).encode('ascii', 'ignore') if not PY2: filename = filename.decode('ascii') for sep in os.path.sep, os.path.altsep: if sep: filename = filename.replace(sep, ' ') filename = str(_filename_ascii_strip_re.sub('', '_'.join( filename.split()))).strip('._') # on nt a couple of special files are present in each folder. We # have to ensure that the target file is not such a filename. In # this case we prepend an underline if os.name == 'nt' and filename and \ filename.split('.')[0].upper() in _windows_device_files: filename = '_' + filename return filename def escape(s, quote=None): """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. There is a special handling for `None` which escapes to an empty string. .. versionchanged:: 0.9 `quote` is now implicitly on. :param s: the string to escape. :param quote: ignored. """ if s is None: return '' elif hasattr(s, '__html__'): return text_type(s.__html__()) elif not isinstance(s, string_types): s = text_type(s) if quote is not None: from warnings import warn warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2) s = s.replace('&', '&amp;').replace('<', '&lt;') \ .replace('>', '&gt;').replace('"', "&quot;") return s def unescape(s): """The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape. 
""" def handle_match(m): name = m.group(1) if name in HTMLBuilder._entities: return unichr(HTMLBuilder._entities[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, s) def redirect(location, code=302): """Return a response object (a WSGI application) that, if called, redirects the client to the target location. Supported codes are 301, 302, 303, 305, and 307. 300 is not supported because it's not a real redirect and 304 because it's the answer for a request with a request with defined If-Modified-Since headers. .. versionadded:: 0.6 The location can now be a unicode string that is encoded using the :func:`iri_to_uri` function. :param location: the location the response should redirect to. :param code: the redirect status code. defaults to 302. """ from werkzeug.wrappers import Response display_location = escape(location) if isinstance(location, text_type): from werkzeug.urls import iri_to_uri location = iri_to_uri(location) response = Response( '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' '<title>Redirecting...</title>\n' '<h1>Redirecting...</h1>\n' '<p>You should be redirected automatically to target URL: ' '<a href="%s">%s</a>. If not click the link.' % (escape(location), display_location), code, mimetype='text/html') response.headers['Location'] = location return response def append_slash_redirect(environ, code=301): """Redirect to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect. """ new_path = environ['PATH_INFO'].strip('/') + '/' query_string = environ.get('QUERY_STRING') if query_string: new_path += '?' 
+ query_string return redirect(new_path, code) def import_string(import_name, silent=False): """Imports an object based on a string. This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). If `silent` is True the return value will be `None` if the import fails. :param import_name: the dotted name for the object to import. :param silent: if set to `True` import errors are ignored and `None` is returned instead. :return: imported object """ #XXX: py3 review needed assert isinstance(import_name, string_types) # force the import name to automatically convert to strings import_name = str(import_name) try: if ':' in import_name: module, obj = import_name.split(':', 1) elif '.' in import_name: module, obj = import_name.rsplit('.', 1) else: return __import__(import_name) # __import__ is not able to handle unicode strings in the fromlist # if the module is a package if PY2 and isinstance(obj, unicode): obj = obj.encode('utf-8') try: return getattr(__import__(module, None, None, [obj]), obj) except (ImportError, AttributeError): # support importing modules not yet set up by the parent module # (or package for that matter) modname = module + '.' + obj __import__(modname) return sys.modules[modname] except ImportError as e: if not silent: reraise( ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]) def find_modules(import_path, include_packages=False, recursive=False): """Find all the modules below a package. This can be useful to automatically import all views / controllers so that their metaclasses / function decorators have a chance to register themselves on the application. Packages are not returned unless `include_packages` is `True`. This can also recursively list modules but in that case it will import all the packages to get the correct load path of that module. 
:param import_name: the dotted name for the package to find child modules. :param include_packages: set to `True` if packages should be returned, too. :param recursive: set to `True` if recursion should happen. :return: generator """ module = import_string(import_path) path = getattr(module, '__path__', None) if path is None: raise ValueError('%r is not a package' % import_path) basename = module.__name__ + '.' for importer, modname, ispkg in pkgutil.iter_modules(path): modname = basename + modname if ispkg: if include_packages: yield modname if recursive: for item in find_modules(modname, include_packages, True): yield item else: yield modname def validate_arguments(func, args, kwargs, drop_extra=True): """Check if the function accepts the arguments and keyword arguments. Returns a new ``(args, kwargs)`` tuple that can safely be passed to the function without causing a `TypeError` because the function signature is incompatible. If `drop_extra` is set to `True` (which is the default) any extra positional or keyword arguments are dropped automatically. The exception raised provides three attributes: `missing` A set of argument names that the function expected but where missing. `extra` A dict of keyword arguments that the function can not handle but where provided. `extra_positional` A list of values that where given by positional argument but the function cannot accept. This can be useful for decorators that forward user submitted data to a view function:: from werkzeug.utils import ArgumentValidationError, validate_arguments def sanitize(f): def proxy(request): data = request.values.to_dict() try: args, kwargs = validate_arguments(f, (request,), data) except ArgumentValidationError: raise BadRequest('The browser failed to transmit all ' 'the data expected.') return f(*args, **kwargs) return proxy :param func: the function the validation is performed against. :param args: a tuple of positional arguments. :param kwargs: a dict of keyword arguments. 
:param drop_extra: set to `False` if you don't want extra arguments to be silently dropped. :return: tuple in the form ``(args, kwargs)``. """ parser = _parse_signature(func) args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] if missing: raise ArgumentValidationError(tuple(missing)) elif (extra or extra_positional) and not drop_extra: raise ArgumentValidationError(None, extra, extra_positional) return tuple(args), kwargs def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. 
""" args, kwargs, missing, extra, extra_positional, \ arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs) values = {} for (name, has_default, default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError('too many positional arguments') if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError('got multiple values for keyword argument ' + repr(next(iter(multikw)))) values[kwarg_var] = extra elif extra: raise TypeError('got unexpected keyword argument ' + repr(next(iter(extra)))) return values class ArgumentValidationError(ValueError): """Raised if :func:`validate_arguments` fails to validate""" def __init__(self, missing=None, extra=None, extra_positional=None): self.missing = set(missing or ()) self.extra = extra or {} self.extra_positional = extra_positional or [] ValueError.__init__(self, 'function arguments invalid. (' '%d missing, %d additional)' % ( len(self.missing), len(self.extra) + len(self.extra_positional) )) class ImportStringError(ImportError): """Provides information about a failed :func:`import_string` attempt.""" #: String in dotted notation that failed to be imported. import_name = None #: Wrapped exception. exception = None def __init__(self, import_name, exception): self.import_name = import_name self.exception = exception msg = ( 'import_string() failed for %r. 
Possible reasons are:\n\n' '- missing __init__.py in a package;\n' '- package or module path not included in sys.path;\n' '- duplicated package or module name taking precedence in ' 'sys.path;\n' '- missing module, class, function or variable;\n\n' 'Debugged import:\n\n%s\n\n' 'Original exception:\n\n%s: %s') name = '' tracked = [] for part in import_name.replace(':', '.').split('.'): name += (name and '.') + part imported = import_string(name, silent=True) if imported: tracked.append((name, getattr(imported, '__file__', None))) else: track = ['- %r found in %r.' % (n, i) for n, i in tracked] track.append('- %r not found.' % name) msg = msg % (import_name, '\n'.join(track), exception.__class__.__name__, str(exception)) break ImportError.__init__(self, msg) def __repr__(self): return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name, self.exception) # circular dependencies from werkzeug.http import quote_header_value, unquote_header_value, \ cookie_date # DEPRECATED # these objects were previously in this module as well. we import # them here for backwards compatibility with old pickles. from werkzeug.datastructures import MultiDict, CombinedMultiDict, \ Headers, EnvironHeaders from werkzeug.http import parse_cookie, dump_cookie
apache-2.0
humanoid-path-planner/hpp-corbaserver
src/hpp/__init__.py
1
1269
# Copyright (c) 2012 CNRS # Author: Florent Lamiraux # # This file is part of hpp-corbaserver. # hpp-corbaserver is free software: you can redistribute it # and/or modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation, either version # 3 of the License, or (at your option) any later version. # # hpp-corbaserver is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Lesser Public License for more details. You should have # received a copy of the GNU Lesser General Public License along with # hpp-corbaserver. If not, see # <http://www.gnu.org/licenses/>. from .quaternion import Quaternion from .transform import Transform def retrieveRosResource(path): import os ros_package_paths = os.environ["ROS_PACKAGE_PATH"].split(':') if path.startswith("package://"): relpath = path[len("package://"):] for dir in ros_package_paths: abspath = os.path.join(dir,relpath) if os.path.exists(abspath): return abspath return IOError ("Could not find resource " + path) else: return path
lgpl-3.0
tdtrask/ansible
lib/ansible/modules/utilities/logic/set_stats.py
58
1935
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2016 Ansible RedHat, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: "Brian Coca (@bcoca)" module: set_stats short_description: Set stats for the current ansible run description: - This module allows setting/accumulating stats on the current ansible run, either per host of for all hosts in the run. - This module is also supported for Windows targets. options: data: description: - A dictionary of which each key represents a stat (or variable) you want to keep track of required: true per_host: description: - boolean that indicates if the stats is per host or for all hosts in the run. required: no default: no aggregate: description: - boolean that indicates if the provided value is aggregated to the existing stat C(yes) or will replace it C(no) required: no default: yes notes: - This module is also supported for Windows targets. - In order for custom stats to be displayed, you must set C(show_custom_stats) in C(ansible.cfg) or C(ANSIBLE_SHOW_CUSTOM_STATS) to C(true). version_added: "2.3" ''' EXAMPLES = ''' # Aggregating packages_installed stat per host - set_stats: data: packages_installed: 31 # Aggregating random stats for all hosts using complex arguments - set_stats: data: one_stat: 11 other_stat: "{{ local_var * 2 }}" another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}" per_host: no # setting stats (not aggregating) - set_stats: data: the_answer: 42 aggregate: no '''
gpl-3.0
Werkov/PyQt4
__init__.py
7
1111
# Copyright (c) 2011 Riverbank Computing Limited <info@riverbankcomputing.com> # # This file is part of PyQt. # # This file may be used under the terms of the GNU General Public # License versions 2.0 or 3.0 as published by the Free Software # Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3 # included in the packaging of this file. Alternatively you may (at # your option) use any later version of the GNU General Public # License if such license has been publicly approved by Riverbank # Computing Limited (or its successors, if any) and the KDE Free Qt # Foundation. In addition, as a special exception, Riverbank gives you # certain additional rights. These rights are described in the Riverbank # GPL Exception version 1.1, which can be found in the file # GPL_EXCEPTION.txt in this package. # # If you are unsure which license is appropriate for your use, please # contact the sales department at sales@riverbankcomputing.com. # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
gpl-2.0
dyoung418/tensorflow
tensorflow/contrib/model_pruning/python/layers/rnn_cells_test.py
35
3609
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for creating different number of masks in rnn_cells.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.model_pruning.python import pruning from tensorflow.contrib.model_pruning.python.layers import rnn_cells from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell as tf_rnn_cells from tensorflow.python.ops import variables from tensorflow.python.platform import test class RnnCellsTest(test.TestCase): def setUp(self): super(RnnCellsTest, self).setUp() self.batch_size = 8 self.dim = 10 def testMaskedBasicLSTMCell(self): expected_num_masks = 1 expected_num_rows = 2 * self.dim expected_num_cols = 4 * self.dim with self.test_session(): inputs = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) c = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) h = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) state = tf_rnn_cells.LSTMStateTuple(c, h) lstm_cell = rnn_cells.MaskedBasicLSTMCell(self.dim) lstm_cell(inputs, state) self.assertEqual(len(pruning.get_masks()), expected_num_masks) self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks) self.assertEqual(len(pruning.get_thresholds()), 
expected_num_masks) self.assertEqual(len(pruning.get_weights()), expected_num_masks) for mask in pruning.get_masks(): self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols)) for weight in pruning.get_weights(): self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols)) def testMaskedLSTMCell(self): expected_num_masks = 1 expected_num_rows = 2 * self.dim expected_num_cols = 4 * self.dim with self.test_session(): inputs = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) c = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) h = variables.Variable( random_ops.random_normal([self.batch_size, self.dim])) state = tf_rnn_cells.LSTMStateTuple(c, h) lstm_cell = rnn_cells.MaskedLSTMCell(self.dim) lstm_cell(inputs, state) self.assertEqual(len(pruning.get_masks()), expected_num_masks) self.assertEqual(len(pruning.get_masked_weights()), expected_num_masks) self.assertEqual(len(pruning.get_thresholds()), expected_num_masks) self.assertEqual(len(pruning.get_weights()), expected_num_masks) for mask in pruning.get_masks(): self.assertEqual(mask.shape, (expected_num_rows, expected_num_cols)) for weight in pruning.get_weights(): self.assertEqual(weight.shape, (expected_num_rows, expected_num_cols)) if __name__ == '__main__': test.main()
apache-2.0
rawWhipIT/p22-goldant-buildpack
vendor/setuptools-7.0/setup.py
1
8423
#!/usr/bin/env python """Distutils setup file, used to install or test 'setuptools'""" import io import os import sys import textwrap import contextlib # Allow to run setup.py from another directory. os.chdir(os.path.dirname(os.path.abspath(__file__))) src_root = None from distutils.util import convert_path command_ns = {} init_path = convert_path('setuptools/command/__init__.py') with open(init_path) as init_file: exec(init_file.read(), command_ns) SETUP_COMMANDS = command_ns['__all__'] main_ns = {} ver_path = convert_path('setuptools/version.py') with open(ver_path) as ver_file: exec(ver_file.read(), main_ns) import setuptools from setuptools.command.build_py import build_py as _build_py from setuptools.command.test import test as _test scripts = [] def _gen_console_scripts(): yield "easy_install = setuptools.command.easy_install:main" # Gentoo distributions manage the python-version-specific scripts # themselves, so those platforms define an environment variable to # suppress the creation of the version-specific scripts. 
var_names = ( 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT', 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT', ) if any(os.environ.get(var) not in (None, "", "0") for var in var_names): return yield ("easy_install-{shortver} = setuptools.command.easy_install:main" .format(shortver=sys.version[:3])) console_scripts = list(_gen_console_scripts()) # specific command that is used to generate windows .exe files class build_py(_build_py): def build_package_data(self): """Copy data files into build directory""" for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) srcfile = os.path.join(src_dir, filename) outf, copied = self.copy_file(srcfile, target) srcfile = os.path.abspath(srcfile) class test(_test): """Specific test class to avoid rewriting the entry_points.txt""" def run(self): with self._save_entry_points(): _test.run(self) @contextlib.contextmanager def _save_entry_points(self): entry_points = os.path.join('setuptools.egg-info', 'entry_points.txt') if not os.path.exists(entry_points): yield return # save the content with open(entry_points, 'rb') as f: ep_content = f.read() # run the tests try: yield finally: # restore the file with open(entry_points, 'wb') as f: f.write(ep_content) readme_file = io.open('README.txt', encoding='utf-8') # The release script adds hyperlinks to issues, # but if the release script has not run, fall back to the source file changes_names = 'CHANGES (links).txt', 'CHANGES.txt' changes_fn = next(iter(filter(os.path.exists, changes_names))) changes_file = io.open(changes_fn, encoding='utf-8') with readme_file: with changes_file: long_description = readme_file.read() + '\n' + changes_file.read() package_data = { 'setuptools': ['script (dev).tmpl', 'script.tmpl', 'site-patch.py']} force_windows_specific_files = ( os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES") not in (None, "", "0") ) if sys.platform == 
'win32' or force_windows_specific_files: package_data.setdefault('setuptools', []).extend(['*.exe']) package_data.setdefault('setuptools.command', []).extend(['*.xml']) pytest_runner = ['pytest-runner'] if 'ptr' in sys.argv else [] setup_params = dict( name="setuptools", version=main_ns['__version__'], description="Easily download, build, install, upgrade, and uninstall " "Python packages", author="Python Packaging Authority", author_email="distutils-sig@python.org", license="PSF or ZPL", long_description=long_description, keywords="CPAN PyPI distutils eggs package management", url="https://bitbucket.org/pypa/setuptools", test_suite='setuptools.tests', src_root=src_root, packages=setuptools.find_packages(), package_data=package_data, py_modules=['pkg_resources', 'easy_install'], zip_safe=True, cmdclass={'test': test}, entry_points={ "distutils.commands": [ "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals() for cmd in SETUP_COMMANDS ], "distutils.setup_keywords": [ "eager_resources = setuptools.dist:assert_string_list", "namespace_packages = setuptools.dist:check_nsp", "extras_require = setuptools.dist:check_extras", "install_requires = setuptools.dist:check_requirements", "tests_require = setuptools.dist:check_requirements", "setup_requires = setuptools.dist:check_requirements", "entry_points = setuptools.dist:check_entry_points", "test_suite = setuptools.dist:check_test_suite", "zip_safe = setuptools.dist:assert_bool", "package_data = setuptools.dist:check_package_data", "exclude_package_data = setuptools.dist:check_package_data", "include_package_data = setuptools.dist:assert_bool", "packages = setuptools.dist:check_packages", "dependency_links = setuptools.dist:assert_string_list", "test_loader = setuptools.dist:check_importable", "test_runner = setuptools.dist:check_importable", "use_2to3 = setuptools.dist:assert_bool", "convert_2to3_doctests = setuptools.dist:assert_string_list", "use_2to3_fixers = setuptools.dist:assert_string_list", 
"use_2to3_exclude_fixers = setuptools.dist:assert_string_list", ], "egg_info.writers": [ "PKG-INFO = setuptools.command.egg_info:write_pkg_info", "requires.txt = setuptools.command.egg_info:write_requirements", "entry_points.txt = setuptools.command.egg_info:write_entries", "eager_resources.txt = setuptools.command.egg_info:overwrite_arg", "namespace_packages.txt = setuptools.command.egg_info:overwrite_arg", "top_level.txt = setuptools.command.egg_info:write_toplevel_names", "depends.txt = setuptools.command.egg_info:warn_depends_obsolete", "dependency_links.txt = setuptools.command.egg_info:overwrite_arg", ], "console_scripts": console_scripts, "setuptools.file_finders": ["svn_cvs = setuptools.command.sdist:_default_revctrl"], "setuptools.installation": ['eggsecutable = setuptools.command.easy_install:bootstrap'], }, classifiers=textwrap.dedent(""" Development Status :: 5 - Production/Stable Intended Audience :: Developers License :: OSI Approved :: Python Software Foundation License License :: OSI Approved :: Zope Public License Operating System :: OS Independent Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.1 Programming Language :: Python :: 3.2 Programming Language :: Python :: 3.3 Programming Language :: Python :: 3.4 Topic :: Software Development :: Libraries :: Python Modules Topic :: System :: Archiving :: Packaging Topic :: System :: Systems Administration Topic :: Utilities """).strip().splitlines(), extras_require={ "ssl:sys_platform=='win32'": "wincertstore==0.2", "certs": "certifi==1.0.1", }, dependency_links=[ 'https://pypi.python.org/packages/source/c/certifi/certifi-1.0.1.tar.gz#md5=45f5cb94b8af9e1df0f9450a8f61b790', 'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2', ], scripts=[], tests_require=[ 'setuptools[ssl]', 'pytest', ], setup_requires=[ ] + pytest_runner, ) if __name__ == 
'__main__': dist = setuptools.setup(**setup_params)
mit
fabiencro/knmt
nmt_chainer/training_module/train.py
1
27145
#!/usr/bin/env python """train.py: Train a RNNSearch Model""" from __future__ import absolute_import, division, print_function, unicode_literals __author__ = "Fabien Cromieres" __license__ = "undecided" __version__ = "1.0" __email__ = "fabien.cromieres@gmail.com" __status__ = "Development" import chainer from chainer import cuda, optimizers, serializers from .training import train_on_data from nmt_chainer.dataprocessing.indexer import Indexer from nmt_chainer.utilities.file_infos import create_filename_infos from nmt_chainer.utilities.argument_parsing_tools import OrderedNamespace import nmt_chainer.models.feedforward.encoder_decoder import numpy as np import logging import json import os.path import gzip import sys import pprint import time import os.path # import h5py from nmt_chainer.utilities.utils import ensure_path # , make_batch_src_tgt, make_batch_src, minibatch_provider, compute_bleu_with_unk_as_wrong,de_batch # from evaluation import ( # greedy_batch_translate, convert_idx_to_string, # compute_loss_all, translate_to_file, sample_once) import nmt_chainer.models.attention import nmt_chainer.models.encoder_decoder import nmt_chainer.models.rnn_cells as rnn_cells import nmt_chainer.dataprocessing.processors as processors import nmt_chainer.utilities.profiling_tools as profiling_tools logging.basicConfig() log = logging.getLogger("rnns:train") log.setLevel(logging.INFO) def generate_lexical_probability_dictionary_indexed(lexical_probability_dictionary_all, src_indexer, tgt_indexer): log.info("computing lexical_probability_dictionary_indexed") lexical_probability_dictionary_indexed = {} for ws in lexical_probability_dictionary_all: ws_idx_array = src_indexer.convert(ws) if len(ws_idx_array) > 1: log.warning("Converting an entry of the lexical probability dictionary resulted in several ids. 
" "Be aware that --lexical_probability_dictionary option is not fully compatible " "with fancier preprocessing options such as BPE.") ws_idx = ws_idx_array[0] if ws_idx in lexical_probability_dictionary_indexed: assert src_indexer.is_unk_idx(ws_idx) else: lexical_probability_dictionary_indexed[ws_idx] = {} for wt in lexical_probability_dictionary_all[ws]: wt_idx_array = tgt_indexer.convert(wt) if len(wt_idx_array) > 1: log.warning("Converting an entry of the lexical probability dictionary resulted in several ids. " "Be aware that --lexical_probability_dictionary option is not fully compatible " "with fancier preprocessing options such as BPE.") wt_idx = wt_idx_array[0] if wt_idx in lexical_probability_dictionary_indexed[ws_idx]: assert src_indexer.is_unk_idx( ws_idx) or tgt_indexer.is_unk_idx(wt_idx) lexical_probability_dictionary_indexed[ws_idx][wt_idx] += lexical_probability_dictionary_all[ws][wt] else: lexical_probability_dictionary_indexed[ws_idx][wt_idx] = lexical_probability_dictionary_all[ws][wt] return lexical_probability_dictionary_indexed def create_encdec_from_config_dict(config_dict, src_indexer, tgt_indexer): Vi = len(src_indexer) # + UNK Vo = len(tgt_indexer) # + UNK if config_dict.get("use_ff_model", False): d_model = config_dict["ff_d_model"] n_heads = config_dict["ff_n_heads"] nb_layers_src = config_dict["ff_nb_layers_src"] nb_layers_tgt = config_dict["ff_nb_layers_tgt"] use_exp_relu = config_dict["ff_use_exp_relu"] dropout = config_dict["ff_dropout"] d_ff = config_dict.get("ff_d_ff", 2048) if config_dict.get("use_own_layer_normalization", False): from nmt_chainer.additional_links.layer_normalization import turn_on_own_layer_normalization turn_on_own_layer_normalization() no_add = config_dict.get("ff_no_add", False) #backward compatibility if no_add: residual_mode = None else: residual_mode = config_dict.get("ff_residual_mode", "normal") no_normalize = config_dict.get("ff_no_normalize", False) encdec = 
nmt_chainer.models.feedforward.encoder_decoder.EncoderDecoder(Vi, Vo, d_model=d_model, n_heads=n_heads, d_ff=d_ff, experimental_relu=use_exp_relu, dropout=dropout, nb_layers_src=nb_layers_src, nb_layers_tgt=nb_layers_tgt, residual_mode = residual_mode, no_normalize = no_normalize) else: Ei = config_dict["Ei"] Hi = config_dict["Hi"] Eo = config_dict["Eo"] Ho = config_dict["Ho"] Ha = config_dict["Ha"] Hl = config_dict["Hl"] encoder_cell_type = config_dict.get("encoder_cell_type", "gru") decoder_cell_type = config_dict.get("decoder_cell_type", "gru") use_bn_length = config_dict.get("use_bn_length", None) # Selecting Attention type attn_cls = nmt_chainer.models.attention.AttentionModule if config_dict.get("use_accumulated_attn", False): raise NotImplemented if config_dict.get("use_deep_attn", False): attn_cls = nmt_chainer.models.attention.DeepAttentionModule init_orth = config_dict.get("init_orth", False) if "lexical_probability_dictionary" in config_dict and config_dict["lexical_probability_dictionary"] is not None: log.info("opening lexical_probability_dictionary %s" % config_dict["lexical_probability_dictionary"]) with gzip.open(config_dict["lexical_probability_dictionary"], "rb") as dict_file: lexical_probability_dictionary_all = json.loads(dict_file.read().decode('utf-8')) lexical_probability_dictionary = generate_lexical_probability_dictionary_indexed( lexical_probability_dictionary_all, src_indexer, tgt_indexer) else: lexical_probability_dictionary = None lex_epsilon = config_dict.get("lexicon_prob_epsilon", 0.001) use_goto_attention = config_dict.get("use_goto_attention", False) # Creating encoder/decoder encdec = nmt_chainer.models.encoder_decoder.EncoderDecoder(Vi, Ei, Hi, Vo + 1, Eo, Ho, Ha, Hl, use_bn_length=use_bn_length, attn_cls=attn_cls, init_orth=init_orth, encoder_cell_type=rnn_cells.create_cell_model_from_config(encoder_cell_type), decoder_cell_type=rnn_cells.create_cell_model_from_config(decoder_cell_type), 
lexical_probability_dictionary=lexical_probability_dictionary, lex_epsilon=lex_epsilon, use_goto_attention=use_goto_attention) return encdec class NpzDeserializerAverage(chainer.serializer.Deserializer): def __init__(self, npz_list, path='', strict=True): self.npz_list = npz_list self.path = path self.strict = strict def __getitem__(self, key): key = key.strip('/') return NpzDeserializerAverage( self.npz_list, self.path + key + '/', strict=self.strict) def __call__(self, key, value): key = self.path + key.lstrip('/') if not self.strict and key not in self.npz: return value dataset = None for npz in self.npz_list: try: this_d = npz[key] except KeyError: this_d = npz["updater/model:main/"+key] if dataset is None: dataset = this_d else: dataset = dataset + this_d dataset /= len(self.npz_list) if value is None: return dataset elif isinstance(value, np.ndarray): np.copyto(value, dataset) elif isinstance(value, cuda.ndarray): value.set(np.asarray(dataset)) else: value = type(value)(np.asarray(dataset)) return value def load_npz_average(filename_list, obj): d = NpzDeserializerAverage([np.load(filename) for filename in filename_list]) d.load(obj) def load_model_flexible(filename_list, encdec): mode = "normal" if isinstance(filename_list, tuple) or isinstance(filename_list, list): if len(filename_list) == 1: filename_list = filename_list[0] else: mode = "average" if mode == "normal": log.info("loading model parameters from %s", filename_list) try: serializers.load_npz(filename_list, encdec) except KeyError: log.info("not model format, trying snapshot format") with np.load(filename_list) as fseri: dicseri = serializers.NpzDeserializer(fseri, path="updater/model:main/") dicseri.load(encdec) else: assert mode == "average" log.info("loading averaged model parameters from %r", filename_list) dseri = NpzDeserializerAverage([np.load(filename) for filename in filename_list]) dseri.load(encdec) def create_encdec_and_indexers_from_config_dict(config_dict, src_indexer=None, 
tgt_indexer=None, load_config_model="no", return_model_infos=False, additional_models_parameters_for_averaging=None): assert load_config_model in "yes no if_exists".split() if src_indexer is None or tgt_indexer is None: voc_fn = config_dict.data["voc"] log.info("loading voc from %s" % voc_fn) # src_voc, tgt_voc = json.load(open(voc_fn)) bi_idx = processors.load_pp_pair_from_file(voc_fn) if src_indexer is None: src_indexer = bi_idx.src_processor() if tgt_indexer is None: tgt_indexer = bi_idx.tgt_processor() # tgt_voc = None # src_voc = None encdec = create_encdec_from_config_dict(config_dict["model"], src_indexer, tgt_indexer) eos_idx = len(tgt_indexer) model_infos = None if load_config_model != "no": if "model_parameters" not in config_dict: assert additional_models_parameters_for_averaging is None if load_config_model == "yes": log.error("cannot find model parameters in config file") raise ValueError( "Config file do not contain model_parameters section") else: model_filename = config_dict.model_parameters.filename if additional_models_parameters_for_averaging is not None: load_model_flexible([model_filename]+additional_models_parameters_for_averaging, encdec) else: load_model_flexible(model_filename, encdec) # if config_dict.model_parameters.type == "model": # log.info( # "loading model parameters from file specified by config file:%s" % # model_filename) # serializers.load_npz(model_filename, encdec) # if return_model_infos: # model_infos = create_filename_infos(model_filename) # else: # log.info("loading model parameters from snapshot file specified by config file:%s" %model_filename) # with np.load(model_filename) as fs: # dics = serializers.NpzDeserializer(fs, path="updater/model:main/") # dics.load(encdec) if return_model_infos: model_infos = create_filename_infos(model_filename) else: assert additional_models_parameters_for_averaging is None result = encdec, eos_idx, src_indexer, tgt_indexer if return_model_infos: return result, model_infos else: return 
result def load_voc_and_update_training_config(config_training): data_prefix = config_training["training_management"]["data_prefix"] voc_fn = data_prefix + ".voc" data_fn = data_prefix + ".data.json.gz" log.info("loading voc from %s" % voc_fn) # src_voc, tgt_voc = json.load(open(voc_fn)) bi_idx = processors.load_pp_pair_from_file(voc_fn) src_indexer, tgt_indexer = bi_idx.src_processor(), bi_idx.tgt_processor() # src_indexer = processors.PreProcessor.make_from_serializable(src_voc) # tgt_indexer = processors.PreProcessor.make_from_serializable(tgt_voc) # tgt_voc = None # src_voc = None # Vi = len(src_voc) + 1 # + UNK # Vo = len(tgt_voc) + 1 # + UNK Vi = len(src_indexer) # + UNK Vo = len(tgt_indexer) # + UNK config_training.add_section("data", keep_at_bottom="metadata", overwrite=False) config_training["data"]["data_fn"] = data_fn config_training["data"]["Vi"] = Vi config_training["data"]["Vo"] = Vo config_training["data"]["voc"] = voc_fn config_training.set_readonly() return src_indexer, tgt_indexer def do_train(config_training): if config_training["training_management"]["disable_cudnn_softmax"]: import nmt_chainer.models.feedforward.multi_attention nmt_chainer.models.feedforward.multi_attention.disable_cudnn_softmax = True src_indexer, tgt_indexer = load_voc_and_update_training_config(config_training) save_prefix = config_training.training_management.save_prefix output_files_dict = {} output_files_dict["train_config"] = save_prefix + ".train.config" output_files_dict["model_ckpt"] = save_prefix + ".model." + "ckpt" + ".npz" output_files_dict["model_final"] = save_prefix + \ ".model." + "final" + ".npz" output_files_dict["model_best"] = save_prefix + ".model." + "best" + ".npz" output_files_dict["model_best_loss"] = save_prefix + ".model." + "best_loss" + ".npz" # output_files_dict["model_ckpt_config"] = save_prefix + ".model." + "ckpt" + ".config" # output_files_dict["model_final_config"] = save_prefix + ".model." 
+ "final" + ".config" # output_files_dict["model_best_config"] = save_prefix + ".model." + "best" + ".config" # output_files_dict["model_best_loss_config"] = save_prefix + ".model." + "best_loss" + ".config" output_files_dict["test_translation_output"] = save_prefix + ".test.out" output_files_dict["test_src_output"] = save_prefix + ".test.src.out" output_files_dict["dev_translation_output"] = save_prefix + ".dev.out" output_files_dict["dev_src_output"] = save_prefix + ".dev.src.out" output_files_dict["valid_translation_output"] = save_prefix + ".valid.out" output_files_dict["valid_src_output"] = save_prefix + ".valid.src.out" output_files_dict["sqlite_db"] = save_prefix + ".result.sqlite" output_files_dict["optimizer_ckpt"] = save_prefix + ".optimizer." + "ckpt" + ".npz" output_files_dict["optimizer_final"] = save_prefix + ".optimizer." + "final" + ".npz" save_prefix_dir, save_prefix_fn = os.path.split(save_prefix) ensure_path(save_prefix_dir) already_existing_files = [] for key_info, filename in output_files_dict.items(): # , valid_data_fn]: if os.path.exists(filename): already_existing_files.append(filename) if len(already_existing_files) > 0: print("Warning: existing files are going to be replaced / updated: ", already_existing_files) if not config_training.training_management.force_overwrite: input("Press Enter to Continue") save_train_config_fn = output_files_dict["train_config"] log.info("Saving training config to %s" % save_train_config_fn) config_training.save_to(save_train_config_fn) # json.dump(config_training, open(save_train_config_fn, "w"), indent=2, separators=(',', ': ')) Vi = len(src_indexer) # + UNK Vo = len(tgt_indexer) # + UNK eos_idx = Vo data_fn = config_training.data.data_fn log.info("loading training data from %s" % data_fn) with gzip.open(data_fn, "r") as input_file: training_data_all = json.loads(input_file.read().decode('utf-8')) training_data = training_data_all["train"] log.info("loaded %i sentences as training data" % 
len(training_data)) if "test" in training_data_all: test_data = training_data_all["test"] log.info("Found test data: %i sentences" % len(test_data)) else: test_data = None log.info("No test data found") if "dev" in training_data_all: dev_data = training_data_all["dev"] log.info("Found dev data: %i sentences" % len(dev_data)) else: dev_data = None log.info("No dev data found") if "valid" in training_data_all: valid_data = training_data_all["valid"] log.info("Found valid data: %i sentences" % len(valid_data)) else: valid_data = None log.info("No valid data found") max_src_tgt_length = config_training.training_management.max_src_tgt_length if max_src_tgt_length is not None: log.info("filtering sentences of length larger than %i" % (max_src_tgt_length)) filtered_training_data = [] nb_filtered = 0 for src, tgt in training_data: if len(src) <= max_src_tgt_length and len( tgt) <= max_src_tgt_length: filtered_training_data.append((src, tgt)) else: nb_filtered += 1 log.info("filtered %i sentences of length larger than %i" % (nb_filtered, max_src_tgt_length)) training_data = filtered_training_data if not config_training.training.no_shuffle_of_training_data: log.info("shuffling") import random random.shuffle(training_data) log.info("done") encdec, _, _, _ = create_encdec_and_indexers_from_config_dict(config_training, src_indexer=src_indexer, tgt_indexer=tgt_indexer, load_config_model="if_exists" if config_training.training_management.resume else "no") if (config_training.training.get("load_initial_source_embeddings", None) is not None or config_training.training.get("load_initial_target_embeddings", None) is not None): src_emb = None tgt_emb = None src_emb_fn = config_training.training.get("load_initial_source_embeddings", None) tgt_emb_fn = config_training.training.get("load_initial_target_embeddings", None) if src_emb_fn is not None: log.info("loading source embeddings from %s", src_emb_fn) src_emb = np.load(src_emb_fn) if tgt_emb_fn is not None: log.info("loading target 
embeddings from %s", tgt_emb_fn) tgt_emb = np.load(tgt_emb_fn) encdec.initialize_embeddings(src_emb, tgt_emb, no_unk_src=True, no_unk_tgt=True) # create_encdec_from_config_dict(config_training.model, src_indexer, tgt_indexer, # load_config_model = "if_exists" if config_training.training_management.resume else "no") # if config_training.training_management.resume: # if "model_parameters" not in config_training: # log.error("cannot find model parameters in config file") # if config_training.model_parameters.type == "model": # model_filename = config_training.model_parameters.filename # log.info("resuming from model parameters %s" % model_filename) # serializers.load_npz(model_filename, encdec) if config_training.training_management.load_model is not None: log.info("loading model parameters from %s", config_training.training_management.load_model) load_model_flexible(config_training.training_management.load_model, encdec) # try: # serializers.load_npz(config_training.training_management.load_model, encdec) # except KeyError: # log.info("not model format, trying snapshot format") # with np.load(config_training.training_management.load_model) as fseri: # dicseri = serializers.NpzDeserializer(fseri, path="updater/model:main/") # dicseri.load(encdec) gpu = config_training.training_management.gpu if config_training.training_management.use_chainerx: if gpu is not None: encdec = encdec.to_device("cuda:%i"%gpu) else: encdec = encdec.to_device("native:0") else: if gpu is not None: encdec = encdec.to_gpu(gpu) if config_training.training.optimizer == "adadelta": optimizer = optimizers.AdaDelta() elif config_training.training.optimizer == "adam": optimizer = optimizers.Adam() elif config_training.training.optimizer == "scheduled_adam": from nmt_chainer.additional_links.scheduled_adam import ScheduledAdam optimizer = ScheduledAdam(d_model=config_training.model.ff_d_model) elif config_training.training.optimizer == "adagrad": optimizer = 
optimizers.AdaGrad(lr=config_training.training.learning_rate) elif config_training.training.optimizer == "sgd": optimizer = optimizers.SGD(lr=config_training.training.learning_rate) elif config_training.training.optimizer == "momentum": optimizer = optimizers.MomentumSGD(lr=config_training.training.learning_rate, momentum=config_training.training.momentum) elif config_training.training.optimizer == "nesterov": optimizer = optimizers.NesterovAG(lr=config_training.training.learning_rate, momentum=config_training.training.momentum) elif config_training.training.optimizer == "rmsprop": optimizer = optimizers.RMSprop(lr=config_training.training.learning_rate) elif config_training.training.optimizer == "rmspropgraves": optimizer = optimizers.RMSpropGraves(lr=config_training.training.learning_rate, momentum=config_training.training.momentum) else: raise NotImplemented with cuda.get_device_from_id(gpu): optimizer.setup(encdec) if config_training.training.l2_gradient_clipping is not None and config_training.training.l2_gradient_clipping > 0: optimizer.add_hook(chainer.optimizer_hooks.GradientClipping( config_training.training.l2_gradient_clipping)) if config_training.training.hard_gradient_clipping is not None and config_training.training.hard_gradient_clipping > 0: optimizer.add_hook(chainer.optimizer_hooks.GradientHardClipping( *config_training.training.hard_gradient_clipping)) if config_training.training.weight_decay is not None: optimizer.add_hook( chainer.optimizer_hooks.WeightDecay( config_training.training.weight_decay)) if config_training.training_management.load_optimizer_state is not None: with cuda.get_device_from_id(gpu): log.info("loading optimizer parameters from %s", config_training.training_management.load_optimizer_state) serializers.load_npz(config_training.training_management.load_optimizer_state, optimizer) if config_training.training_management.timer_hook: timer_hook = profiling_tools.MyTimerHook else: import contextlib @contextlib.contextmanager def 
timer_hook(): yield from . import training_chainer with cuda.get_device_from_id(gpu): with timer_hook() as timer_infos: if config_training.training_management.max_nb_iters is not None: stop_trigger = ( config_training.training_management.max_nb_iters, "iteration") if config_training.training_management.max_nb_epochs is not None: log.warn( "max_nb_iters and max_nb_epochs both specified. Only max_nb_iters will be considered.") elif config_training.training_management.max_nb_epochs is not None: stop_trigger = ( config_training.training_management.max_nb_epochs, "epoch") else: stop_trigger = None training_chainer.train_on_data_chainer(encdec, optimizer, training_data, output_files_dict, src_indexer, tgt_indexer, eos_idx=eos_idx, config_training=config_training, stop_trigger=stop_trigger, test_data=test_data, dev_data=dev_data, valid_data=valid_data, use_chainerx=config_training.training_management.use_chainerx ) # # import sys # sys.exit(0) # with cuda.get_device_from_id(args.gpu): # # with MyTimerHook() as timer: # # try: # train_on_data(encdec, optimizer, training_data, output_files_dict, # src_indexer, tgt_indexer, eos_idx = eos_idx, # mb_size = args.mb_size, # nb_of_batch_to_sort = args.nb_batch_to_sort, # test_data = test_data, dev_data = dev_data, valid_data = valid_data, gpu = args.gpu, report_every = args.report_every, # randomized = args.randomized_data, reverse_src = args.reverse_src, reverse_tgt = args.reverse_tgt, # max_nb_iters = args.max_nb_iters, do_not_save_data_for_resuming = args.no_resume, # noise_on_prev_word = args.noise_on_prev_word, curiculum_training = args.curiculum_training, # use_previous_prediction = args.use_previous_prediction, no_report_or_save = args.no_report_or_save, # use_memory_optimization = args.use_memory_optimization, # sample_every = args.sample_every, # use_reinf = args.use_reinf, # save_ckpt_every = args.save_ckpt_every # # lexical_probability_dictionary = lexical_probability_dictionary, # # V_tgt = Vo + 1, # # 
lexicon_prob_epsilon = args.lexicon_prob_epsilon # ) # # finally: # # print(timer) # # timer.print_sorted() # # print("total time:") # # print(timer.total_time()) # #
gpl-3.0
ShashaQin/erpnext
erpnext/manufacturing/doctype/bom/bom.py
1
14808
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cint, cstr, flt from frappe import _ from frappe.model.document import Document from operator import itemgetter form_grid_templates = { "items": "templates/form_grid/item_grid.html" } class BOM(Document): def autoname(self): last_name = frappe.db.sql("""select max(name) from `tabBOM` where name like "BOM/{0}/%%" and item=%s """.format(frappe.db.escape(self.item, percent=False)), self.item) if last_name: idx = cint(cstr(last_name[0][0]).split('/')[-1].split('-')[0]) + 1 else: idx = 1 self.name = 'BOM/' + self.item + ('/%.3i' % idx) def validate(self): self.clear_operations() self.validate_main_item() from erpnext.utilities.transaction_base import validate_uom_is_integer validate_uom_is_integer(self, "stock_uom", "qty", "BOM Item") self.validate_materials() self.set_bom_material_details() self.calculate_cost() self.validate_operations() def on_update(self): self.check_recursion() self.update_exploded_items() def on_submit(self): self.manage_default_bom() def on_cancel(self): frappe.db.set(self, "is_active", 0) frappe.db.set(self, "is_default", 0) # check if used in any other bom self.validate_bom_links() self.manage_default_bom() def on_update_after_submit(self): self.validate_bom_links() self.manage_default_bom() def get_item_det(self, item_code): item = frappe.db.sql("""select name, item_name, is_asset_item, is_purchase_item, docstatus, description, image, is_sub_contracted_item, stock_uom, default_bom, last_purchase_rate from `tabItem` where name=%s""", item_code, as_dict = 1) if not item: frappe.throw(_("Item: {0} does not exist in the system").format(item_code)) return item def validate_rm_item(self, item): if item[0]['name'] == self.item: frappe.throw(_("Raw material cannot be same as main Item")) def set_bom_material_details(self): for item in 
self.get("items"): ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no, "qty": item.qty}) for r in ret: if not item.get(r): item.set(r, ret[r]) def get_bom_material_detail(self, args=None): """ Get raw material details like uom, desc and rate""" if not args: args = frappe.form_dict.get('args') if isinstance(args, basestring): import json args = json.loads(args) item = self.get_item_det(args['item_code']) self.validate_rm_item(item) args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or '' args.update(item[0]) rate = self.get_rm_rate(args) ret_item = { 'item_name' : item and args['item_name'] or '', 'description' : item and args['description'] or '', 'image' : item and args['image'] or '', 'stock_uom' : item and args['stock_uom'] or '', 'bom_no' : args['bom_no'], 'rate' : rate } return ret_item def get_rm_rate(self, arg): """ Get raw material rate as per selected method, if bom exists takes bom cost """ rate = 0 if arg['bom_no']: rate = self.get_bom_unitcost(arg['bom_no']) elif arg and (arg['is_purchase_item'] == 1 or arg['is_sub_contracted_item'] == 1): if self.rm_cost_as_per == 'Valuation Rate': rate = self.get_valuation_rate(arg) elif self.rm_cost_as_per == 'Last Purchase Rate': rate = arg['last_purchase_rate'] elif self.rm_cost_as_per == "Price List": if not self.buying_price_list: frappe.throw(_("Please select Price List")) rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list, "item_code": arg["item_code"]}, "price_list_rate") or 0 return rate def update_cost(self): if self.docstatus == 2: return for d in self.get("items"): rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no, 'qty': d.qty})["rate"] if rate: d.rate = rate if self.docstatus == 1: self.flags.ignore_validate_update_after_submit = True self.calculate_cost() self.save() self.update_exploded_items() frappe.msgprint(_("Cost Updated")) def get_bom_unitcost(self, 
bom_no): bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM` where is_active = 1 and name = %s""", bom_no, as_dict=1) return bom and bom[0]['unit_cost'] or 0 def get_valuation_rate(self, args): """ Get weighted average of valuation rate from all warehouses """ total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0 for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin` where item_code=%s""", args['item_code'], as_dict=1): total_qty += flt(d.actual_qty) total_value += flt(d.stock_value) if total_qty: valuation_rate = total_value / total_qty if valuation_rate <= 0: last_valuation_rate = frappe.db.sql("""select valuation_rate from `tabStock Ledger Entry` where item_code = %s and valuation_rate > 0 order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code']) valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0 return valuation_rate def manage_default_bom(self): """ Uncheck others if current one is selected as default, update default bom in item master """ if self.is_default and self.is_active: from frappe.model.utils import set_default set_default(self, "item") item = frappe.get_doc("Item", self.item) if item.default_bom != self.name: item.default_bom = self.name item.save(ignore_permissions = True) else: frappe.db.set(self, "is_default", 0) item = frappe.get_doc("Item", self.item) if item.default_bom == self.name: item.default_bom = None item.save(ignore_permissions = True) def clear_operations(self): if not self.with_operations: self.set('operations', []) def validate_main_item(self): """ Validate main FG item""" item = self.get_item_det(self.item) if not item: frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item)) else: ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"]) self.description = ret[0] self.uom = ret[1] self.item_name= ret[2] if not self.quantity: frappe.throw(_("Quantity should be greater 
than 0")) def validate_materials(self): """ Validate raw material entries """ if not self.get('items'): frappe.throw(_("Raw Materials cannot be blank.")) check_list = [] for m in self.get('items'): if m.bom_no: validate_bom_no(m.item_code, m.bom_no) if flt(m.qty) <= 0: frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx)) check_list.append(cstr(m.item_code)) unique_chk_list = set(check_list) if len(unique_chk_list) != len(check_list): frappe.throw(_("Same item has been entered multiple times.")) def check_recursion(self): """ Check whether recursion occurs in any bom""" check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']] for d in check_list: bom_list, count = [self.name], 0 while (len(bom_list) > count ): boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " % (d[0], d[1], '%s'), cstr(bom_list[count])) count = count + 1 for b in boms: if b[0] == self.name: frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name)) if b[0]: bom_list.append(b[0]) def update_cost_and_exploded_items(self, bom_list=[]): bom_list = self.traverse_tree(bom_list) for bom in bom_list: bom_obj = frappe.get_doc("BOM", bom) bom_obj.on_update() return bom_list def traverse_tree(self, bom_list=[]): def _get_children(bom_no): return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item` where parent = %s and ifnull(bom_no, '') != ''""", bom_no)] count = 0 if self.name not in bom_list: bom_list.append(self.name) while(count < len(bom_list)): for child_bom in _get_children(bom_list[count]): if child_bom not in bom_list: bom_list.append(child_bom) count += 1 bom_list.reverse() return bom_list def calculate_cost(self): """Calculate bom totals""" self.calculate_op_cost() self.calculate_rm_cost() self.total_cost = self.operating_cost + self.raw_material_cost def calculate_op_cost(self): """Update workstation rate and calculates totals""" self.operating_cost = 0 for d in 
self.get('operations'): if d.workstation: if not d.hour_rate: d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate")) if d.hour_rate and d.time_in_mins: d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0 self.operating_cost += flt(d.operating_cost) def calculate_rm_cost(self): """Fetch RM rate as per today's valuation rate and calculate totals""" total_rm_cost = 0 for d in self.get('items'): if d.bom_no: d.rate = self.get_bom_unitcost(d.bom_no) d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.qty, self.precision("qty", d)) d.qty_consumed_per_unit = flt(d.qty, self.precision("qty", d)) / flt(self.quantity, self.precision("quantity")) total_rm_cost += d.amount self.raw_material_cost = total_rm_cost def update_exploded_items(self): """ Update Flat BOM, following will be correct data""" self.get_exploded_items() self.add_exploded_items() def get_exploded_items(self): """ Get all raw materials including items from child bom""" self.cur_exploded_items = {} for d in self.get('items'): if d.bom_no: self.get_child_exploded_items(d.bom_no, d.qty) else: self.add_to_cur_exploded_items(frappe._dict({ 'item_code' : d.item_code, 'item_name' : d.item_name, 'description' : d.description, 'image' : d.image, 'stock_uom' : d.stock_uom, 'qty' : flt(d.qty), 'rate' : flt(d.rate), })) def add_to_cur_exploded_items(self, args): if self.cur_exploded_items.get(args.item_code): self.cur_exploded_items[args.item_code]["qty"] += args.qty else: self.cur_exploded_items[args.item_code] = args def get_child_exploded_items(self, bom_no, qty): """ Add all items from Flat BOM of child BOM""" # Did not use qty_consumed_per_unit in the query, as it leads to rounding loss child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description, bom_item.stock_uom, bom_item.qty, bom_item.rate, bom_item.qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit from `tabBOM Explosion Item` bom_item, tabBOM bom where bom_item.parent = 
bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1) for d in child_fb_items: self.add_to_cur_exploded_items(frappe._dict({ 'item_code' : d['item_code'], 'item_name' : d['item_name'], 'description' : d['description'], 'stock_uom' : d['stock_uom'], 'qty' : d['qty_consumed_per_unit']*qty, 'rate' : flt(d['rate']), })) def add_exploded_items(self): "Add items to Flat BOM table" frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name) self.set('exploded_items', []) for d in sorted(self.cur_exploded_items, key=itemgetter(0)): ch = self.append('exploded_items', {}) for i in self.cur_exploded_items[d].keys(): ch.set(i, self.cur_exploded_items[d][i]) ch.amount = flt(ch.qty) * flt(ch.rate) ch.qty_consumed_per_unit = flt(ch.qty) / flt(self.quantity) ch.docstatus = self.docstatus ch.db_insert() def validate_bom_links(self): if not self.is_active: act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item where bom_item.bom_no = %s and bom_item.docstatus = 1 and exists (select * from `tabBOM` where name = bom_item.parent and docstatus = 1 and is_active = 1)""", self.name) if act_pbom and act_pbom[0][0]: frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs")) def validate_operations(self): if self.with_operations and not self.get('operations'): frappe.throw(_("Operations cannot be left blank.")) def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1): item_dict = {} # Did not use qty_consumed_per_unit in the query, as it leads to rounding loss query = """select bom_item.item_code, item.item_name, sum(bom_item.qty/ifnull(bom.quantity, 1)) * %(qty)s as qty, item.description, item.image, item.stock_uom, item.default_warehouse, item.expense_account as expense_account, item.buying_cost_center as cost_center from `tab{table}` bom_item, `tabBOM` bom, `tabItem` item where bom_item.parent = bom.name and bom_item.docstatus < 2 and bom_item.parent = %(bom)s and item.name = 
bom_item.item_code and is_stock_item = 1 {conditions} group by item_code, stock_uom""" if fetch_exploded: query = query.format(table="BOM Explosion Item", conditions="""and item.is_sub_contracted_item = 0""") items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True) else: query = query.format(table="BOM Item", conditions="") items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True) # make unique for item in items: if item_dict.has_key(item.item_code): item_dict[item.item_code]["qty"] += flt(item.qty) else: item_dict[item.item_code] = item for item, item_details in item_dict.items(): for d in [["Account", "expense_account", "default_expense_account"], ["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]: company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company") if not item_details.get(d[1]) or (company_in_record and company != company_in_record): item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None return item_dict @frappe.whitelist() def get_bom_items(bom, company, qty=1, fetch_exploded=1): items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values() items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1) return items def validate_bom_no(item, bom_no): """Validate BOM No of sub-contracted items""" bom = frappe.get_doc("BOM", bom_no) if not bom.is_active: frappe.throw(_("BOM {0} must be active").format(bom_no)) if bom.docstatus != 1: if not getattr(frappe.flags, "in_test", False): frappe.throw(_("BOM {0} must be submitted").format(bom_no)) if item and not (bom.item.lower() == item.lower() or \ bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()): frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item))
agpl-3.0
dstockwell/catapult
third_party/Paste/paste/debug/watchthreads.py
50
10839
""" Watches the key ``paste.httpserver.thread_pool`` to see how many threads there are and report on any wedged threads. """ import sys import cgi import time import traceback from cStringIO import StringIO from thread import get_ident from paste import httpexceptions from paste.request import construct_url, parse_formvars from paste.util.template import HTMLTemplate, bunch page_template = HTMLTemplate(''' <html> <head> <style type="text/css"> body { font-family: sans-serif; } table.environ tr td { border-bottom: #bbb 1px solid; } table.environ tr td.bottom { border-bottom: none; } table.thread { border: 1px solid #000; margin-bottom: 1em; } table.thread tr td { border-bottom: #999 1px solid; padding-right: 1em; } table.thread tr td.bottom { border-bottom: none; } table.thread tr.this_thread td { background-color: #006; color: #fff; } a.button { background-color: #ddd; border: #aaa outset 2px; text-decoration: none; margin-top: 10px; font-size: 80%; color: #000; } a.button:hover { background-color: #eee; border: #bbb outset 2px; } a.button:active { border: #bbb inset 2px; } </style> <title>{{title}}</title> </head> <body> <h1>{{title}}</h1> {{if kill_thread_id}} <div style="background-color: #060; color: #fff; border: 2px solid #000;"> Thread {{kill_thread_id}} killed </div> {{endif}} <div>Pool size: {{nworkers}} {{if actual_workers > nworkers}} + {{actual_workers-nworkers}} extra {{endif}} ({{nworkers_used}} used including current request)<br> idle: {{len(track_threads["idle"])}}, busy: {{len(track_threads["busy"])}}, hung: {{len(track_threads["hung"])}}, dying: {{len(track_threads["dying"])}}, zombie: {{len(track_threads["zombie"])}}</div> {{for thread in threads}} <table class="thread"> <tr {{if thread.thread_id == this_thread_id}}class="this_thread"{{endif}}> <td> <b>Thread</b> {{if thread.thread_id == this_thread_id}} (<i>this</i> request) {{endif}}</td> <td> <b>{{thread.thread_id}} {{if allow_kill}} <form action="{{script_name}}/kill" method="POST" 
style="display: inline"> <input type="hidden" name="thread_id" value="{{thread.thread_id}}"> <input type="submit" value="kill"> </form> {{endif}} </b> </td> </tr> <tr> <td>Time processing request</td> <td>{{thread.time_html|html}}</td> </tr> <tr> <td>URI</td> <td>{{if thread.uri == 'unknown'}} unknown {{else}}<a href="{{thread.uri}}">{{thread.uri_short}}</a> {{endif}} </td> <tr> <td colspan="2" class="bottom"> <a href="#" class="button" style="width: 9em; display: block" onclick=" var el = document.getElementById('environ-{{thread.thread_id}}'); if (el.style.display) { el.style.display = ''; this.innerHTML = \'&#9662; Hide environ\'; } else { el.style.display = 'none'; this.innerHTML = \'&#9656; Show environ\'; } return false ">&#9656; Show environ</a> <div id="environ-{{thread.thread_id}}" style="display: none"> {{if thread.environ:}} <table class="environ"> {{for loop, item in looper(sorted(thread.environ.items()))}} {{py:key, value=item}} <tr> <td {{if loop.last}}class="bottom"{{endif}}>{{key}}</td> <td {{if loop.last}}class="bottom"{{endif}}>{{value}}</td> </tr> {{endfor}} </table> {{else}} Thread is in process of starting {{endif}} </div> {{if thread.traceback}} <a href="#" class="button" style="width: 9em; display: block" onclick=" var el = document.getElementById('traceback-{{thread.thread_id}}'); if (el.style.display) { el.style.display = ''; this.innerHTML = \'&#9662; Hide traceback\'; } else { el.style.display = 'none'; this.innerHTML = \'&#9656; Show traceback\'; } return false ">&#9656; Show traceback</a> <div id="traceback-{{thread.thread_id}}" style="display: none"> <pre class="traceback">{{thread.traceback}}</pre> </div> {{endif}} </td> </tr> </table> {{endfor}} </body> </html> ''', name='watchthreads.page_template') class WatchThreads(object): """ Application that watches the threads in ``paste.httpserver``, showing the length each thread has been working on a request. If allow_kill is true, then you can kill errant threads through this application. 
This application can expose private information (specifically in the environment, like cookies), so it should be protected. """ def __init__(self, allow_kill=False): self.allow_kill = allow_kill def __call__(self, environ, start_response): if 'paste.httpserver.thread_pool' not in environ: start_response('403 Forbidden', [('Content-type', 'text/plain')]) return ['You must use the threaded Paste HTTP server to use this application'] if environ.get('PATH_INFO') == '/kill': return self.kill(environ, start_response) else: return self.show(environ, start_response) def show(self, environ, start_response): start_response('200 OK', [('Content-type', 'text/html')]) form = parse_formvars(environ) if form.get('kill'): kill_thread_id = form['kill'] else: kill_thread_id = None thread_pool = environ['paste.httpserver.thread_pool'] nworkers = thread_pool.nworkers now = time.time() workers = thread_pool.worker_tracker.items() workers.sort(key=lambda v: v[1][0]) threads = [] for thread_id, (time_started, worker_environ) in workers: thread = bunch() threads.append(thread) if worker_environ: thread.uri = construct_url(worker_environ) else: thread.uri = 'unknown' thread.thread_id = thread_id thread.time_html = format_time(now-time_started) thread.uri_short = shorten(thread.uri) thread.environ = worker_environ thread.traceback = traceback_thread(thread_id) page = page_template.substitute( title="Thread Pool Worker Tracker", nworkers=nworkers, actual_workers=len(thread_pool.workers), nworkers_used=len(workers), script_name=environ['SCRIPT_NAME'], kill_thread_id=kill_thread_id, allow_kill=self.allow_kill, threads=threads, this_thread_id=get_ident(), track_threads=thread_pool.track_threads()) return [page] def kill(self, environ, start_response): if not self.allow_kill: exc = httpexceptions.HTTPForbidden( 'Killing threads has not been enabled. 
Shame on you ' 'for trying!') return exc(environ, start_response) vars = parse_formvars(environ) thread_id = int(vars['thread_id']) thread_pool = environ['paste.httpserver.thread_pool'] if thread_id not in thread_pool.worker_tracker: exc = httpexceptions.PreconditionFailed( 'You tried to kill thread %s, but it is not working on ' 'any requests' % thread_id) return exc(environ, start_response) thread_pool.kill_worker(thread_id) script_name = environ['SCRIPT_NAME'] or '/' exc = httpexceptions.HTTPFound( headers=[('Location', script_name+'?kill=%s' % thread_id)]) return exc(environ, start_response) def traceback_thread(thread_id): """ Returns a plain-text traceback of the given thread, or None if it can't get a traceback. """ if not hasattr(sys, '_current_frames'): # Only 2.5 has support for this, with this special function return None frames = sys._current_frames() if not thread_id in frames: return None frame = frames[thread_id] out = StringIO() traceback.print_stack(frame, file=out) return out.getvalue() hide_keys = ['paste.httpserver.thread_pool'] def format_environ(environ): if environ is None: return environ_template.substitute( key='---', value='No environment registered for this thread yet') environ_rows = [] for key, value in sorted(environ.items()): if key in hide_keys: continue try: if key.upper() != key: value = repr(value) environ_rows.append( environ_template.substitute( key=cgi.escape(str(key)), value=cgi.escape(str(value)))) except Exception as e: environ_rows.append( environ_template.substitute( key=cgi.escape(str(key)), value='Error in <code>repr()</code>: %s' % e)) return ''.join(environ_rows) def format_time(time_length): if time_length >= 60*60: # More than an hour time_string = '%i:%02i:%02i' % (int(time_length/60/60), int(time_length/60) % 60, time_length % 60) elif time_length >= 120: time_string = '%i:%02i' % (int(time_length/60), time_length % 60) elif time_length > 60: time_string = '%i sec' % time_length elif time_length > 1: time_string = 
'%0.1f sec' % time_length else: time_string = '%0.2f sec' % time_length if time_length < 5: return time_string elif time_length < 120: return '<span style="color: #900">%s</span>' % time_string else: return '<span style="background-color: #600; color: #fff">%s</span>' % time_string def shorten(s): if len(s) > 60: return s[:40]+'...'+s[-10:] else: return s def make_watch_threads(global_conf, allow_kill=False): from paste.deploy.converters import asbool return WatchThreads(allow_kill=asbool(allow_kill)) make_watch_threads.__doc__ = WatchThreads.__doc__ def make_bad_app(global_conf, pause=0): pause = int(pause) def bad_app(environ, start_response): import thread if pause: time.sleep(pause) else: count = 0 while 1: print("I'm alive %s (%s)" % (count, thread.get_ident())) time.sleep(10) count += 1 start_response('200 OK', [('content-type', 'text/plain')]) return ['OK, paused %s seconds' % pause] return bad_app
bsd-3-clause
normtown/SickRage
lib/sqlalchemy/dialects/sybase/base.py
78
28800
# sybase/base.py # Copyright (C) 2010-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management # AG http://www.fam.ch, with coding by Alexander Houben # alexander.houben@thor-solutions.ch # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase :name: Sybase .. note:: The Sybase dialect functions on current SQLAlchemy versions but is not regularly tested, and may have many issues and caveats not currently handled. """ import operator import re from sqlalchemy.sql import compiler, expression, text, bindparam from sqlalchemy.engine import default, base, reflection from sqlalchemy import types as sqltypes from sqlalchemy.sql import operators as sql_operators from sqlalchemy import schema as sa_schema from sqlalchemy import util, sql, exc from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ BIGINT, INT, INTEGER, SMALLINT, BINARY,\ VARBINARY, DECIMAL, TIMESTAMP, Unicode,\ UnicodeText, REAL RESERVED_WORDS = set([ "add", "all", "alter", "and", "any", "as", "asc", "backup", "begin", "between", "bigint", "binary", "bit", "bottom", "break", "by", "call", "capability", "cascade", "case", "cast", "char", "char_convert", "character", "check", "checkpoint", "close", "comment", "commit", "connect", "constraint", "contains", "continue", "convert", "create", "cross", "cube", "current", "current_timestamp", "current_user", "cursor", "date", "dbspace", "deallocate", "dec", "decimal", "declare", "default", "delete", "deleting", "desc", "distinct", "do", "double", "drop", "dynamic", "else", "elseif", "encrypted", "end", "endif", "escape", "except", "exception", "exec", "execute", "existing", "exists", "externlogin", "fetch", "first", "float", "for", "force", "foreign", "forward", "from", "full", "goto", "grant", "group", "having", 
"holdlock", "identified", "if", "in", "index", "index_lparen", "inner", "inout", "insensitive", "insert", "inserting", "install", "instead", "int", "integer", "integrated", "intersect", "into", "iq", "is", "isolation", "join", "key", "lateral", "left", "like", "lock", "login", "long", "match", "membership", "message", "mode", "modify", "natural", "new", "no", "noholdlock", "not", "notify", "null", "numeric", "of", "off", "on", "open", "option", "options", "or", "order", "others", "out", "outer", "over", "passthrough", "precision", "prepare", "primary", "print", "privileges", "proc", "procedure", "publication", "raiserror", "readtext", "real", "reference", "references", "release", "remote", "remove", "rename", "reorganize", "resource", "restore", "restrict", "return", "revoke", "right", "rollback", "rollup", "save", "savepoint", "scroll", "select", "sensitive", "session", "set", "setuser", "share", "smallint", "some", "sqlcode", "sqlstate", "start", "stop", "subtrans", "subtransaction", "synchronize", "syntax_error", "table", "temporary", "then", "time", "timestamp", "tinyint", "to", "top", "tran", "trigger", "truncate", "tsequal", "unbounded", "union", "unique", "unknown", "unsigned", "update", "updating", "user", "using", "validate", "values", "varbinary", "varchar", "variable", "varying", "view", "wait", "waitfor", "when", "where", "while", "window", "with", "with_cube", "with_lparen", "with_rollup", "within", "work", "writetext", ]) class _SybaseUnitypeMixin(object): """these types appear to return a buffer object.""" def result_processor(self, dialect, coltype): def process(value): if value is not None: return str(value) # decode("ucs-2") else: return None return process class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = 'UNICHAR' class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = 'UNIVARCHAR' class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): __visit_name__ = 'UNITEXT' class TINYINT(sqltypes.Integer): 
__visit_name__ = 'TINYINT' class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' class MONEY(sqltypes.TypeEngine): __visit_name__ = "MONEY" class SMALLMONEY(sqltypes.TypeEngine): __visit_name__ = "SMALLMONEY" class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' class SybaseTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_): return self.visit_IMAGE(type_) def visit_boolean(self, type_): return self.visit_BIT(type_) def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) def visit_UNICHAR(self, type_): return "UNICHAR(%d)" % type_.length def visit_UNIVARCHAR(self, type_): return "UNIVARCHAR(%d)" % type_.length def visit_UNITEXT(self, type_): return "UNITEXT" def visit_TINYINT(self, type_): return "TINYINT" def visit_IMAGE(self, type_): return "IMAGE" def visit_BIT(self, type_): return "BIT" def visit_MONEY(self, type_): return "MONEY" def visit_SMALLMONEY(self, type_): return "SMALLMONEY" def visit_UNIQUEIDENTIFIER(self, type_): return "UNIQUEIDENTIFIER" ischema_names = { 'bigint': BIGINT, 'int': INTEGER, 'integer': INTEGER, 'smallint': SMALLINT, 'tinyint': TINYINT, 'unsigned bigint': BIGINT, # TODO: unsigned flags 'unsigned int': INTEGER, # TODO: unsigned flags 'unsigned smallint': SMALLINT, # TODO: unsigned flags 'numeric': NUMERIC, 'decimal': DECIMAL, 'dec': DECIMAL, 'float': FLOAT, 'double': NUMERIC, # TODO 'double precision': NUMERIC, # TODO 'real': REAL, 'smallmoney': SMALLMONEY, 'money': MONEY, 'smalldatetime': DATETIME, 'datetime': DATETIME, 'date': DATE, 'time': TIME, 'char': CHAR, 'character': CHAR, 'varchar': VARCHAR, 'character varying': VARCHAR, 'char varying': VARCHAR, 'unichar': UNICHAR, 'unicode character': UNIVARCHAR, 'nchar': NCHAR, 'national char': NCHAR, 'national character': NCHAR, 'nvarchar': NVARCHAR, 'nchar varying': NVARCHAR, 'national char varying': NVARCHAR, 'national character varying': NVARCHAR, 'text': TEXT, 
'unitext': UNITEXT, 'binary': BINARY, 'varbinary': VARBINARY, 'image': IMAGE, 'bit': BIT, # not in documentation for ASE 15.7 'long varchar': TEXT, # TODO 'timestamp': TIMESTAMP, 'uniqueidentifier': UNIQUEIDENTIFIER, } class SybaseInspector(reflection.Inspector): def __init__(self, conn): reflection.Inspector.__init__(self, conn) def get_table_id(self, table_name, schema=None): """Return the table id from `table_name` and `schema`.""" return self.dialect.get_table_id(self.bind, table_name, schema, info_cache=self.info_cache) class SybaseExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False def set_ddl_autocommit(self, connection, value): """Must be implemented by subclasses to accommodate DDL executions. "connection" is the raw unwrapped DBAPI connection. "value" is True or False. when True, the connection should be configured such that a DDL can take place subsequently. when False, a DDL has taken place and the connection should be resumed into non-autocommit mode. """ raise NotImplementedError() def pre_exec(self): if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = \ seq_column.key in self.compiled_parameters[0] else: self._enable_identity_insert = False if self._enable_identity_insert: self.cursor.execute("SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl)) if self.isddl: # TODO: to enhance this, we can detect "ddl in tran" on the # database settings. this error message should be improved to # include a note about that. 
if not self.should_autocommit: raise exc.InvalidRequestError( "The Sybase dialect only supports " "DDL in 'autocommit' mode at this time.") self.root_connection.engine.logger.info( "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')") self.set_ddl_autocommit( self.root_connection.connection.connection, True) def post_exec(self): if self.isddl: self.set_ddl_autocommit(self.root_connection, False) if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. format_table(self.compiled.statement.table) ) def get_lastrowid(self): cursor = self.create_cursor() cursor.execute("SELECT @@identity AS lastrowid") lastrowid = cursor.fetchone()[0] cursor.close() return lastrowid class SybaseSQLCompiler(compiler.SQLCompiler): ansi_bind_rules = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond' }) def get_select_precolumns(self, select): s = select._distinct and "DISTINCT " or "" # TODO: don't think Sybase supports # bind params for FIRST / TOP if select._limit: #if select._limit == 1: #s += "FIRST " #else: #s += "TOP %s " % (select._limit,) s += "TOP %s " % (select._limit,) if select._offset: if not select._limit: # FIXME: sybase doesn't allow an offset without a limit # so use a huge value for TOP here s += "TOP 1000000 " s += "START AT %s " % (select._offset + 1,) return s def get_from_hint_text(self, table, text): return text def limit_clause(self, select): # Limit in sybase is after the select keyword return "" def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % ( field, self.process(extract.expr, **kw)) def visit_now_func(self, fn, **kw): return "GETDATE()" def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" # which SQLAlchemy doesn't use return '' def order_by_clause(self, select, **kw): kw['literal_binds'] = True 
order_by = self.process(select._order_by_clause, **kw) # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" class SybaseDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process(column.type) if column.table is None: raise exc.CompileError( "The Sybase dialect requires Table-bound " "columns in order to generate DDL") seq_col = column.table._autoincrement_column # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = isinstance(column.default, sa_schema.Sequence) \ and column.default if sequence: start, increment = sequence.start or 1, \ sequence.increment or 1 else: start, increment = 1, 1 if (start, increment) == (1, 1): colspec += " IDENTITY" else: # TODO: need correct syntax for this colspec += " IDENTITY(%s,%s)" % (start, increment) else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if column.nullable is not None: if not column.nullable or column.primary_key: colspec += " NOT NULL" else: colspec += " NULL" return colspec def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX %s.%s" % ( self.preparer.quote_identifier(index.table.name), self._prepared_index_name(drop.element, include_schema=False) ) class SybaseIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS class SybaseDialect(default.DefaultDialect): name = 'sybase' supports_unicode_statements = False supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_native_boolean = False supports_unicode_binds = False postfetch_lastrowid = True colspecs = {} ischema_names = ischema_names type_compiler = SybaseTypeCompiler statement_compiler = SybaseSQLCompiler ddl_compiler = SybaseDDLCompiler preparer = 
SybaseIdentifierPreparer inspector = SybaseInspector construct_arguments = [] def _get_default_schema_name(self, connection): return connection.scalar( text("SELECT user_name() as user_name", typemap={'user_name': Unicode}) ) def initialize(self, connection): super(SybaseDialect, self).initialize(connection) if self.server_version_info is not None and\ self.server_version_info < (15, ): self.max_identifier_length = 30 else: self.max_identifier_length = 255 def get_table_id(self, connection, table_name, schema=None, **kw): """Fetch the id for schema.table_name. Several reflection methods require the table id. The idea for using this method is that it can be fetched one time and cached for subsequent calls. """ table_id = None if schema is None: schema = self.default_schema_name TABLEID_SQL = text(""" SELECT o.id AS id FROM sysobjects o JOIN sysusers u ON o.uid=u.uid WHERE u.name = :schema_name AND o.name = :table_name AND o.type in ('U', 'V') """) if util.py2k: if isinstance(schema, unicode): schema = schema.encode("ascii") if isinstance(table_name, unicode): table_name = table_name.encode("ascii") result = connection.execute(TABLEID_SQL, schema_name=schema, table_name=table_name) table_id = result.scalar() if table_id is None: raise exc.NoSuchTableError(table_name) return table_id @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) COLUMN_SQL = text(""" SELECT col.name AS name, t.name AS type, (col.status & 8) AS nullable, (col.status & 128) AS autoincrement, com.text AS 'default', col.prec AS precision, col.scale AS scale, col.length AS length FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON col.cdefault = com.id WHERE col.usertype = t.usertype AND col.id = :table_id ORDER BY col.colid """) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = [] for (name, type_, nullable, autoincrement, default, 
precision, scale, length) in results: col_info = self._get_column_info(name, type_, bool(nullable), bool(autoincrement), default, precision, scale, length) columns.append(col_info) return columns def _get_column_info(self, name, type_, nullable, autoincrement, default, precision, scale, length): coltype = self.ischema_names.get(type_, None) kwargs = {} if coltype in (NUMERIC, DECIMAL): args = (precision, scale) elif coltype == FLOAT: args = (precision,) elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR): args = (length,) else: args = () if coltype: coltype = coltype(*args, **kwargs) #is this necessary #if is_array: # coltype = ARRAY(coltype) else: util.warn("Did not recognize type '%s' of column '%s'" % (type_, name)) coltype = sqltypes.NULLTYPE if default: default = re.sub("DEFAULT", "", default).strip() default = re.sub("^'(.*)'$", lambda m: m.group(1), default) else: default = None column_info = dict(name=name, type=coltype, nullable=nullable, default=default, autoincrement=autoincrement) return column_info @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) table_cache = {} column_cache = {} foreign_keys = [] table_cache[table_id] = {"name": table_name, "schema": schema} COLUMN_SQL = text(""" SELECT c.colid AS id, c.name AS name FROM syscolumns c WHERE c.id = :table_id """) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = {} for col in results: columns[col["id"]] = col["name"] column_cache[table_id] = columns REFCONSTRAINT_SQL = text(""" SELECT o.name AS name, r.reftabid AS reftable_id, r.keycnt AS 'count', r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3, r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6, r.fokey7 AS fokey7, r.fokey1 AS fokey8, r.fokey9 AS fokey9, r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12, r.fokey13 AS fokey13, r.fokey14 AS fokey14, 
r.fokey15 AS fokey15, r.fokey16 AS fokey16, r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3, r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6, r.refkey7 AS refkey7, r.refkey1 AS refkey8, r.refkey9 AS refkey9, r.refkey10 AS refkey10, r.refkey11 AS refkey11, r.refkey12 AS refkey12, r.refkey13 AS refkey13, r.refkey14 AS refkey14, r.refkey15 AS refkey15, r.refkey16 AS refkey16 FROM sysreferences r JOIN sysobjects o on r.tableid = o.id WHERE r.tableid = :table_id """) referential_constraints = connection.execute(REFCONSTRAINT_SQL, table_id=table_id) REFTABLE_SQL = text(""" SELECT o.name AS name, u.name AS 'schema' FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE o.id = :table_id """) for r in referential_constraints: reftable_id = r["reftable_id"] if reftable_id not in table_cache: c = connection.execute(REFTABLE_SQL, table_id=reftable_id) reftable = c.fetchone() c.close() table_info = {"name": reftable["name"], "schema": None} if (schema is not None or reftable["schema"] != self.default_schema_name): table_info["schema"] = reftable["schema"] table_cache[reftable_id] = table_info results = connection.execute(COLUMN_SQL, table_id=reftable_id) reftable_columns = {} for col in results: reftable_columns[col["id"]] = col["name"] column_cache[reftable_id] = reftable_columns reftable = table_cache[reftable_id] reftable_columns = column_cache[reftable_id] constrained_columns = [] referred_columns = [] for i in range(1, r["count"] + 1): constrained_columns.append(columns[r["fokey%i" % i]]) referred_columns.append(reftable_columns[r["refkey%i" % i]]) fk_info = { "constrained_columns": constrained_columns, "referred_schema": reftable["schema"], "referred_table": reftable["name"], "referred_columns": referred_columns, "name": r["name"] } foreign_keys.append(fk_info) return foreign_keys @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, 
info_cache=kw.get("info_cache")) INDEX_SQL = text(""" SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, (i.status & 0x2) AS 'unique', index_col(object_name(i.id), i.indid, 1) AS col_1, index_col(object_name(i.id), i.indid, 2) AS col_2, index_col(object_name(i.id), i.indid, 3) AS col_3, index_col(object_name(i.id), i.indid, 4) AS col_4, index_col(object_name(i.id), i.indid, 5) AS col_5, index_col(object_name(i.id), i.indid, 6) AS col_6, index_col(object_name(i.id), i.indid, 7) AS col_7, index_col(object_name(i.id), i.indid, 8) AS col_8, index_col(object_name(i.id), i.indid, 9) AS col_9, index_col(object_name(i.id), i.indid, 10) AS col_10, index_col(object_name(i.id), i.indid, 11) AS col_11, index_col(object_name(i.id), i.indid, 12) AS col_12, index_col(object_name(i.id), i.indid, 13) AS col_13, index_col(object_name(i.id), i.indid, 14) AS col_14, index_col(object_name(i.id), i.indid, 15) AS col_15, index_col(object_name(i.id), i.indid, 16) AS col_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 0 AND i.indid BETWEEN 1 AND 254 """) results = connection.execute(INDEX_SQL, table_id=table_id) indexes = [] for r in results: column_names = [] for i in range(1, r["count"]): column_names.append(r["col_%i" % (i,)]) index_info = {"name": r["name"], "unique": bool(r["unique"]), "column_names": column_names} indexes.append(index_info) return indexes @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) PK_SQL = text(""" SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, index_col(object_name(i.id), i.indid, 1) AS pk_1, index_col(object_name(i.id), i.indid, 2) AS pk_2, index_col(object_name(i.id), i.indid, 3) AS pk_3, index_col(object_name(i.id), i.indid, 4) AS pk_4, index_col(object_name(i.id), i.indid, 5) AS pk_5, index_col(object_name(i.id), 
i.indid, 6) AS pk_6, index_col(object_name(i.id), i.indid, 7) AS pk_7, index_col(object_name(i.id), i.indid, 8) AS pk_8, index_col(object_name(i.id), i.indid, 9) AS pk_9, index_col(object_name(i.id), i.indid, 10) AS pk_10, index_col(object_name(i.id), i.indid, 11) AS pk_11, index_col(object_name(i.id), i.indid, 12) AS pk_12, index_col(object_name(i.id), i.indid, 13) AS pk_13, index_col(object_name(i.id), i.indid, 14) AS pk_14, index_col(object_name(i.id), i.indid, 15) AS pk_15, index_col(object_name(i.id), i.indid, 16) AS pk_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 2048 AND i.indid BETWEEN 1 AND 254 """) results = connection.execute(PK_SQL, table_id=table_id) pks = results.fetchone() results.close() constrained_columns = [] for i in range(1, pks["count"] + 1): constrained_columns.append(pks["pk_%i" % (i,)]) return {"constrained_columns": constrained_columns, "name": pks["name"]} @reflection.cache def get_schema_names(self, connection, **kw): SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u") schemas = connection.execute(SCHEMA_SQL) return [s["name"] for s in schemas] @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name TABLE_SQL = text(""" SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'U' """) if util.py2k: if isinstance(schema, unicode): schema = schema.encode("ascii") tables = connection.execute(TABLE_SQL, schema_name=schema) return [t["name"] for t in tables] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_DEF_SQL = text(""" SELECT c.text FROM syscomments c JOIN sysobjects o ON c.id = o.id WHERE o.name = :view_name AND o.type = 'V' """) if util.py2k: if isinstance(view_name, unicode): view_name = view_name.encode("ascii") view = 
connection.execute(VIEW_DEF_SQL, view_name=view_name) return view.scalar() @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_SQL = text(""" SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'V' """) if util.py2k: if isinstance(schema, unicode): schema = schema.encode("ascii") views = connection.execute(VIEW_SQL, schema_name=schema) return [v["name"] for v in views] def has_table(self, connection, table_name, schema=None): try: self.get_table_id(connection, table_name, schema) except exc.NoSuchTableError: return False else: return True
gpl-3.0
asposecells/Aspose_Cells_Cloud
SDKs/Aspose.Cells-Cloud-SDK-for-Python/asposecellscloud/models/FillFormat.py
4
1156
#!/usr/bin/env python


class FillFormat(object):
    """Model describing a chart fill format.

    NOTE: originally produced by the swagger code generator program;
    keep the attribute set and JSON mapping in sync with the API schema.
    """

    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # Attribute name -> declared swagger type, in schema order.
        field_types = [
            ('Type', 'str'),
            ('SolidFill', 'SolidFill'),
            ('PatternFill', 'PatternFill'),
            ('TextureFill', 'TextureFill'),
            ('GradientFill', 'GradientFill'),
            ('ImageData', 'str'),
        ]
        self.swaggerTypes = dict(field_types)
        # Every attribute serializes under an identically named JSON key.
        self.attributeMap = dict((name, name) for name, _ in field_types)
        # All fields start unset; callers populate them before serialization.
        for name, _ in field_types:
            setattr(self, name, None)
mit
wfxiang08/django185
tests/test_runner/tests.py
13
15537
""" Tests for django test runner """ from __future__ import unicode_literals import unittest from admin_scripts.tests import AdminScriptTestCase from django import db from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management import call_command from django.db.backends.dummy.base import DatabaseCreation from django.test import ( TestCase, TransactionTestCase, mock, skipUnlessDBFeature, ) from django.test.runner import DiscoverRunner, dependency_ordered from django.test.testcases import connections_support_transactions from django.utils import six from django.utils.encoding import force_text from .models import Person class DependencyOrderingTests(unittest.TestCase): def test_simple_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ] dependencies = { 'alpha': ['charlie'], 'bravo': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) def test_chained_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ] dependencies = { 'alpha': ['bravo'], 'bravo': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) # Implied dependencies self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) def test_multiple_dependencies(self): raw = [ ('s1', 
('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ('s3', ('s3_db', ['charlie'])), ('s4', ('s4_db', ['delta'])), ] dependencies = { 'alpha': ['bravo', 'delta'], 'bravo': ['charlie'], 'delta': ['charlie'], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, aliases in ordered] self.assertIn('s1', ordered_sigs) self.assertIn('s2', ordered_sigs) self.assertIn('s3', ordered_sigs) self.assertIn('s4', ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2')) self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4')) # Implicit dependencies self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1')) def test_circular_dependencies(self): raw = [ ('s1', ('s1_db', ['alpha'])), ('s2', ('s2_db', ['bravo'])), ] dependencies = { 'bravo': ['alpha'], 'alpha': ['bravo'], } self.assertRaises(ImproperlyConfigured, dependency_ordered, raw, dependencies=dependencies) def test_own_alias_dependency(self): raw = [ ('s1', ('s1_db', ['alpha', 'bravo'])) ] dependencies = { 'alpha': ['bravo'] } with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) # reordering aliases shouldn't matter raw = [ ('s1', ('s1_db', ['bravo', 'alpha'])) ] with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) class MockTestRunner(object): def __init__(self, *args, **kwargs): pass MockTestRunner.run_tests = mock.Mock(return_value=[]) class ManageCommandTests(unittest.TestCase): def test_custom_test_runner(self): call_command('test', 'sites', testrunner='test_runner.tests.MockTestRunner') MockTestRunner.run_tests.assert_called_with(('sites',)) def test_bad_test_runner(self): with self.assertRaises(AttributeError): call_command('test', 'sites', testrunner='test_runner.NonExistentRunner') class 
CustomOptionsTestRunner(DiscoverRunner): def __init__(self, verbosity=1, interactive=True, failfast=True, option_a=None, option_b=None, option_c=None, **kwargs): super(CustomOptionsTestRunner, self).__init__(verbosity=verbosity, interactive=interactive, failfast=failfast) self.option_a = option_a self.option_b = option_b self.option_c = option_c @classmethod def add_arguments(cls, parser): parser.add_argument('--option_a', '-a', action='store', dest='option_a', default='1'), parser.add_argument('--option_b', '-b', action='store', dest='option_b', default='2'), parser.add_argument('--option_c', '-c', action='store', dest='option_c', default='3'), def run_tests(self, test_labels, extra_tests=None, **kwargs): print("%s:%s:%s" % (self.option_a, self.option_b, self.option_c)) class CustomTestRunnerOptionsTests(AdminScriptTestCase): def setUp(self): settings = { 'TEST_RUNNER': '\'test_runner.tests.CustomOptionsTestRunner\'', } self.write_settings('settings.py', sdict=settings) def tearDown(self): self.remove_settings('settings.py') def test_default_options(self): args = ['test', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:2:3') def test_default_and_given_options(self): args = ['test', '--settings=test_project.settings', '--option_b=foo'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:foo:3') def test_option_name_and_value_separated(self): args = ['test', '--settings=test_project.settings', '--option_b', 'foo'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, '1:foo:3') def test_all_options_given(self): args = ['test', '--settings=test_project.settings', '--option_a=bar', '--option_b=foo', '--option_c=31337'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, 'bar:foo:31337') class Ticket17477RegressionTests(AdminScriptTestCase): def setUp(self): 
self.write_settings('settings.py') def tearDown(self): self.remove_settings('settings.py') def test_ticket_17477(self): """'manage.py help test' works after r16352.""" args = ['help', 'test'] out, err = self.run_manage(args) self.assertNoOutput(err) class Sqlite3InMemoryTestDbs(TestCase): available_apps = [] @unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections), "This is an sqlite-specific issue") def test_transaction_support(self): """Ticket #16329: sqlite3 in-memory test databases""" old_db_connections = db.connections for option_key, option_value in ( ('NAME', ':memory:'), ('TEST', {'NAME': ':memory:'})): try: db.connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.sqlite3', option_key: option_value, }, 'other': { 'ENGINE': 'django.db.backends.sqlite3', option_key: option_value, }, }) other = db.connections['other'] DiscoverRunner(verbosity=0).setup_databases() msg = "DATABASES setting '%s' option set to sqlite3's ':memory:' value shouldn't interfere with transaction support detection." % option_key # Transaction support should be properly initialized for the 'other' DB self.assertTrue(other.features.supports_transactions, msg) # And all the DBs should report that they support transactions self.assertTrue(connections_support_transactions(), msg) finally: db.connections = old_db_connections class DummyBackendTest(unittest.TestCase): def test_setup_databases(self): """ Test that setup_databases() doesn't fail with dummy database backend. 
""" runner_instance = DiscoverRunner(verbosity=0) old_db_connections = db.connections try: db.connections = db.ConnectionHandler({}) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) except Exception as e: self.fail("setup_databases/teardown_databases unexpectedly raised " "an error: %s" % e) finally: db.connections = old_db_connections class AliasedDefaultTestSetupTest(unittest.TestCase): def test_setup_aliased_default_database(self): """ Test that setup_datebases() doesn't fail when 'default' is aliased """ runner_instance = DiscoverRunner(verbosity=0) old_db_connections = db.connections try: db.connections = db.ConnectionHandler({ 'default': { 'NAME': 'dummy' }, 'aliased': { 'NAME': 'dummy' } }) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) except Exception as e: self.fail("setup_databases/teardown_databases unexpectedly raised " "an error: %s" % e) finally: db.connections = old_db_connections class SetupDatabasesTests(unittest.TestCase): def setUp(self): self._old_db_connections = db.connections self._old_destroy_test_db = DatabaseCreation.destroy_test_db self._old_create_test_db = DatabaseCreation.create_test_db self.runner_instance = DiscoverRunner(verbosity=0) def tearDown(self): DatabaseCreation.create_test_db = self._old_create_test_db DatabaseCreation.destroy_test_db = self._old_destroy_test_db db.connections = self._old_db_connections def test_setup_aliased_databases(self): destroyed_names = [] DatabaseCreation.destroy_test_db = ( lambda self, old_database_name, verbosity=1, keepdb=False, serialize=True: destroyed_names.append(old_database_name) ) DatabaseCreation.create_test_db = ( lambda self, verbosity=1, autoclobber=False, keepdb=False, serialize=True: self._get_test_db_name() ) db.connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', 'NAME': 'dbname', }, 'other': { 'ENGINE': 'django.db.backends.dummy', 'NAME': 'dbname', } }) 
old_config = self.runner_instance.setup_databases() self.runner_instance.teardown_databases(old_config) self.assertEqual(destroyed_names.count('dbname'), 1) def test_destroy_test_db_restores_db_name(self): db.connections = db.ConnectionHandler({ 'default': { 'ENGINE': settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"], 'NAME': 'xxx_test_database', }, }) # Using the real current name as old_name to not mess with the test suite. old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"] db.connections['default'].creation.destroy_test_db(old_name, verbosity=0, keepdb=True) self.assertEqual(db.connections['default'].settings_dict["NAME"], old_name) def test_serialization(self): serialize = [] DatabaseCreation.create_test_db = ( lambda *args, **kwargs: serialize.append(kwargs.get('serialize')) ) db.connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', }, }) self.runner_instance.setup_databases() self.assertEqual(serialize, [True]) def test_serialized_off(self): serialize = [] DatabaseCreation.create_test_db = ( lambda *args, **kwargs: serialize.append(kwargs.get('serialize')) ) db.connections = db.ConnectionHandler({ 'default': { 'ENGINE': 'django.db.backends.dummy', 'TEST': {'SERIALIZE': False}, }, }) self.runner_instance.setup_databases() self.assertEqual(serialize, [False]) class DeprecationDisplayTest(AdminScriptTestCase): # tests for 19546 def setUp(self): settings = { 'DATABASES': '{"default": {"ENGINE":"django.db.backends.sqlite3", "NAME":":memory:"}}' } self.write_settings('settings.py', sdict=settings) def tearDown(self): self.remove_settings('settings.py') def test_runner_deprecation_verbosity_default(self): args = ['test', '--settings=test_project.settings', 'test_runner_deprecation_app'] out, err = self.run_django_admin(args) self.assertIn("Ran 1 test", force_text(err)) six.assertRegex(self, err, r"RemovedInDjango\d\dWarning: warning from test") six.assertRegex(self, err, r"RemovedInDjango\d\dWarning: module-level warning 
from deprecation_app") def test_runner_deprecation_verbosity_zero(self): args = ['test', '--settings=test_project.settings', '--verbosity=0', 'test_runner_deprecation_app'] out, err = self.run_django_admin(args) self.assertIn("Ran 1 test", err) self.assertNotIn("warning from test", err) class AutoIncrementResetTest(TransactionTestCase): """ Here we test creating the same model two times in different test methods, and check that both times they get "1" as their PK value. That is, we test that AutoField values start from 1 for each transactional test case. """ available_apps = ['test_runner'] reset_sequences = True @skipUnlessDBFeature('supports_sequence_reset') def test_autoincrement_reset1(self): p = Person.objects.create(first_name='Jack', last_name='Smith') self.assertEqual(p.pk, 1) @skipUnlessDBFeature('supports_sequence_reset') def test_autoincrement_reset2(self): p = Person.objects.create(first_name='Jack', last_name='Smith') self.assertEqual(p.pk, 1)
bsd-3-clause
pretix/cleanerversion
versions/util/postgresql.py
1
6641
from __future__ import absolute_import from django.db import connection as default_connection from versions.models import VersionedForeignKey from .helper import database_connection, versionable_models def index_exists(cursor, index_name): """ Checks if an index with the given name exists in the database :param cursor: database connection cursor :param index_name: string :return: boolean """ cursor.execute("SELECT COUNT(1) FROM pg_indexes WHERE indexname = %s", [index_name]) return cursor.fetchone()[0] > 0 def remove_uuid_id_like_indexes(app_name, database=None): """ Remove all of varchar_pattern_ops indexes that django created for uuid columns. A search is never done with a filter of the style (uuid__like='1ae3c%'), so all such indexes can be removed from Versionable models. This will only try to remove indexes if they exist in the database, so it should be safe to run in a post_migrate signal handler. Running it several times should leave the database in the same state as running it once. :param str app_name: application name whose Versionable models will be acted on. :param str database: database alias to use. If None, use default connection. :return: number of indexes removed :rtype: int """ removed_indexes = 0 with database_connection(database).cursor() as cursor: for model in versionable_models(app_name, include_auto_created=True): indexes = select_uuid_like_indexes_on_table(model, cursor) if indexes: index_list = ','.join(['"%s"' % r[0] for r in indexes]) cursor.execute("DROP INDEX %s" % index_list) removed_indexes += len(indexes) return removed_indexes def get_uuid_like_indexes_on_table(model): """ Gets a list of database index names for the given model for the uuid-containing fields that have had a like-index created on them. 
:param model: Django model :return: list of database rows; the first field of each row is an index name """ with default_connection.cursor() as c: indexes = select_uuid_like_indexes_on_table(model, c) return indexes def select_uuid_like_indexes_on_table(model, cursor): """ Gets a list of database index names for the given model for the uuid-containing fields that have had a like-index created on them. :param model: Django model :param cursor: database connection cursor :return: list of database rows; the first field of each row is an index name """ # VersionedForeignKey fields as well as the id fields have these useless like indexes field_names = ["'%s'" % f.column for f in model._meta.fields if isinstance(f, VersionedForeignKey)] field_names.append("'id'") sql = """ select i.relname as index_name from pg_class t, pg_class i, pg_index ix, pg_attribute a where t.oid = ix.indrelid and i.oid = ix.indexrelid and a.attrelid = t.oid and a.attnum = ANY(ix.indkey) and t.relkind = 'r' and t.relname = '{0}' and a.attname in ({1}) and i.relname like '%_like' """.format(model._meta.db_table, ','.join(field_names)) cursor.execute(sql) return cursor.fetchall() def create_current_version_unique_indexes(app_name, database=None): """ Add unique indexes for models which have a VERSION_UNIQUE attribute. These must be defined as partially unique indexes, which django does not support. The unique indexes are defined so that no two *current* versions can have the same value. This will only try to create indexes if they do not exist in the database, so it should be safe to run in a post_migrate signal handler. Running it several times should leave the database in the same state as running it once. :param str app_name: application name whose Versionable models will be acted on. :param str database: database alias to use. If None, use default connection. 
:return: number of partial unique indexes created :rtype: int """ indexes_created = 0 connection = database_connection(database) with connection.cursor() as cursor: for model in versionable_models(app_name): unique_field_groups = getattr(model, 'VERSION_UNIQUE', None) if not unique_field_groups: continue table_name = model._meta.db_table for group in unique_field_groups: col_prefixes = [] columns = [] for field in group: column = model._meta.get_field(field).column col_prefixes.append(column[0:3]) columns.append(column) index_name = '%s_%s_%s_v_uniq' % (app_name, table_name, '_'.join(col_prefixes)) if not index_exists(cursor, index_name): cursor.execute("CREATE UNIQUE INDEX %s ON %s(%s) WHERE version_end_date IS NULL" % (index_name, table_name, ','.join(columns))) indexes_created += 1 return indexes_created def create_current_version_unique_identity_indexes(app_name, database=None): """ Add partial unique indexes for the the identity column of versionable models. This enforces that no two *current* versions can have the same identity. This will only try to create indexes if they do not exist in the database, so it should be safe to run in a post_migrate signal handler. Running it several times should leave the database in the same state as running it once. :param str app_name: application name whose Versionable models will be acted on. :param str database: database alias to use. If None, use default connection. :return: number of partial unique indexes created :rtype: int """ indexes_created = 0 connection = database_connection(database) with connection.cursor() as cursor: for model in versionable_models(app_name): table_name = model._meta.db_table index_name = '%s_%s_identity_v_uniq' % (app_name, table_name) if not index_exists(cursor, index_name): cursor.execute("CREATE UNIQUE INDEX %s ON %s(%s) WHERE version_end_date IS NULL" % (index_name, table_name, 'identity')) indexes_created += 1 return indexes_created
apache-2.0
sudosurootdev/external_chromium_org
build/go/go.py
26
1872
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This script invokes the go build tool. Must be called as follows: python go.py <go-binary> <build directory> <output file> <src directory> <CGO_CFLAGS> <CGO_LDFLAGS> <go-binary options> eg. python go.py /usr/lib/google-golang/bin/go out/build out/a.out .. "-I." "-L. -ltest" test -c test/test.go """ import argparse import os import shutil import sys def main(): parser = argparse.ArgumentParser() parser.add_argument('go_binary') parser.add_argument('build_directory') parser.add_argument('output_file') parser.add_argument('src_root') parser.add_argument('cgo_cflags') parser.add_argument('cgo_ldflags') parser.add_argument('go_option', nargs='*') args = parser.parse_args() go_binary = args.go_binary build_dir = args.build_directory out_file = os.path.abspath(args.output_file) # The src directory specified is relative. We need this as an absolute path. src_root = os.path.abspath(args.src_root) # GOPATH must be absolute, and point to one directory up from |src_Root| go_path = os.path.abspath(os.path.join(src_root, "..")) go_options = args.go_option try: shutil.rmtree(build_dir, True) os.mkdir(build_dir) except: pass old_directory = os.getcwd() os.chdir(build_dir) os.environ["GOPATH"] = go_path os.environ["CGO_CFLAGS"] = args.cgo_cflags os.environ["CGO_LDFLAGS"] = args.cgo_ldflags os.system("%s %s" % (go_binary, " ".join(go_options))) out_files = [ f for f in os.listdir(".") if os.path.isfile(f)] if (len(out_files) > 0): shutil.move(out_files[0], out_file) os.chdir(old_directory) try: shutil.rmtree(build_dir, True) except: pass if __name__ == '__main__': sys.exit(main())
bsd-3-clause
moiseslorap/RIT
Computer Science 1/Labs/lab9/rooms.py
1
3862
from rit_lib import * from slList import * class Rooms(struct): """ The HashTable data structure contains a collection of values where each value is located by a hashable key. No two values may have the same key, but more than one key may have the same value. table is the list holding the hash table size is the number of elements in occupying the hashtable. """ _slots = ((list, 'table'), (int, 'size')) def HashTableToStr(self): """ HashTableToStr: HashTable -> String """ result = "" for i in range(len(self.table)): e = self.table[i] if not e == None: result += str(i) + ": " result += e.EntryToStr() + "\n" return result def hash_function(self, name): """ hash_function: K NatNum -> NatNum Compute a hash of the val string that is in [0 ... n). """ hashval = 0 hashmult = 1 for letter in name: hashval += (ord(letter) - ord('a')) hashmult *= (ord(letter) - ord('a')) roomNumber = abs(hashmult - hashval) % len(self.table) return roomNumber def keys(self): """ keys: HashTable(K, V) -> List(K) Return a list of keys in the given hashTable. """ result = [] for entry in self.table: if entry != None: result.append(entry.key) return result def has(self, key): """ has: HashTable(K, V) K -> Boolean Return True iff hTable has an entry with the given key. """ index = self.hash_function(key) startIndex = index # We must make sure we don't go in circles. while self.table[ index ] != None and self.table[ index ].key != key: index = (index + 1) % len(self.table) if index == startIndex: return False return self.table[ index ] != None def put(self, key, value): """ put: HashTable(K, V) K V -> Boolean Using the given hash table, set the given key to the given value. If the key already exists, the given value will replace the previous one already in the table. If the table is full, an Exception is raised. """ index = self.hash_function(key) startIndex = index # We must make sure we don't go in circles. 
while self.table[ index ] != None and self.table[ index ].key != key: index = (index + 1) % len(self.table) if index == startIndex: raise Exception("Hash table is full.") if self.table[ index ] == None: self.table[ index ] = Entry(key, value) self.size += 1 else: self.table[ index ].value = value return True def get( self, key): """ get: HashTable(K, V) K -> V Return the value associated with the given key in the given hash table. Precondition: self.has(key) """ index = self.hash_function(key) return self.table[ index ].value def createRoomsHashTable(capacity=100): """ createHashTable: NatNum? -> HashTable """ if capacity < 2: capacity = 2 aHashTable = Rooms([None for _ in range(capacity)], 0) return aHashTable class Entry(struct): """ A class used to hold key/value pairs. """ _slots = ((object, "key"), (object, "value")) def EntryToStr( self ): """ EntryToStr: Entry -> String return the string representation of the entry. """ return "(" + str(self.key) + ", " + str(self.value) + ")"
mit
pjadzinsky/keras
keras/layers/containers.py
10
8817
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function import theano.tensor as T from ..layers.core import Layer, Merge from ..utils.theano_utils import ndim_tensor from six.moves import range class Sequential(Layer): ''' Simple linear stack of layers. inherited from Layer: - get_params - get_output_mask - supports_masked_input ''' def __init__(self, layers=[]): self.layers = [] self.params = [] self.regularizers = [] self.constraints = [] self.updates = [] for layer in layers: self.add(layer) def set_previous(self, layer): self.layers[0].previous = layer def add(self, layer): self.layers.append(layer) if len(self.layers) > 1: self.layers[-1].set_previous(self.layers[-2]) if not hasattr(self.layers[0], 'input'): self.set_input() layer.init_updates() params, regularizers, constraints, updates = layer.get_params() self.params += params self.regularizers += regularizers self.constraints += constraints self.updates += updates def get_output(self, train=False): return self.layers[-1].get_output(train) def set_input(self): for l in self.layers: if hasattr(l, 'input'): ndim = l.input.ndim self.layers[0].input = ndim_tensor(ndim) break def get_input(self, train=False): if not hasattr(self.layers[0], 'input'): self.set_input() return self.layers[0].get_input(train) @property def input(self): return self.get_input() def get_weights(self): weights = [] for layer in self.layers: weights += layer.get_weights() return weights def set_weights(self, weights): for i in range(len(self.layers)): nb_param = len(self.layers[i].params) self.layers[i].set_weights(weights[:nb_param]) weights = weights[nb_param:] def get_config(self): return {"name": self.__class__.__name__, "layers": [layer.get_config() for layer in self.layers]} class Graph(Layer): ''' Implement a NN graph with arbitrary layer connections, arbitrary number of inputs and arbitrary number of outputs. 
Note: Graph can only be used as a layer (connect, input, get_input, get_output) when it has exactly one input and one output. inherited from Layer: - get_params - get_output_mask - supports_masked_input - get_weights - set_weights ''' def __init__(self): self.namespace = set() # strings self.nodes = {} # layer-like self.inputs = {} # layer-like self.input_order = [] # strings self.outputs = {} # layer-like self.output_order = [] # strings self.input_config = [] # dicts self.output_config = [] # dicts self.node_config = [] # dicts self.params = [] self.regularizers = [] self.constraints = [] self.updates = [] @property def nb_input(self): return len(self.inputs) @property def nb_output(self): return len(self.outputs) def set_previous(self, layer, connection_map={}): if self.nb_input != layer.nb_output: raise Exception('Cannot connect layers: input count does not match output count.') if self.nb_input == 1: self.inputs[self.input_order[0]].set_previous(layer) else: if not connection_map: raise Exception('Cannot attach multi-input layer: no connection_map provided.') for k, v in connection_map.items(): if k in self.inputs and v in layer.outputs: self.inputs[k].set_previous(layer.outputs[v]) else: raise Exception('Invalid connection map.') def get_input(self, train=False): if len(self.inputs) == len(self.outputs) == 1: return self.inputs[self.input_order[0]].get_input(train) else: return dict([(k, v.get_input(train)) for k, v in self.inputs.items()]) @property def input(self): return self.get_input() def get_output(self, train=False): if len(self.inputs) == len(self.outputs) == 1: return self.outputs[self.output_order[0]].get_output(train) else: return dict([(k, v.get_output(train)) for k, v in self.outputs.items()]) def add_input(self, name, ndim=2, dtype='float'): if name in self.namespace: raise Exception('Duplicate node identifier: ' + name) self.namespace.add(name) self.input_order.append(name) layer = Layer() # empty layer if dtype == 'float': layer.input = 
ndim_tensor(ndim) else: if ndim == 2: layer.input = T.imatrix() else: raise Exception('Type "int" can only be used with ndim==2 (Embedding).') layer.input.name = name self.inputs[name] = layer self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype}) def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', concat_axis=-1, create_output=False): if hasattr(layer, 'set_name'): layer.set_name(name) if name in self.namespace: raise Exception('Duplicate node identifier: ' + name) if input: if input not in self.namespace: raise Exception('Unknown node/input identifier: ' + input) if input in self.nodes: layer.set_previous(self.nodes[input]) elif input in self.inputs: layer.set_previous(self.inputs[input]) if inputs: to_merge = [] for n in inputs: if n in self.nodes: to_merge.append(self.nodes[n]) elif n in self.inputs: to_merge.append(self.inputs[n]) else: raise Exception('Unknown identifier: ' + n) merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis) layer.set_previous(merge) self.namespace.add(name) self.nodes[name] = layer self.node_config.append({'name': name, 'input': input, 'inputs': inputs, 'merge_mode': merge_mode, 'concat_axis': concat_axis, 'create_output': create_output}) layer.init_updates() params, regularizers, constraints, updates = layer.get_params() self.params += params self.regularizers += regularizers self.constraints += constraints self.updates += updates if create_output: self.add_output(name, input=name) def add_output(self, name, input=None, inputs=[], merge_mode='concat', concat_axis=-1): if name in self.output_order: raise Exception('Duplicate output identifier: ' + name) if input: if input not in self.namespace: raise Exception('Unknown node/input identifier: ' + input) if input in self.nodes: self.outputs[name] = self.nodes[input] elif input in self.inputs: self.outputs[name] = self.inputs[input] if inputs: to_merge = [] for n in inputs: if n not in self.nodes: raise Exception('Unknown identifier: ' 
+ n) to_merge.append(self.nodes[n]) merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis) self.outputs[name] = merge self.output_order.append(name) self.output_config.append({'name': name, 'input': input, 'inputs': inputs, 'merge_mode': merge_mode, 'concat_axis': concat_axis}) def get_config(self): return {"name": self.__class__.__name__, "input_config": self.input_config, "node_config": self.node_config, "output_config": self.output_config, "input_order": self.input_order, "output_order": self.output_order, "nodes": dict([(c["name"], self.nodes[c["name"]].get_config()) for c in self.node_config])}
mit
jehoffmann/l4linux
tools/perf/scripts/python/sctop.py
11180
1924
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
gpl-2.0
erikrose/dxr
tests/test_multi_tree/test_multi_tree.py
13
1146
"""Tests specific to multiple-tree instances""" from dxr.testing import DxrInstanceTestCase from nose.tools import eq_ class ParallelControllerTests(DxrInstanceTestCase): """Tests for the /parallel/ controller""" def test_existent_parallel_file(self): """Make sure the /parallel controller redirects to an existent parallel file.""" response = self.client().get('/code/parallel/folder/nested_folder/hai') eq_(response.headers['Location'], 'http://localhost/code/source/folder/nested_folder/hai') def test_existent_parallel_folder(self): """Make sure the /parallel controller redirects to an existent parallel folder.""" response = self.client().get('/code/parallel/folder/') eq_(response.headers['Location'], 'http://localhost/code/source/folder/') def test_nonexistent_parallel(self): """Make sure the /parallel controller redirects to an existent parallel file or folder.""" response = self.client().get('/code/parallel/folder/nope') eq_(response.headers['Location'], 'http://localhost/code/source/')
mit
sestrella/ansible
lib/ansible/modules/storage/netapp/na_ontap_svm_options.py
59
5200
#!/usr/bin/python # (c) 2018, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' short_description: NetApp ONTAP Modify SVM Options author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com> description: - Modify ONTAP SVM Options - Only Options that appear on "vserver options show" can be set extends_documentation_fragment: - netapp.na_ontap module: na_ontap_svm_options version_added: "2.7" options: name: description: - Name of the option. value: description: - Value of the option. - Value must be in quote vserver: description: - The name of the vserver to which this option belongs to. required: True ''' EXAMPLES = """ - name: Set SVM Options na_ontap_svm_options: vserver: "{{ netapp_vserver_name }}" hostname: "{{ netapp_hostname }}" username: "{{ netapp_username }}" password: "{{ netapp_password }}" name: snmp.enable value: 'on' """ RETURN = """ """ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils from ansible.module_utils.netapp_module import NetAppModule HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() class NetAppONTAPSvnOptions(object): def __init__(self): self.argument_spec = netapp_utils.na_ontap_host_argument_spec() self.argument_spec.update(dict( name=dict(required=False, type="str", default=None), value=dict(required=False, type='str', default=None), vserver=dict(required=True, type='str') )) self.module = AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=True ) self.na_helper = NetAppModule() self.parameters = self.na_helper.set_parameters(self.module.params) if HAS_NETAPP_LIB is False: self.module.fail_json(msg="the python NetApp-Lib module is 
required") else: self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver']) return def set_options(self): """ Set a specific option :return: None """ option_obj = netapp_utils.zapi.NaElement("options-set") option_obj.add_new_child('name', self.parameters['name']) option_obj.add_new_child('value', self.parameters['value']) try: result = self.server.invoke_successfully(option_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc()) def list_options(self): """ List all Options on the Vserver :return: None """ option_obj = netapp_utils.zapi.NaElement("options-list-info") try: result = self.server.invoke_successfully(option_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc()) def is_option_set(self): """ Checks to see if an option is set or not :return: If option is set return True, else return False """ option_obj = netapp_utils.zapi.NaElement("options-get-iter") options_info = netapp_utils.zapi.NaElement("option-info") if self.parameters.get('name') is not None: options_info.add_new_child("name", self.parameters['name']) if self.parameters.get('value') is not None: options_info.add_new_child("value", self.parameters['value']) if "vserver" in self.parameters.keys(): if self.parameters['vserver'] is not None: options_info.add_new_child("vserver", self.parameters['vserver']) query = netapp_utils.zapi.NaElement("query") query.add_child_elem(options_info) option_obj.add_child_elem(query) try: result = self.server.invoke_successfully(option_obj, True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc()) if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1: return True 
return False def apply(self): changed = False netapp_utils.ems_log_event("na_ontap_svm_options", self.server) is_set = self.is_option_set() if not is_set: self.set_options() changed = True self.module.exit_json(changed=changed) def main(): """ Execute action from playbook :return: none """ cg_obj = NetAppONTAPSvnOptions() cg_obj.apply() if __name__ == '__main__': main()
gpl-3.0
skarra/PRS
libs/sqlalchemy/dialects/sqlite/__init__.py
2
1042
# sqlite/__init__.py # Copyright (C) 2005-2019 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import base # noqa from . import pysqlcipher # noqa from . import pysqlite # noqa from .base import BLOB from .base import BOOLEAN from .base import CHAR from .base import DATE from .base import DATETIME from .base import DECIMAL from .base import FLOAT from .base import INTEGER from .base import JSON from .base import NUMERIC from .base import REAL from .base import SMALLINT from .base import TEXT from .base import TIME from .base import TIMESTAMP from .base import VARCHAR # default dialect base.dialect = dialect = pysqlite.dialect __all__ = ( "BLOB", "BOOLEAN", "CHAR", "DATE", "DATETIME", "DECIMAL", "FLOAT", "INTEGER", "JSON", "NUMERIC", "SMALLINT", "TEXT", "TIME", "TIMESTAMP", "VARCHAR", "REAL", "dialect", )
agpl-3.0
ntuecon/server
pyenv/Lib/site-packages/twisted/words/test/test_ircsupport.py
13
10702
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.words.im.ircsupport}. """ from twisted.test.proto_helpers import StringTransport from twisted.words.im.basechat import ChatUI, Conversation, GroupConversation from twisted.words.im.ircsupport import IRCAccount, IRCProto from twisted.words.im.locals import OfflineError from twisted.words.test.test_irc import IRCTestCase class StubConversation(Conversation): def show(self): pass def showMessage(self, message, metadata): self.message = message self.metadata = metadata class StubGroupConversation(GroupConversation): def setTopic(self, topic, nickname): self.topic = topic self.topicSetBy = nickname def show(self): pass def showGroupMessage(self, sender, text, metadata=None): self.metadata = metadata self.text = text self.metadata = metadata class StubChatUI(ChatUI): def getConversation(self, group, Class=StubConversation, stayHidden=0): return ChatUI.getGroupConversation(self, group, Class, stayHidden) def getGroupConversation(self, group, Class=StubGroupConversation, stayHidden=0): return ChatUI.getGroupConversation(self, group, Class, stayHidden) class IRCProtoTests(IRCTestCase): """ Tests for L{IRCProto}. """ def setUp(self): self.account = IRCAccount( "Some account", False, "alice", None, "example.com", 6667) self.proto = IRCProto(self.account, StubChatUI(), None) self.transport = StringTransport() def test_login(self): """ When L{IRCProto} is connected to a transport, it sends I{NICK} and I{USER} commands with the username from the account object. """ self.proto.makeConnection(self.transport) self.assertEqualBufferValue( self.transport.value(), "NICK alice\r\n" "USER alice foo bar :Twisted-IM user\r\n") def test_authenticate(self): """ If created with an account with a password, L{IRCProto} sends a I{PASS} command before the I{NICK} and I{USER} commands. 
""" self.account.password = "secret" self.proto.makeConnection(self.transport) self.assertEqualBufferValue( self.transport.value(), "PASS secret\r\n" "NICK alice\r\n" "USER alice foo bar :Twisted-IM user\r\n") def test_channels(self): """ If created with an account with a list of channels, L{IRCProto} joins each of those channels after registering. """ self.account.channels = ['#foo', '#bar'] self.proto.makeConnection(self.transport) self.assertEqualBufferValue( self.transport.value(), "NICK alice\r\n" "USER alice foo bar :Twisted-IM user\r\n" "JOIN #foo\r\n" "JOIN #bar\r\n") def test_isupport(self): """ L{IRCProto} can interpret I{ISUPPORT} (I{005}) messages from the server and reflect their information in its C{supported} attribute. """ self.proto.makeConnection(self.transport) self.proto.dataReceived( ":irc.example.com 005 alice MODES=4 CHANLIMIT=#:20\r\n") self.assertEqual(4, self.proto.supported.getFeature("MODES")) def test_nick(self): """ IRC NICK command changes the nickname of a user. """ self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice JOIN #group1\r\n") self.proto.dataReceived(":alice1 JOIN #group1\r\n") self.proto.dataReceived(":alice1 NICK newnick\r\n") self.proto.dataReceived(":alice3 NICK newnick3\r\n") self.assertIn("newnick", self.proto._ingroups) self.assertNotIn("alice1", self.proto._ingroups) def test_part(self): """ IRC PART command removes a user from an IRC channel. 
""" self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice1 JOIN #group1\r\n") self.assertIn("group1", self.proto._ingroups["alice1"]) self.assertNotIn("group2", self.proto._ingroups["alice1"]) self.proto.dataReceived(":alice PART #group1\r\n") self.proto.dataReceived(":alice1 PART #group1\r\n") self.proto.dataReceived(":alice1 PART #group2\r\n") self.assertNotIn("group1", self.proto._ingroups["alice1"]) self.assertNotIn("group2", self.proto._ingroups["alice1"]) def test_quit(self): """ IRC QUIT command removes a user from all IRC channels. """ self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice1 JOIN #group1\r\n") self.assertIn("group1", self.proto._ingroups["alice1"]) self.assertNotIn("group2", self.proto._ingroups["alice1"]) self.proto.dataReceived(":alice1 JOIN #group3\r\n") self.assertIn("group3", self.proto._ingroups["alice1"]) self.proto.dataReceived(":alice1 QUIT\r\n") self.assertTrue(len(self.proto._ingroups["alice1"]) == 0) self.proto.dataReceived(":alice3 QUIT\r\n") def test_topic(self): """ IRC TOPIC command changes the topic of an IRC channel. """ self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice1 JOIN #group1\r\n") self.proto.dataReceived(":alice1 TOPIC #group1 newtopic\r\n") groupConversation = self.proto.getGroupConversation("group1") self.assertEqual(groupConversation.topic, "newtopic") self.assertEqual(groupConversation.topicSetBy, "alice1") def test_privmsg(self): """ PRIVMSG sends a private message to a user or channel. 
""" self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice1 PRIVMSG t2 test_message_1\r\n") conversation = self.proto.chat.getConversation( self.proto.getPerson("alice1")) self.assertEqual(conversation.message, "test_message_1") self.proto.dataReceived(":alice1 PRIVMSG #group1 test_message_2\r\n") groupConversation = self.proto.getGroupConversation("group1") self.assertEqual(groupConversation.text, "test_message_2") self.proto.setNick("alice") self.proto.dataReceived(":alice PRIVMSG #foo test_message_3\r\n") groupConversation = self.proto.getGroupConversation("foo") self.assertFalse(hasattr(groupConversation, "text")) conversation = self.proto.chat.getConversation( self.proto.getPerson("alice")) self.assertFalse(hasattr(conversation, "message")) def test_action(self): """ CTCP ACTION to a user or channel. """ self.proto.makeConnection(self.transport) self.proto.dataReceived(":alice1 PRIVMSG alice1 :\01ACTION smiles\01\r\n") conversation = self.proto.chat.getConversation( self.proto.getPerson("alice1")) self.assertEqual(conversation.message, "smiles") self.proto.dataReceived(":alice1 PRIVMSG #group1 :\01ACTION laughs\01\r\n") groupConversation = self.proto.getGroupConversation("group1") self.assertEqual(groupConversation.text, "laughs") self.proto.setNick("alice") self.proto.dataReceived(":alice PRIVMSG #group1 :\01ACTION cries\01\r\n") groupConversation = self.proto.getGroupConversation("foo") self.assertFalse(hasattr(groupConversation, "text")) conversation = self.proto.chat.getConversation( self.proto.getPerson("alice")) self.assertFalse(hasattr(conversation, "message")) def test_rplNamreply(self): """ RPL_NAMREPLY server response (353) lists all the users in a channel. RPL_ENDOFNAMES server response (363) is sent at the end of RPL_NAMREPLY to indicate that there are no more names. 
""" self.proto.makeConnection(self.transport) self.proto.dataReceived( ":example.com 353 z3p = #bnl :pSwede Dan- SkOyg @MrOp +MrPlus\r\n") expectedInGroups = {'Dan-': ['bnl'], 'pSwede': ['bnl'], 'SkOyg': ['bnl'], 'MrOp': ['bnl'], 'MrPlus': ['bnl']} expectedNamReplies = { 'bnl': ['pSwede', 'Dan-', 'SkOyg', 'MrOp', 'MrPlus']} self.assertEqual(expectedInGroups, self.proto._ingroups) self.assertEqual(expectedNamReplies, self.proto._namreplies) self.proto.dataReceived( ":example.com 366 alice #bnl :End of /NAMES list\r\n") self.assertEqual({}, self.proto._namreplies) groupConversation = self.proto.getGroupConversation("bnl") self.assertEqual(expectedNamReplies['bnl'], groupConversation.members) def test_rplTopic(self): """ RPL_TOPIC server response (332) is sent when a channel's topic is changed """ self.proto.makeConnection(self.transport) self.proto.dataReceived( ":example.com 332 alice, #foo :Some random topic\r\n") self.assertEqual("Some random topic", self.proto._topics["foo"]) def test_sendMessage(self): """ L{IRCPerson.sendMessage} """ self.proto.makeConnection(self.transport) person = self.proto.getPerson("alice") self.assertRaises(OfflineError, person.sendMessage, "Some message") person.account.client = self.proto self.transport.clear() person.sendMessage("Some message 2") self.assertEqual(self.transport.io.getvalue(), b'PRIVMSG alice :Some message 2\r\n') self.transport.clear() person.sendMessage("smiles", {"style": "emote"}) self.assertEqual(self.transport.io.getvalue(), b'PRIVMSG alice :\x01ACTION smiles\x01\r\n') def test_sendGroupMessage(self): """ L{IRCGroup.sendGroupMessage} """ self.proto.makeConnection(self.transport) group = self.proto.chat.getGroup("#foo", self.proto) self.assertRaises(OfflineError, group.sendGroupMessage, "Some message") group.account.client = self.proto self.transport.clear() group.sendGroupMessage("Some message 2") self.assertEqual(self.transport.io.getvalue(), b'PRIVMSG #foo :Some message 2\r\n') self.transport.clear() 
group.sendGroupMessage("smiles", {"style": "emote"}) self.assertEqual(self.transport.io.getvalue(), b'PRIVMSG #foo :\x01ACTION smiles\x01\r\n')
bsd-3-clause
tuhangdi/django
django/contrib/auth/tokens.py
433
2803
from datetime import date from django.conf import settings from django.utils import six from django.utils.crypto import constant_time_compare, salted_hmac from django.utils.http import base36_to_int, int_to_base36 class PasswordResetTokenGenerator(object): """ Strategy object used to generate and check tokens for the password reset mechanism. """ key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator" def make_token(self, user): """ Returns a token that can be used once to do a password reset for the given user. """ return self._make_token_with_timestamp(user, self._num_days(self._today())) def check_token(self, user, token): """ Check that a password reset token is correct for a given user. """ # Parse the token try: ts_b36, hash = token.split("-") except ValueError: return False try: ts = base36_to_int(ts_b36) except ValueError: return False # Check that the timestamp/uid has not been tampered with if not constant_time_compare(self._make_token_with_timestamp(user, ts), token): return False # Check the timestamp is within limit if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS: return False return True def _make_token_with_timestamp(self, user, timestamp): # timestamp is number of days since 2001-1-1. Converted to # base 36, this gives us a 3 digit string until about 2121 ts_b36 = int_to_base36(timestamp) # By hashing on the internal state of the user and using state # that is sure to change (the password salt will change as soon as # the password is set, at least for current Django auth, and # last_login will also change), we produce a hash that will be # invalid as soon as it is used. 
# We limit the hash to 20 chars to keep URL short hash = salted_hmac( self.key_salt, self._make_hash_value(user, timestamp), ).hexdigest()[::2] return "%s-%s" % (ts_b36, hash) def _make_hash_value(self, user, timestamp): # Ensure results are consistent across DB backends login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None) return ( six.text_type(user.pk) + user.password + six.text_type(login_timestamp) + six.text_type(timestamp) ) def _num_days(self, dt): return (dt - date(2001, 1, 1)).days def _today(self): # Used for mocking in tests return date.today() default_token_generator = PasswordResetTokenGenerator()
bsd-3-clause
jbassen/edx-platform
common/djangoapps/util/password_policy_validators.py
113
3665
# pylint: disable=no-member """ This file exposes a number of password complexity validators which can be optionally added to account creation This file was inspired by the django-passwords project at https://github.com/dstufft/django-passwords authored by dstufft (https://github.com/dstufft) """ from __future__ import division import string # pylint: disable=deprecated-module from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from django.conf import settings import nltk def validate_password_length(value): """ Validator that enforces minimum length of a password """ message = _("Invalid Length ({0})") code = "length" min_length = getattr(settings, 'PASSWORD_MIN_LENGTH', None) max_length = getattr(settings, 'PASSWORD_MAX_LENGTH', None) if min_length and len(value) < min_length: raise ValidationError(message.format(_("must be {0} characters or more").format(min_length)), code=code) elif max_length and len(value) > max_length: raise ValidationError(message.format(_("must be {0} characters or less").format(max_length)), code=code) def validate_password_complexity(value): """ Validator that enforces minimum complexity """ message = _("Must be more complex ({0})") code = "complexity" complexities = getattr(settings, "PASSWORD_COMPLEXITY", None) if complexities is None: return uppercase, lowercase, digits, non_ascii, punctuation = set(), set(), set(), set(), set() for character in value: if character.isupper(): uppercase.add(character) elif character.islower(): lowercase.add(character) elif character.isdigit(): digits.add(character) elif character in string.punctuation: punctuation.add(character) else: non_ascii.add(character) words = set(value.split()) errors = [] if len(uppercase) < complexities.get("UPPER", 0): errors.append(_("must contain {0} or more uppercase characters").format(complexities["UPPER"])) if len(lowercase) < complexities.get("LOWER", 0): errors.append(_("must contain {0} or more lowercase 
characters").format(complexities["LOWER"])) if len(digits) < complexities.get("DIGITS", 0): errors.append(_("must contain {0} or more digits").format(complexities["DIGITS"])) if len(punctuation) < complexities.get("PUNCTUATION", 0): errors.append(_("must contain {0} or more punctuation characters").format(complexities["PUNCTUATION"])) if len(non_ascii) < complexities.get("NON ASCII", 0): errors.append(_("must contain {0} or more non ascii characters").format(complexities["NON ASCII"])) if len(words) < complexities.get("WORDS", 0): errors.append(_("must contain {0} or more unique words").format(complexities["WORDS"])) if errors: raise ValidationError(message.format(u', '.join(errors)), code=code) def validate_password_dictionary(value): """ Insures that the password is not too similar to a defined set of dictionary words """ password_max_edit_distance = getattr(settings, "PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD", None) password_dictionary = getattr(settings, "PASSWORD_DICTIONARY", None) if password_max_edit_distance and password_dictionary: for word in password_dictionary: distance = nltk.metrics.distance.edit_distance(value, word) if distance <= password_max_edit_distance: raise ValidationError(_("Too similar to a restricted dictionary word."), code="dictionary_word")
agpl-3.0
SuperScottz/WALinuxAgent
tests/test_util.py
9
2503
# Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from env import waagent import sys from tests.tools import * SampleInterfaceInfo="""\ eth0 Link encap:Ethernet HWaddr ff:ff:ff:ff:ff:ff inet addr:10.94.20.249 Bcast:10.94.23.255 Mask:255.255.252.0 inet6 addr: fe80::215:5dff:fe5f:bf03/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 RX packets:3789880 errors:0 dropped:0 overruns:0 frame:0 TX packets:80973 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:1000 RX bytes:388563383 (388.5 MB) TX bytes:21484571 (21.4 MB) eth1 Link encap:Ethernet HWaddr 00:00:00:00:00:00 inet addr:192.168.1.1 Bcast:192.168.1.255 Mask:255.255.255.0 inet6 addr: fe80::215:5dff:fe5f:bf08/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 RX packets:386614 errors:0 dropped:0 overruns:0 frame:0 TX packets:201356 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:1000 RX bytes:32507619 (32.5 MB) TX bytes:78342503 (78.3 MB) lo Link encap:Local Loopback inet addr:127.0.0.1 Mask:255.0.0.0 inet6 addr: ::1/128 Scope:Host UP LOOPBACK RUNNING MTU:65536 Metric:1 RX packets:2561 errors:0 dropped:0 overruns:0 frame:0 TX packets:2561 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 """ class TestUtil(unittest.TestCase): @Mockup(waagent, "RunGetOutput", MockFunc('', (0, SampleInterfaceInfo))) def test_getInterfaceNameByMac(self): distro = waagent.AbstractDistro() ifName = 
distro.getInterfaceNameByMac("ff:ff:ff:ff:ff:ff") self.assertEquals("eth0", ifName) ifName = distro.getInterfaceNameByMac("00:00:00:00:00:00") self.assertEquals("eth1", ifName) if __name__ == '__main__': unittest.main()
apache-2.0
denis-pitul/django
django/core/files/move.py
554
2794
""" Move a file in the safest way possible:: >>> from django.core.files.move import file_move_safe >>> file_move_safe("/tmp/old_file", "/tmp/new_file") """ import os from shutil import copystat from django.core.files import locks __all__ = ['file_move_safe'] def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False): """ Moves a file from one location to another in the safest way possible. First, tries ``os.rename``, which is simple but will break across filesystems. If that fails, streams manually from one file to another in pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, this function will throw an ``IOError``. """ # There's no reason to move if we don't have to. 
if _samefile(old_file_name, new_file_name): return try: # If the destination file exists and allow_overwrite is False then raise an IOError if not allow_overwrite and os.access(new_file_name, os.F_OK): raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name) os.rename(old_file_name, new_file_name) return except OSError: # This will happen with os.rename if moving to another filesystem # or when moving opened files on certain operating systems pass # first open the old file, so that it won't go away with open(old_file_name, 'rb') as old_file: # now open the new file, not forgetting allow_overwrite fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) | (os.O_EXCL if not allow_overwrite else 0))) try: locks.lock(fd, locks.LOCK_EX) current_chunk = None while current_chunk != b'': current_chunk = old_file.read(chunk_size) os.write(fd, current_chunk) finally: locks.unlock(fd) os.close(fd) copystat(old_file_name, new_file_name) try: os.remove(old_file_name) except OSError as e: # Certain operating systems (Cygwin and Windows) # fail when deleting opened files, ignore it. (For the # systems where this happens, temporary files will be auto-deleted # on close anyway.) if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13: raise
bsd-3-clause
cedriclaunay/gaffer
python/GafferSceneUI/SubTreeUI.py
1
2712
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## import Gaffer import GafferUI import GafferScene import GafferSceneUI ########################################################################## # Metadata ########################################################################## Gaffer.Metadata.registerNodeDescription( GafferScene.SubTree, """A node for extracting a specific branch from a scene.""", "root", """The location to become the new root for the output scene. All locations below this will be kept, and all others will be discarded.""", ) ########################################################################## # Widgets and Gadgets ########################################################################## GafferUI.PlugValueWidget.registerCreator( GafferScene.SubTree, "root", lambda plug : GafferUI.PathPlugValueWidget( plug, path = GafferScene.ScenePath( plug.node()["in"], plug.node().scriptNode().context(), "/" ), ), )
bsd-3-clause
mzdaniel/oh-mainline
vendor/packages/Django/django/utils/regex_helper.py
361
12079
""" Functions for reversing a regular expression (used in reverse URL resolving). Used internally by Django and not intended for external use. This is not, and is not intended to be, a complete reg-exp decompiler. It should be good enough for a large class of URLS, however. """ # Mapping of an escape character to a representative of that class. So, e.g., # "\w" is replaced by "x" in a reverse URL. A value of None means to ignore # this sequence. Any missing key is mapped to itself. ESCAPE_MAPPINGS = { "A": None, "b": None, "B": None, "d": u"0", "D": u"x", "s": u" ", "S": u"x", "w": u"x", "W": u"!", "Z": None, } class Choice(list): """ Used to represent multiple possibilities at this point in a pattern string. We use a distinguished type, rather than a list, so that the usage in the code is clear. """ class Group(list): """ Used to represent a capturing group in the pattern string. """ class NonCapture(list): """ Used to represent a non-capturing group in the pattern string. """ def normalize(pattern): """ Given a reg-exp pattern, normalizes it to a list of forms that suffice for reverse matching. This does the following: (1) For any repeating sections, keeps the minimum number of occurrences permitted (this means zero for optional groups). (2) If an optional group includes parameters, include one occurrence of that group (along with the zero occurrence case from step (1)). (3) Select the first (essentially an arbitrary) element from any character class. Select an arbitrary character for any unordered class (e.g. '.' or '\w') in the pattern. (5) Ignore comments and any of the reg-exp flags that won't change what we construct ("iLmsu"). "(?x)" is an error, however. (6) Raise an error on all other non-capturing (?...) forms (e.g. look-ahead and look-behind matches) and any disjunctive ('|') constructs. Django's URLs for forward resolving are either all positional arguments or all keyword arguments. That is assumed here, as well. 
Although reverse resolving can be done using positional args when keyword args are specified, the two cannot be mixed in the same reverse() call. """ # Do a linear scan to work out the special features of this pattern. The # idea is that we scan once here and collect all the information we need to # make future decisions. result = [] non_capturing_groups = [] consume_next = True pattern_iter = next_char(iter(pattern)) num_args = 0 # A "while" loop is used here because later on we need to be able to peek # at the next character and possibly go around without consuming another # one at the top of the loop. try: ch, escaped = pattern_iter.next() except StopIteration: return zip([u''], [[]]) try: while True: if escaped: result.append(ch) elif ch == '.': # Replace "any character" with an arbitrary representative. result.append(u".") elif ch == '|': # FIXME: One day we'll should do this, but not in 1.0. raise NotImplementedError elif ch == "^": pass elif ch == '$': break elif ch == ')': # This can only be the end of a non-capturing group, since all # other unescaped parentheses are handled by the grouping # section later (and the full group is handled there). # # We regroup everything inside the capturing group so that it # can be quantified, if necessary. start = non_capturing_groups.pop() inner = NonCapture(result[start:]) result = result[:start] + [inner] elif ch == '[': # Replace ranges with the first character in the range. ch, escaped = pattern_iter.next() result.append(ch) ch, escaped = pattern_iter.next() while escaped or ch != ']': ch, escaped = pattern_iter.next() elif ch == '(': # Some kind of group. ch, escaped = pattern_iter.next() if ch != '?' or escaped: # A positional group name = "_%d" % num_args num_args += 1 result.append(Group(((u"%%(%s)s" % name), name))) walk_to_end(ch, pattern_iter) else: ch, escaped = pattern_iter.next() if ch in "iLmsu#": # All of these are ignorable. Walk to the end of the # group. 
walk_to_end(ch, pattern_iter) elif ch == ':': # Non-capturing group non_capturing_groups.append(len(result)) elif ch != 'P': # Anything else, other than a named group, is something # we cannot reverse. raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch) else: ch, escaped = pattern_iter.next() if ch != '<': raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch) # We are in a named capturing group. Extra the name and # then skip to the end. name = [] ch, escaped = pattern_iter.next() while ch != '>': name.append(ch) ch, escaped = pattern_iter.next() param = ''.join(name) result.append(Group(((u"%%(%s)s" % param), param))) walk_to_end(ch, pattern_iter) elif ch in "*?+{": # Quanitifers affect the previous item in the result list. count, ch = get_quantifier(ch, pattern_iter) if ch: # We had to look ahead, but it wasn't need to compute the # quanitifer, so use this character next time around the # main loop. consume_next = False if count == 0: if contains(result[-1], Group): # If we are quantifying a capturing group (or # something containing such a group) and the minimum is # zero, we must also handle the case of one occurrence # being present. All the quantifiers (except {0,0}, # which we conveniently ignore) that have a 0 minimum # also allow a single occurrence. result[-1] = Choice([None, result[-1]]) else: result.pop() elif count > 1: result.extend([result[-1]] * (count - 1)) else: # Anything else is a literal. result.append(ch) if consume_next: ch, escaped = pattern_iter.next() else: consume_next = True except StopIteration: pass except NotImplementedError: # A case of using the disjunctive form. No results for you! return zip([u''], [[]]) return zip(*flatten_result(result)) def next_char(input_iter): """ An iterator that yields the next character from "pattern_iter", respecting escape sequences. An escaped character is replaced by a representative of its class (e.g. \w -> "x"). 
If the escaped character is one that is skipped, it is not returned (the next character is returned instead). Yields the next character, along with a boolean indicating whether it is a raw (unescaped) character or not. """ for ch in input_iter: if ch != '\\': yield ch, False continue ch = input_iter.next() representative = ESCAPE_MAPPINGS.get(ch, ch) if representative is None: continue yield representative, True def walk_to_end(ch, input_iter): """ The iterator is currently inside a capturing group. We want to walk to the close of this group, skipping over any nested groups and handling escaped parentheses correctly. """ if ch == '(': nesting = 1 else: nesting = 0 for ch, escaped in input_iter: if escaped: continue elif ch == '(': nesting += 1 elif ch == ')': if not nesting: return nesting -= 1 def get_quantifier(ch, input_iter): """ Parse a quantifier from the input, where "ch" is the first character in the quantifier. Returns the minimum number of occurences permitted by the quantifier and either None or the next character from the input_iter if the next character is not part of the quantifier. """ if ch in '*?+': try: ch2, escaped = input_iter.next() except StopIteration: ch2 = None if ch2 == '?': ch2 = None if ch == '+': return 1, ch2 return 0, ch2 quant = [] while ch != '}': ch, escaped = input_iter.next() quant.append(ch) quant = quant[:-1] values = ''.join(quant).split(',') # Consume the trailing '?', if necessary. try: ch, escaped = input_iter.next() except StopIteration: ch = None if ch == '?': ch = None return int(values[0]), ch def contains(source, inst): """ Returns True if the "source" contains an instance of "inst". False, otherwise. """ if isinstance(source, inst): return True if isinstance(source, NonCapture): for elt in source: if contains(elt, inst): return True return False def flatten_result(source): """ Turns the given source sequence into a list of reg-exp possibilities and their arguments. 
Returns a list of strings and a list of argument lists. Each of the two lists will be of the same length. """ if source is None: return [u''], [[]] if isinstance(source, Group): if source[1] is None: params = [] else: params = [source[1]] return [source[0]], [params] result = [u''] result_args = [[]] pos = last = 0 for pos, elt in enumerate(source): if isinstance(elt, basestring): continue piece = u''.join(source[last:pos]) if isinstance(elt, Group): piece += elt[0] param = elt[1] else: param = None last = pos + 1 for i in range(len(result)): result[i] += piece if param: result_args[i].append(param) if isinstance(elt, (Choice, NonCapture)): if isinstance(elt, NonCapture): elt = [elt] inner_result, inner_args = [], [] for item in elt: res, args = flatten_result(item) inner_result.extend(res) inner_args.extend(args) new_result = [] new_args = [] for item, args in zip(result, result_args): for i_item, i_args in zip(inner_result, inner_args): new_result.append(item + i_item) new_args.append(args[:] + i_args) result = new_result result_args = new_args if pos >= last: piece = u''.join(source[last:]) for i in range(len(result)): result[i] += piece return result, result_args
agpl-3.0
yavuzovski/playground
python/django/RESTTest/.venv/lib/python3.4/site-packages/django/contrib/redirects/middleware.py
100
1961
from __future__ import unicode_literals from django import http from django.apps import apps from django.conf import settings from django.contrib.redirects.models import Redirect from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ImproperlyConfigured from django.utils.deprecation import MiddlewareMixin class RedirectFallbackMiddleware(MiddlewareMixin): # Defined as class-level attributes to be subclassing-friendly. response_gone_class = http.HttpResponseGone response_redirect_class = http.HttpResponsePermanentRedirect def __init__(self, get_response=None): if not apps.is_installed('django.contrib.sites'): raise ImproperlyConfigured( "You cannot use RedirectFallbackMiddleware when " "django.contrib.sites is not installed." ) super(RedirectFallbackMiddleware, self).__init__(get_response) def process_response(self, request, response): # No need to check for a redirect for non-404 responses. if response.status_code != 404: return response full_path = request.get_full_path() current_site = get_current_site(request) r = None try: r = Redirect.objects.get(site=current_site, old_path=full_path) except Redirect.DoesNotExist: pass if r is None and settings.APPEND_SLASH and not request.path.endswith('/'): try: r = Redirect.objects.get( site=current_site, old_path=request.get_full_path(force_append_slash=True), ) except Redirect.DoesNotExist: pass if r is not None: if r.new_path == '': return self.response_gone_class() return self.response_redirect_class(r.new_path) # No redirect was found. Return the response. return response
gpl-3.0
phelix/bitcoin
qa/rpc-tests/test_framework/script.py
53
23854
# # script.py # # This file is modified from python-bitcoinlib. # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # """Scripts Functionality to build scripts, as well as SignatureHash(). """ from __future__ import absolute_import, division, print_function, unicode_literals from test_framework.mininode import CTransaction, CTxOut, hash256 import sys bchr = chr bord = ord if sys.version > '3': long = int bchr = lambda x: bytes([x]) bord = lambda x: x import copy import struct from test_framework.bignum import bn2vch MAX_SCRIPT_SIZE = 10000 MAX_SCRIPT_ELEMENT_SIZE = 520 MAX_SCRIPT_OPCODES = 201 OPCODE_NAMES = {} _opcode_instances = [] class CScriptOp(int): """A single script opcode""" __slots__ = [] @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" if len(d) < 0x4c: return b'' + bchr(len(d)) + d # OP_PUSHDATA elif len(d) <= 0xff: return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 elif len(d) <= 0xffff: return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2 elif len(d) <= 0xffffffff: return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4 else: raise ValueError("Data too long to encode in a PUSHDATA op") @staticmethod def encode_op_n(n): """Encode a small integer op, returning an opcode""" if not (0 <= n <= 16): raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n) if n == 0: return OP_0 else: return CScriptOp(OP_1 + n-1) def decode_op_n(self): """Decode a small integer opcode, returning an integer""" if self == OP_0: return 0 if not (self == OP_0 or OP_1 <= self <= OP_16): raise ValueError('op %r is not an OP_N' % self) return int(self - OP_1+1) def is_small_int(self): """Return true if the op pushes a small integer to the stack""" if 0x51 <= self <= 0x60 or self == 0: return True else: return False def __str__(self): return repr(self) def __repr__(self): if self in OPCODE_NAMES: return OPCODE_NAMES[self] else: 
return 'CScriptOp(0x%x)' % self def __new__(cls, n): try: return _opcode_instances[n] except IndexError: assert len(_opcode_instances) == n _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n)) return _opcode_instances[n] # Populate opcode instance table for n in range(0xff+1): CScriptOp(n) # push value OP_0 = CScriptOp(0x00) OP_FALSE = OP_0 OP_PUSHDATA1 = CScriptOp(0x4c) OP_PUSHDATA2 = CScriptOp(0x4d) OP_PUSHDATA4 = CScriptOp(0x4e) OP_1NEGATE = CScriptOp(0x4f) OP_RESERVED = CScriptOp(0x50) OP_1 = CScriptOp(0x51) OP_TRUE=OP_1 OP_2 = CScriptOp(0x52) OP_3 = CScriptOp(0x53) OP_4 = CScriptOp(0x54) OP_5 = CScriptOp(0x55) OP_6 = CScriptOp(0x56) OP_7 = CScriptOp(0x57) OP_8 = CScriptOp(0x58) OP_9 = CScriptOp(0x59) OP_10 = CScriptOp(0x5a) OP_11 = CScriptOp(0x5b) OP_12 = CScriptOp(0x5c) OP_13 = CScriptOp(0x5d) OP_14 = CScriptOp(0x5e) OP_15 = CScriptOp(0x5f) OP_16 = CScriptOp(0x60) # control OP_NOP = CScriptOp(0x61) OP_VER = CScriptOp(0x62) OP_IF = CScriptOp(0x63) OP_NOTIF = CScriptOp(0x64) OP_VERIF = CScriptOp(0x65) OP_VERNOTIF = CScriptOp(0x66) OP_ELSE = CScriptOp(0x67) OP_ENDIF = CScriptOp(0x68) OP_VERIFY = CScriptOp(0x69) OP_RETURN = CScriptOp(0x6a) # stack ops OP_TOALTSTACK = CScriptOp(0x6b) OP_FROMALTSTACK = CScriptOp(0x6c) OP_2DROP = CScriptOp(0x6d) OP_2DUP = CScriptOp(0x6e) OP_3DUP = CScriptOp(0x6f) OP_2OVER = CScriptOp(0x70) OP_2ROT = CScriptOp(0x71) OP_2SWAP = CScriptOp(0x72) OP_IFDUP = CScriptOp(0x73) OP_DEPTH = CScriptOp(0x74) OP_DROP = CScriptOp(0x75) OP_DUP = CScriptOp(0x76) OP_NIP = CScriptOp(0x77) OP_OVER = CScriptOp(0x78) OP_PICK = CScriptOp(0x79) OP_ROLL = CScriptOp(0x7a) OP_ROT = CScriptOp(0x7b) OP_SWAP = CScriptOp(0x7c) OP_TUCK = CScriptOp(0x7d) # splice ops OP_CAT = CScriptOp(0x7e) OP_SUBSTR = CScriptOp(0x7f) OP_LEFT = CScriptOp(0x80) OP_RIGHT = CScriptOp(0x81) OP_SIZE = CScriptOp(0x82) # bit logic OP_INVERT = CScriptOp(0x83) OP_AND = CScriptOp(0x84) OP_OR = CScriptOp(0x85) OP_XOR = CScriptOp(0x86) OP_EQUAL = CScriptOp(0x87) OP_EQUALVERIFY = 
CScriptOp(0x88) OP_RESERVED1 = CScriptOp(0x89) OP_RESERVED2 = CScriptOp(0x8a) # numeric OP_1ADD = CScriptOp(0x8b) OP_1SUB = CScriptOp(0x8c) OP_2MUL = CScriptOp(0x8d) OP_2DIV = CScriptOp(0x8e) OP_NEGATE = CScriptOp(0x8f) OP_ABS = CScriptOp(0x90) OP_NOT = CScriptOp(0x91) OP_0NOTEQUAL = CScriptOp(0x92) OP_ADD = CScriptOp(0x93) OP_SUB = CScriptOp(0x94) OP_MUL = CScriptOp(0x95) OP_DIV = CScriptOp(0x96) OP_MOD = CScriptOp(0x97) OP_LSHIFT = CScriptOp(0x98) OP_RSHIFT = CScriptOp(0x99) OP_BOOLAND = CScriptOp(0x9a) OP_BOOLOR = CScriptOp(0x9b) OP_NUMEQUAL = CScriptOp(0x9c) OP_NUMEQUALVERIFY = CScriptOp(0x9d) OP_NUMNOTEQUAL = CScriptOp(0x9e) OP_LESSTHAN = CScriptOp(0x9f) OP_GREATERTHAN = CScriptOp(0xa0) OP_LESSTHANOREQUAL = CScriptOp(0xa1) OP_GREATERTHANOREQUAL = CScriptOp(0xa2) OP_MIN = CScriptOp(0xa3) OP_MAX = CScriptOp(0xa4) OP_WITHIN = CScriptOp(0xa5) # crypto OP_RIPEMD160 = CScriptOp(0xa6) OP_SHA1 = CScriptOp(0xa7) OP_SHA256 = CScriptOp(0xa8) OP_HASH160 = CScriptOp(0xa9) OP_HASH256 = CScriptOp(0xaa) OP_CODESEPARATOR = CScriptOp(0xab) OP_CHECKSIG = CScriptOp(0xac) OP_CHECKSIGVERIFY = CScriptOp(0xad) OP_CHECKMULTISIG = CScriptOp(0xae) OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf) # expansion OP_NOP1 = CScriptOp(0xb0) OP_NOP2 = CScriptOp(0xb1) OP_NOP3 = CScriptOp(0xb2) OP_NOP4 = CScriptOp(0xb3) OP_NOP5 = CScriptOp(0xb4) OP_NOP6 = CScriptOp(0xb5) OP_NOP7 = CScriptOp(0xb6) OP_NOP8 = CScriptOp(0xb7) OP_NOP9 = CScriptOp(0xb8) OP_NOP10 = CScriptOp(0xb9) # template matching params OP_SMALLINTEGER = CScriptOp(0xfa) OP_PUBKEYS = CScriptOp(0xfb) OP_PUBKEYHASH = CScriptOp(0xfd) OP_PUBKEY = CScriptOp(0xfe) OP_INVALIDOPCODE = CScriptOp(0xff) VALID_OPCODES = { OP_1NEGATE, OP_RESERVED, OP_1, OP_2, OP_3, OP_4, OP_5, OP_6, OP_7, OP_8, OP_9, OP_10, OP_11, OP_12, OP_13, OP_14, OP_15, OP_16, OP_NOP, OP_VER, OP_IF, OP_NOTIF, OP_VERIF, OP_VERNOTIF, OP_ELSE, OP_ENDIF, OP_VERIFY, OP_RETURN, OP_TOALTSTACK, OP_FROMALTSTACK, OP_2DROP, OP_2DUP, OP_3DUP, OP_2OVER, OP_2ROT, OP_2SWAP, OP_IFDUP, OP_DEPTH, 
OP_DROP, OP_DUP, OP_NIP, OP_OVER, OP_PICK, OP_ROLL, OP_ROT, OP_SWAP, OP_TUCK, OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_SIZE, OP_INVERT, OP_AND, OP_OR, OP_XOR, OP_EQUAL, OP_EQUALVERIFY, OP_RESERVED1, OP_RESERVED2, OP_1ADD, OP_1SUB, OP_2MUL, OP_2DIV, OP_NEGATE, OP_ABS, OP_NOT, OP_0NOTEQUAL, OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT, OP_BOOLAND, OP_BOOLOR, OP_NUMEQUAL, OP_NUMEQUALVERIFY, OP_NUMNOTEQUAL, OP_LESSTHAN, OP_GREATERTHAN, OP_LESSTHANOREQUAL, OP_GREATERTHANOREQUAL, OP_MIN, OP_MAX, OP_WITHIN, OP_RIPEMD160, OP_SHA1, OP_SHA256, OP_HASH160, OP_HASH256, OP_CODESEPARATOR, OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_NOP1, OP_NOP2, OP_NOP3, OP_NOP4, OP_NOP5, OP_NOP6, OP_NOP7, OP_NOP8, OP_NOP9, OP_NOP10, OP_SMALLINTEGER, OP_PUBKEYS, OP_PUBKEYHASH, OP_PUBKEY, } OPCODE_NAMES.update({ OP_0 : 'OP_0', OP_PUSHDATA1 : 'OP_PUSHDATA1', OP_PUSHDATA2 : 'OP_PUSHDATA2', OP_PUSHDATA4 : 'OP_PUSHDATA4', OP_1NEGATE : 'OP_1NEGATE', OP_RESERVED : 'OP_RESERVED', OP_1 : 'OP_1', OP_2 : 'OP_2', OP_3 : 'OP_3', OP_4 : 'OP_4', OP_5 : 'OP_5', OP_6 : 'OP_6', OP_7 : 'OP_7', OP_8 : 'OP_8', OP_9 : 'OP_9', OP_10 : 'OP_10', OP_11 : 'OP_11', OP_12 : 'OP_12', OP_13 : 'OP_13', OP_14 : 'OP_14', OP_15 : 'OP_15', OP_16 : 'OP_16', OP_NOP : 'OP_NOP', OP_VER : 'OP_VER', OP_IF : 'OP_IF', OP_NOTIF : 'OP_NOTIF', OP_VERIF : 'OP_VERIF', OP_VERNOTIF : 'OP_VERNOTIF', OP_ELSE : 'OP_ELSE', OP_ENDIF : 'OP_ENDIF', OP_VERIFY : 'OP_VERIFY', OP_RETURN : 'OP_RETURN', OP_TOALTSTACK : 'OP_TOALTSTACK', OP_FROMALTSTACK : 'OP_FROMALTSTACK', OP_2DROP : 'OP_2DROP', OP_2DUP : 'OP_2DUP', OP_3DUP : 'OP_3DUP', OP_2OVER : 'OP_2OVER', OP_2ROT : 'OP_2ROT', OP_2SWAP : 'OP_2SWAP', OP_IFDUP : 'OP_IFDUP', OP_DEPTH : 'OP_DEPTH', OP_DROP : 'OP_DROP', OP_DUP : 'OP_DUP', OP_NIP : 'OP_NIP', OP_OVER : 'OP_OVER', OP_PICK : 'OP_PICK', OP_ROLL : 'OP_ROLL', OP_ROT : 'OP_ROT', OP_SWAP : 'OP_SWAP', OP_TUCK : 'OP_TUCK', OP_CAT : 'OP_CAT', OP_SUBSTR : 'OP_SUBSTR', OP_LEFT : 'OP_LEFT', OP_RIGHT : 
'OP_RIGHT', OP_SIZE : 'OP_SIZE', OP_INVERT : 'OP_INVERT', OP_AND : 'OP_AND', OP_OR : 'OP_OR', OP_XOR : 'OP_XOR', OP_EQUAL : 'OP_EQUAL', OP_EQUALVERIFY : 'OP_EQUALVERIFY', OP_RESERVED1 : 'OP_RESERVED1', OP_RESERVED2 : 'OP_RESERVED2', OP_1ADD : 'OP_1ADD', OP_1SUB : 'OP_1SUB', OP_2MUL : 'OP_2MUL', OP_2DIV : 'OP_2DIV', OP_NEGATE : 'OP_NEGATE', OP_ABS : 'OP_ABS', OP_NOT : 'OP_NOT', OP_0NOTEQUAL : 'OP_0NOTEQUAL', OP_ADD : 'OP_ADD', OP_SUB : 'OP_SUB', OP_MUL : 'OP_MUL', OP_DIV : 'OP_DIV', OP_MOD : 'OP_MOD', OP_LSHIFT : 'OP_LSHIFT', OP_RSHIFT : 'OP_RSHIFT', OP_BOOLAND : 'OP_BOOLAND', OP_BOOLOR : 'OP_BOOLOR', OP_NUMEQUAL : 'OP_NUMEQUAL', OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY', OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL', OP_LESSTHAN : 'OP_LESSTHAN', OP_GREATERTHAN : 'OP_GREATERTHAN', OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL', OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL', OP_MIN : 'OP_MIN', OP_MAX : 'OP_MAX', OP_WITHIN : 'OP_WITHIN', OP_RIPEMD160 : 'OP_RIPEMD160', OP_SHA1 : 'OP_SHA1', OP_SHA256 : 'OP_SHA256', OP_HASH160 : 'OP_HASH160', OP_HASH256 : 'OP_HASH256', OP_CODESEPARATOR : 'OP_CODESEPARATOR', OP_CHECKSIG : 'OP_CHECKSIG', OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY', OP_CHECKMULTISIG : 'OP_CHECKMULTISIG', OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY', OP_NOP1 : 'OP_NOP1', OP_NOP2 : 'OP_NOP2', OP_NOP3 : 'OP_NOP3', OP_NOP4 : 'OP_NOP4', OP_NOP5 : 'OP_NOP5', OP_NOP6 : 'OP_NOP6', OP_NOP7 : 'OP_NOP7', OP_NOP8 : 'OP_NOP8', OP_NOP9 : 'OP_NOP9', OP_NOP10 : 'OP_NOP10', OP_SMALLINTEGER : 'OP_SMALLINTEGER', OP_PUBKEYS : 'OP_PUBKEYS', OP_PUBKEYHASH : 'OP_PUBKEYHASH', OP_PUBKEY : 'OP_PUBKEY', OP_INVALIDOPCODE : 'OP_INVALIDOPCODE', }) OPCODES_BY_NAME = { 'OP_0' : OP_0, 'OP_PUSHDATA1' : OP_PUSHDATA1, 'OP_PUSHDATA2' : OP_PUSHDATA2, 'OP_PUSHDATA4' : OP_PUSHDATA4, 'OP_1NEGATE' : OP_1NEGATE, 'OP_RESERVED' : OP_RESERVED, 'OP_1' : OP_1, 'OP_2' : OP_2, 'OP_3' : OP_3, 'OP_4' : OP_4, 'OP_5' : OP_5, 'OP_6' : OP_6, 'OP_7' : OP_7, 'OP_8' : OP_8, 'OP_9' : OP_9, 'OP_10' : OP_10, 'OP_11' : OP_11, 'OP_12' 
: OP_12, 'OP_13' : OP_13, 'OP_14' : OP_14, 'OP_15' : OP_15, 'OP_16' : OP_16, 'OP_NOP' : OP_NOP, 'OP_VER' : OP_VER, 'OP_IF' : OP_IF, 'OP_NOTIF' : OP_NOTIF, 'OP_VERIF' : OP_VERIF, 'OP_VERNOTIF' : OP_VERNOTIF, 'OP_ELSE' : OP_ELSE, 'OP_ENDIF' : OP_ENDIF, 'OP_VERIFY' : OP_VERIFY, 'OP_RETURN' : OP_RETURN, 'OP_TOALTSTACK' : OP_TOALTSTACK, 'OP_FROMALTSTACK' : OP_FROMALTSTACK, 'OP_2DROP' : OP_2DROP, 'OP_2DUP' : OP_2DUP, 'OP_3DUP' : OP_3DUP, 'OP_2OVER' : OP_2OVER, 'OP_2ROT' : OP_2ROT, 'OP_2SWAP' : OP_2SWAP, 'OP_IFDUP' : OP_IFDUP, 'OP_DEPTH' : OP_DEPTH, 'OP_DROP' : OP_DROP, 'OP_DUP' : OP_DUP, 'OP_NIP' : OP_NIP, 'OP_OVER' : OP_OVER, 'OP_PICK' : OP_PICK, 'OP_ROLL' : OP_ROLL, 'OP_ROT' : OP_ROT, 'OP_SWAP' : OP_SWAP, 'OP_TUCK' : OP_TUCK, 'OP_CAT' : OP_CAT, 'OP_SUBSTR' : OP_SUBSTR, 'OP_LEFT' : OP_LEFT, 'OP_RIGHT' : OP_RIGHT, 'OP_SIZE' : OP_SIZE, 'OP_INVERT' : OP_INVERT, 'OP_AND' : OP_AND, 'OP_OR' : OP_OR, 'OP_XOR' : OP_XOR, 'OP_EQUAL' : OP_EQUAL, 'OP_EQUALVERIFY' : OP_EQUALVERIFY, 'OP_RESERVED1' : OP_RESERVED1, 'OP_RESERVED2' : OP_RESERVED2, 'OP_1ADD' : OP_1ADD, 'OP_1SUB' : OP_1SUB, 'OP_2MUL' : OP_2MUL, 'OP_2DIV' : OP_2DIV, 'OP_NEGATE' : OP_NEGATE, 'OP_ABS' : OP_ABS, 'OP_NOT' : OP_NOT, 'OP_0NOTEQUAL' : OP_0NOTEQUAL, 'OP_ADD' : OP_ADD, 'OP_SUB' : OP_SUB, 'OP_MUL' : OP_MUL, 'OP_DIV' : OP_DIV, 'OP_MOD' : OP_MOD, 'OP_LSHIFT' : OP_LSHIFT, 'OP_RSHIFT' : OP_RSHIFT, 'OP_BOOLAND' : OP_BOOLAND, 'OP_BOOLOR' : OP_BOOLOR, 'OP_NUMEQUAL' : OP_NUMEQUAL, 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY, 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL, 'OP_LESSTHAN' : OP_LESSTHAN, 'OP_GREATERTHAN' : OP_GREATERTHAN, 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL, 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL, 'OP_MIN' : OP_MIN, 'OP_MAX' : OP_MAX, 'OP_WITHIN' : OP_WITHIN, 'OP_RIPEMD160' : OP_RIPEMD160, 'OP_SHA1' : OP_SHA1, 'OP_SHA256' : OP_SHA256, 'OP_HASH160' : OP_HASH160, 'OP_HASH256' : OP_HASH256, 'OP_CODESEPARATOR' : OP_CODESEPARATOR, 'OP_CHECKSIG' : OP_CHECKSIG, 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY, 'OP_CHECKMULTISIG' 
: OP_CHECKMULTISIG, 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY, 'OP_NOP1' : OP_NOP1, 'OP_NOP2' : OP_NOP2, 'OP_NOP3' : OP_NOP3, 'OP_NOP4' : OP_NOP4, 'OP_NOP5' : OP_NOP5, 'OP_NOP6' : OP_NOP6, 'OP_NOP7' : OP_NOP7, 'OP_NOP8' : OP_NOP8, 'OP_NOP9' : OP_NOP9, 'OP_NOP10' : OP_NOP10, 'OP_SMALLINTEGER' : OP_SMALLINTEGER, 'OP_PUBKEYS' : OP_PUBKEYS, 'OP_PUBKEYHASH' : OP_PUBKEYHASH, 'OP_PUBKEY' : OP_PUBKEY, } class CScriptInvalidError(Exception): """Base class for CScript exceptions""" pass class CScriptTruncatedPushDataError(CScriptInvalidError): """Invalid pushdata due to truncation""" def __init__(self, msg, data): self.data = data super(CScriptTruncatedPushDataError, self).__init__(msg) # This is used, eg, for blockchain heights in coinbase scripts (bip34) class CScriptNum(object): def __init__(self, d=0): self.value = d @staticmethod def encode(obj): r = bytearray(0) if obj.value == 0: return bytes(r) neg = obj.value < 0 absvalue = -obj.value if neg else obj.value while (absvalue): r.append(chr(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes(bchr(len(r)) + r) class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. 
""" @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bchr(other) elif isinstance(other, CScriptNum): if (other.value == 0): other = bchr(CScriptOp(OP_0)) else: other = CScriptNum.encode(other) elif isinstance(other, (int, long)): if 0 <= other <= 16: other = bytes(bchr(CScriptOp.encode_op_n(other))) elif other == -1: other = bytes(bchr(OP_1NEGATE)) else: other = CScriptOp.encode_op_pushdata(bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # Do the coercion outside of the try block so that errors in it are # noticed. other = self.__coerce_instance(other) try: # bytes.__add__ always returns bytes instances unfortunately return CScript(super(CScript, self).__add__(other)) except TypeError: raise TypeError('Can not add a %r instance to a CScript' % other.__class__) def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) else: def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. 
(sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = bord(self[i]) i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: pushdata_type = 'PUSHDATA(%d)' % opcode datasize = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError('PUSHDATA1: missing data length') datasize = bord(self[i]) i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError('PUSHDATA2: missing data length') datasize = bord(self[i]) + (bord(self[i+1]) << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError('PUSHDATA4: missing data length') datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) i += 4 else: assert False # shouldn't happen data = bytes(self[i:i+datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. 
""" for (opcode, data, sop_idx) in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): # For Python3 compatibility add b before strings so testcases don't # need to change def _repr(o): if isinstance(o, bytes): return "x('%s')" % binascii.hexlify(o).decode('utf8') else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: op = '%s...<ERROR: %s>' % (_repr(err.data), err) break except CScriptInvalidError as err: op = '<ERROR: %s>' % err break except StopIteration: break finally: if op is not None: ops.append(op) return "CScript([%s])" % ', '.join(ops) def GetSigOpCount(self, fAccurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ n = 0 lastOpcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): n += opcode.decode_op_n() else: n += 20 lastOpcode = opcode return n SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True for (opcode, data, sop_idx) in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx if script[sop_idx:sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' if inIdx >= len(txTo.vin): return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) txtmp = CTransaction(txTo) for txin in txtmp.vin: txin.scriptSig = b'' txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) if (hashtype & 0x1f) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 elif (hashtype & 0x1f) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) tmp = txtmp.vout[outIdx] txtmp.vout = [] for i in range(outIdx): txtmp.vout.append(CTxOut()) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b"<I", hashtype) hash = hash256(s) return (hash, None)
mit
antonnik/code-classifier
naive_bayes/resources/python/utils.py
1
18823
# Copyright 2012-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for testing pymongo """ import os import struct import sys import threading from nose.plugins.skip import SkipTest from pymongo import MongoClient, MongoReplicaSetClient from pymongo.errors import AutoReconnect from pymongo.pool import NO_REQUEST, NO_SOCKET_YET, SocketInfo from test import host, port, version try: import gevent has_gevent = True except ImportError: has_gevent = False # No functools in Python 2.4 def my_partial(f, *args, **kwargs): def _f(*new_args, **new_kwargs): final_kwargs = kwargs.copy() final_kwargs.update(new_kwargs) return f(*(args + new_args), **final_kwargs) return _f def one(s): """Get one element of a set""" return iter(s).next() def oid_generated_on_client(doc): """Is this process's PID in the document's _id?""" pid_from_doc = struct.unpack(">H", doc['_id'].binary[7:9])[0] return (os.getpid() % 0xFFFF) == pid_from_doc def delay(sec): # Javascript sleep() only available in MongoDB since version ~1.9 return '''function() { var d = new Date((new Date()).getTime() + %s * 1000); while (d > (new Date())) { }; return true; }''' % sec def get_command_line(client): command_line = client.admin.command('getCmdLineOpts') assert command_line['ok'] == 1, "getCmdLineOpts() failed" return command_line def server_started_with_option(client, cmdline_opt, config_opt): """Check if the server was started with a particular option. 
:Parameters: - `cmdline_opt`: The command line option (i.e. --nojournal) - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) if 'parsed' in command_line: parsed = command_line['parsed'] if config_opt in parsed: return parsed[config_opt] argv = command_line['argv'] return cmdline_opt in argv def server_started_with_auth(client): command_line = get_command_line(client) # MongoDB >= 2.0 if 'parsed' in command_line: parsed = command_line['parsed'] # MongoDB >= 2.6 if 'security' in parsed: security = parsed['security'] # >= rc3 if 'authorization' in security: return security['authorization'] == 'enabled' # < rc3 return security.get('auth', False) or bool(security.get('keyFile')) return parsed.get('auth', False) or bool(parsed.get('keyFile')) # Legacy argv = command_line['argv'] return '--auth' in argv or '--keyFile' in argv def server_started_with_nojournal(client): command_line = get_command_line(client) # MongoDB 2.6. if 'parsed' in command_line: parsed = command_line['parsed'] if 'storage' in parsed: storage = parsed['storage'] if 'journal' in storage: return not storage['journal']['enabled'] return server_started_with_option(client, '--nojournal', 'nojournal') def server_is_master_with_slave(client): command_line = get_command_line(client) if 'parsed' in command_line: return command_line['parsed'].get('master', False) return '--master' in command_line['argv'] def drop_collections(db): for coll in db.collection_names(): if not coll.startswith('system'): db.drop_collection(coll) def remove_all_users(db): if version.at_least(db.connection, (2, 5, 3, -1)): db.command({"dropAllUsersFromDatabase": 1}) else: db.system.users.remove({}) def joinall(threads): """Join threads with a 5-minute timeout, assert joins succeeded""" for t in threads: t.join(300) assert not t.isAlive(), "Thread %s hung" % t def is_mongos(client): res = client.admin.command('ismaster') return res.get('msg', '') == 'isdbgrid' def 
enable_text_search(client): client.admin.command( 'setParameter', textSearchEnabled=True) if isinstance(client, MongoReplicaSetClient): for host, port in client.secondaries: MongoClient(host, port).admin.command( 'setParameter', textSearchEnabled=True) def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a specific class of exception, and not a subclass. E.g., check that MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. """ try: fn(*args, **kwargs) except Exception, e: assert e.__class__ == cls, "got %s, expected %s" % ( e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) def looplet(greenlets): """World's smallest event loop; run until all greenlets are done """ while True: done = True for g in greenlets: if not g.dead: done = False g.switch() if done: return class RendezvousThread(threading.Thread): """A thread that starts and pauses at a rendezvous point before resuming. To be used in tests that must ensure that N threads are all alive simultaneously, regardless of thread-scheduling's vagaries. 1. Write a subclass of RendezvousThread and override before_rendezvous and / or after_rendezvous. 2. Create a state with RendezvousThread.shared_state(N) 3. Start N of your subclassed RendezvousThreads, passing the state to each one's __init__ 4. In the main thread, call RendezvousThread.wait_for_rendezvous 5. Test whatever you need to test while threads are paused at rendezvous point 6. In main thread, call RendezvousThread.resume_after_rendezvous 7. Join all threads from main thread 8. Assert that all threads' "passed" attribute is True 9. 
Test post-conditions """ class RendezvousState(object): def __init__(self, nthreads): # Number of threads total self.nthreads = nthreads # Number of threads that have arrived at rendezvous point self.arrived_threads = 0 self.arrived_threads_lock = threading.Lock() # Set when all threads reach rendezvous self.ev_arrived = threading.Event() # Set by resume_after_rendezvous() so threads can continue. self.ev_resume = threading.Event() @classmethod def create_shared_state(cls, nthreads): return RendezvousThread.RendezvousState(nthreads) def before_rendezvous(self): """Overridable: Do this before the rendezvous""" pass def after_rendezvous(self): """Overridable: Do this after the rendezvous. If it throws no exception, `passed` is set to True """ pass @classmethod def wait_for_rendezvous(cls, state): """Wait for all threads to reach rendezvous and pause there""" state.ev_arrived.wait(10) assert state.ev_arrived.isSet(), "Thread timeout" assert state.nthreads == state.arrived_threads @classmethod def resume_after_rendezvous(cls, state): """Tell all the paused threads to continue""" state.ev_resume.set() def __init__(self, state): """Params: `state`: A shared state object from RendezvousThread.shared_state() """ super(RendezvousThread, self).__init__() self.state = state self.passed = False # If this thread fails to terminate, don't hang the whole program self.setDaemon(True) def _rendezvous(self): """Pause until all threads arrive here""" s = self.state s.arrived_threads_lock.acquire() s.arrived_threads += 1 if s.arrived_threads == s.nthreads: s.arrived_threads_lock.release() s.ev_arrived.set() else: s.arrived_threads_lock.release() s.ev_arrived.wait() def run(self): try: self.before_rendezvous() finally: self._rendezvous() # all threads have passed the rendezvous, wait for # resume_after_rendezvous() self.state.ev_resume.wait() self.after_rendezvous() self.passed = True def read_from_which_host( rsc, mode, tag_sets=None, secondary_acceptable_latency_ms=15 ): """Read from 
a MongoReplicaSetClient with the given Read Preference mode,
    tags, and acceptable latency. Return the 'host:port' which was read from.

    :Parameters:
      - `rsc`: A MongoReplicaSetClient
      - `mode`: A ReadPreference
      - `tag_sets`: List of dicts of tags for data-center-aware reads
      - `secondary_acceptable_latency_ms`: a float
    """
    db = rsc.pymongo_test
    db.read_preference = mode
    # Allow callers to pass a single tag dict instead of a list of dicts.
    if isinstance(tag_sets, dict):
        tag_sets = [tag_sets]
    db.tag_sets = tag_sets or [{}]
    db.secondary_acceptable_latency_ms = secondary_acceptable_latency_ms

    cursor = db.test.find()
    try:
        try:
            cursor.next()
        except StopIteration:
            # No documents in collection, that's fine
            pass

        # Name-mangled private attribute: the member this cursor read from.
        return cursor._Cursor__connection_id
    except AutoReconnect:
        # No suitable member was available for this mode/tags/latency.
        return None


def assertReadFrom(testcase, rsc, member, *args, **kwargs):
    """Check that a query with the given mode, tag_sets, and
    secondary_acceptable_latency_ms reads from the expected replica-set
    member.

    :Parameters:
      - `testcase`: A unittest.TestCase
      - `rsc`: A MongoReplicaSetClient
      - `member`: A host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware
        reads
      - `secondary_acceptable_latency_ms` (optional): a float
    """
    # Repeat to reduce the chance a nondeterministic member choice happens
    # to pick the expected member by luck.
    for _ in range(10):
        testcase.assertEqual(member, read_from_which_host(rsc, *args, **kwargs))


def assertReadFromAll(testcase, rsc, members, *args, **kwargs):
    """Check that a query with the given mode, tag_sets, and
    secondary_acceptable_latency_ms reads from all members in a set, and
    only members in that set.
    :Parameters:
      - `testcase`: A unittest.TestCase
      - `rsc`: A MongoReplicaSetClient
      - `members`: Sequence of host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware
        reads
      - `secondary_acceptable_latency_ms` (optional): a float
    """
    members = set(members)
    used = set()
    # 100 reads should hit every eligible member with high probability.
    for _ in range(100):
        used.add(read_from_which_host(rsc, *args, **kwargs))

    testcase.assertEqual(members, used)


def get_pool(client):
    """Return the connection Pool of a MongoClient, or the primary member's
    Pool of a MongoReplicaSetClient. Raises TypeError for anything else.
    """
    if isinstance(client, MongoClient):
        # Name-mangled private attribute of MongoClient.
        return client._MongoClient__member.pool
    elif isinstance(client, MongoReplicaSetClient):
        rs_state = client._MongoReplicaSetClient__rs_state
        return rs_state.primary_member.pool
    else:
        raise TypeError(str(client))


def pools_from_rs_client(client):
    """Get Pool instances from a MongoReplicaSetClient or
    ReplicaSetConnection.
    """
    return [
        member.pool for member in
        client._MongoReplicaSetClient__rs_state.members]


class TestRequestMixin(object):
    """Inherit from this class and from unittest.TestCase to get some
    convenient methods for testing connection pools and requests
    """
    def assertSameSock(self, pool):
        # Two consecutive get_socket() calls return the same socket while
        # the pool is in a request.
        sock_info0 = pool.get_socket()
        sock_info1 = pool.get_socket()
        self.assertEqual(sock_info0, sock_info1)
        pool.maybe_return_socket(sock_info0)
        pool.maybe_return_socket(sock_info1)

    def assertDifferentSock(self, pool):
        # Outside a request, consecutive get_socket() calls return
        # different sockets.
        sock_info0 = pool.get_socket()
        sock_info1 = pool.get_socket()
        self.assertNotEqual(sock_info0, sock_info1)
        pool.maybe_return_socket(sock_info0)
        pool.maybe_return_socket(sock_info1)

    def assertNoRequest(self, pool):
        self.assertEqual(NO_REQUEST, pool._get_request_state())

    def assertNoSocketYet(self, pool):
        self.assertEqual(NO_SOCKET_YET, pool._get_request_state())

    def assertRequestSocket(self, pool):
        self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))

    def assertInRequestAndSameSock(self, client, pools):
        self.assertTrue(client.in_request())
        # Accept a single pool or a list of pools.
        if not isinstance(pools, list):
            pools = [pools]
        for pool in pools:
            self.assertTrue(pool.in_request())
            self.assertSameSock(pool)

    def assertNotInRequestAndDifferentSock(self, client, pools):
        self.assertFalse(client.in_request())
        # Accept a single pool or a list of pools.
        if not isinstance(pools, list):
            pools = [pools]
        for pool in pools:
            self.assertFalse(pool.in_request())
            self.assertDifferentSock(pool)


# Constants for run_threads and _TestLazyConnectMixin.
NTRIALS = 5
NTHREADS = 10


def run_threads(collection, target, use_greenlets):
    """Run a target function in many threads.

    target is a function taking a Collection and an integer.
    """
    threads = []
    for i in range(NTHREADS):
        bound_target = my_partial(target, collection, i)
        if use_greenlets:
            threads.append(gevent.Greenlet(run=bound_target))
        else:
            threads.append(threading.Thread(target=bound_target))

    for t in threads:
        t.start()

    for t in threads:
        # 30-second join timeout; assert the worker actually finished.
        t.join(30)
        if use_greenlets:
            # bool(Greenlet) is True if it's alive.
            assert not t
        else:
            assert not t.isAlive()


def lazy_client_trial(reset, target, test, get_client, use_greenlets):
    """Test concurrent operations on a lazily-connecting client.

    `reset` takes a collection and resets it for the next trial.

    `target` takes a lazily-connecting collection and an index from
    0 to NTHREADS, and performs some operation, e.g. an insert.

    `test` takes the lazily-connecting collection and asserts a
    post-condition to prove `target` succeeded.
    """
    if use_greenlets and not has_gevent:
        raise SkipTest('Gevent not installed')

    collection = MongoClient(host, port).pymongo_test.test

    # Make concurrency bugs more likely to manifest.
    interval = None
    if not sys.platform.startswith('java'):
        # Shrink the interpreter's thread-switch interval so the scheduler
        # interleaves threads far more aggressively than normal.
        if sys.version_info >= (3, 2):
            interval = sys.getswitchinterval()
            sys.setswitchinterval(1e-6)
        else:
            interval = sys.getcheckinterval()
            sys.setcheckinterval(1)

    try:
        for i in range(NTRIALS):
            reset(collection)
            # _connect=False: client must not touch the network until the
            # first operation is attempted by the worker threads.
            lazy_client = get_client(
                _connect=False, use_greenlets=use_greenlets)

            lazy_collection = lazy_client.pymongo_test.test
            run_threads(lazy_collection, target, use_greenlets)
            test(lazy_collection)
    finally:
        # Restore the interpreter's original switch interval.
        if not sys.platform.startswith('java'):
            if sys.version_info >= (3, 2):
                sys.setswitchinterval(interval)
            else:
                sys.setcheckinterval(interval)


class _TestLazyConnectMixin(object):
    """Test concurrent operations on a lazily-connecting client.

    Inherit from this class and from unittest.TestCase, and override
    _get_client(self, **kwargs), for testing a lazily-connecting
    client, i.e. a client initialized with _connect=False.

    Set use_greenlets = True to test with Gevent.
    """
    use_greenlets = False

    NTRIALS = 5
    NTHREADS = 10

    def test_insert(self):
        def reset(collection):
            collection.drop()

        def insert(collection, _):
            collection.insert({})

        def test(collection):
            # One document per thread proves every insert succeeded.
            self.assertEqual(NTHREADS, collection.count())

        lazy_client_trial(
            reset, insert, test,
            self._get_client, self.use_greenlets)

    def test_save(self):
        def reset(collection):
            collection.drop()

        def save(collection, _):
            collection.save({})

        def test(collection):
            self.assertEqual(NTHREADS, collection.count())

        lazy_client_trial(
            reset, save, test,
            self._get_client, self.use_greenlets)

    def test_update(self):
        def reset(collection):
            collection.drop()
            collection.insert([{'i': 0}])

        # Update doc 10 times.
        def update(collection, i):
            collection.update({}, {'$inc': {'i': 1}})

        def test(collection):
            # NTHREADS increments of 'i' prove no update was lost.
            self.assertEqual(NTHREADS, collection.find_one()['i'])

        lazy_client_trial(
            reset, update, test,
            self._get_client, self.use_greenlets)

    def test_remove(self):
        def reset(collection):
            collection.drop()
            collection.insert([{'i': i} for i in range(NTHREADS)])

        def remove(collection, i):
            # Each thread removes the one document matching its own index.
            collection.remove({'i': i})

        def test(collection):
            self.assertEqual(0, collection.count())

        lazy_client_trial(
            reset, remove, test,
            self._get_client, self.use_greenlets)

    def test_find_one(self):
        results = []

        def reset(collection):
            collection.drop()
            collection.insert({})
            results[:] = []

        def find_one(collection, _):
            results.append(collection.find_one())

        def test(collection):
            # Every thread's find_one must have returned a result.
            self.assertEqual(NTHREADS, len(results))

        lazy_client_trial(
            reset, find_one, test,
            self._get_client, self.use_greenlets)

    def test_max_bson_size(self):
        # Client should have sane defaults before connecting, and should update
        # its configuration once connected.
        c = self._get_client(_connect=False)
        self.assertEqual(16 * (1024 ** 2), c.max_bson_size)
        self.assertEqual(2 * c.max_bson_size, c.max_message_size)

        # Make the client connect, so that it sets its max_bson_size and
        # max_message_size attributes.
        ismaster = c.db.command('ismaster')
        self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size)
        if 'maxMessageSizeBytes' in ismaster:
            self.assertEqual(
                ismaster['maxMessageSizeBytes'],
                c.max_message_size)
apache-2.0