prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model


class SsoUri(Model):
    """Holds the single-sign-on URI required to login to the supplemental portal.

    :param sso_uri_value: The URI used to login to the supplemental portal.
    :type sso_uri_value: str
    """

    # Maps the Python attribute name onto the wire-format key consumed by
    # msrest (de)serialization.
    _attribute_map = {
        'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
    }

    def __init__(self, sso_uri_value=None):
        self.sso_uri_value = sso_uri_value
import math class GeoLocation: ''' Class representing a coordinate on a sphere, most likely Earth. This class is based from the code smaple in this paper: http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates The owner of that website, Jan Philip Matuschek, is the full owner of his intellectual property. The python port was realized by jfein: https://github.com/jfein/PyGeoTools/blob/master/geolocation.py ''' MIN_LAT = math.radians(-90) MAX_LAT = math.radians(90) MIN_LON = math.radians(-180) MAX_LON = math.radians(180) EARTH_RADIUS = 6378.1 # kilometers CONV_FACTOR = 0.621371 @classmethod def from_degrees(cls, deg_lat, deg_lon): rad_lat = math.radians(deg_lat) rad_lon = math.radians(deg_lon) return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon) @classmethod def from_radians(cls, rad_lat, rad_lon): deg_lat = math.degrees(rad_lat) deg_lon = math.degrees(rad_lon) return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon) def __init__( self, rad_lat, rad_lon, deg_lat, deg_lon ): self.rad_lat = float(rad_lat) self.rad_lon = float(rad_lon) self.deg_lat = float(deg_lat) self.deg_lon = float(deg_lon) self._check_bounds() def __str__(self): degree_sign= u'\N{DEGREE SIGN}' return ("({0:.4f}deg, {1:.4f}deg) = ({2:.6f}rad, {3:.6f}rad)").format( self.deg_lat, self.deg_lon, self.rad_lat, self.rad_lon) def _check_bounds(self): if (self.rad_lat < GeoLocation.MIN_LAT or self.rad_lat > GeoLocation.MAX_LAT or self.rad_lon < GeoLocation.MIN_LON or self.rad_lon > GeoLocation.MAX_LON): raise Exception("Illegal arguments") def distance_to(self, other, unit="kilometers", radius=EARTH_RADIUS): ''' Computes the great circle distance between this GeoLocation instance and the other. 
''' distance = radius * math.acos( math.sin(self.rad_lat) * math.sin(other.rad_lat) + math.cos(self.rad_lat) * math.cos(other.rad_lat) * math.cos(self.rad_lon - other.rad_lon) ) if unit.lower() == "kilometers": return distance elif unit.lower() == "miles": return distance/GeoLocation.CONV_FACTORS else: raise Exception("Illegal arguments") def bounding_locations(self, dist, unit="kilometers", radius=EARTH_RADIUS): ''' Computes the bounding coordinates of all points on the surface of a sphere that has a great circle distance to the point represented by this GeoLocation instance that is less or equal to the distance argument. Param: distance - the distance from the point represented by this GeoLocation instance. Must be measured in the same unit as the radius argument (which is kilometers by default) radius - the radius of the sphere. defaults to Earth's radius. Returns a list of two GeoLoations - the SW corner and the NE corner - that represents the bounding box. ''' if unit.lower() == "kilometers": distance = dist elif unit.lower() == "miles": distance = dist/GeoLocation.CONV_FACTOR else: raise Exception("Illegal arguments") if radius < 0 or distance < 0: raise Exception("Illegal arguments") # angular distance in radians on a great circle rad_dist = distance / radius min_lat = self.rad_lat - rad_dist max_lat = self.rad_lat + rad_dist if min_lat > GeoLocation.MIN_LAT and max_lat < GeoLocation.MAX_LAT: delta_lon = math.asin(math.sin(rad_dist) / math.cos(self.rad_lat)) min_lon = self.rad_l
on - delta_lon if min_lon < GeoLocation.MIN_LON: min_lon += 2 * math.pi max_lon = self.rad_lon + delta_lon if max_lon > GeoLocation.MAX_LON: max_lon -= 2 * math.pi # a pole is within the distance else: min_lat = max(min_lat, GeoLocation.MIN_LAT) max_lat = min(max_lat, GeoLocation.MAX_LAT) min_lon = GeoLocation
.MIN_LON max_lon = GeoLocation.MAX_LON return [ GeoLocation.from_radians(min_lat, min_lon) , GeoLocation.from_radians(max_lat, max_lon) ] if __name__ == '__main__': # Test degree to radian conversion loc1 = GeoLocation.from_degrees(26.062951, -80.238853) loc2 = GeoLocation.from_radians(loc1.rad_lat, loc1.rad_lon) assert (loc1.rad_lat == loc2.rad_lat and loc1.rad_lon == loc2.rad_lon and loc1.deg_lat == loc2.deg_lat and loc1.deg_lon == loc2.deg_lon) # Test distance between two locations loc1 = GeoLocation.from_degrees(26.062951, -80.238853) loc2 = GeoLocation.from_degrees(26.060484,-80.207268) assert loc1.distance_to(loc2) == loc2.distance_to(loc1) # Test bounding box loc = GeoLocation.from_degrees(22.5,-135.0) distance = 1 # 1 kilometer SW_loc, NE_loc = loc.bounding_locations(distance) print loc.distance_to(SW_loc) print loc.distance_to(NE_loc)
import pytest
from cplpy import run_test, prepare_config
import subprocess as sp
import os
import glob


class cd:
    """Context manager for changing the current working directory"""
    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


def get_subprocess_error(e):
    """Parse and print a JSON error payload from a failed subprocess.

    `e` is the captured output string, expected to look like
    ``error: {...}``; the first 7 characters ("error: ") are stripped
    before JSON parsing.
    """
    print("subprocess ERROR")
    import json
    error = json.loads(e[7:])
    print(error['code'], error['message'])


# -----MAPPING TESTS-----

# EXPLANATION: These tests fail due to no_procs(MD) != k*no_procs(CFD),
# k in [1,2,3,...] in one direction.
MD_EXEC = "./md"
CFD_EXEC = "./cfd"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))


def _decode_output(output):
    """check_output returns bytes on Python 3; normalise to str for matching."""
    return output.decode() if isinstance(output, bytes) else output


@pytest.fixture()
def prepare_config_fix():
    """Build the MD and CFD test binaries with mpif90 before a test runs."""
    #Try to setup code
    mdcodes = "array_stuff.f90 md_recvsend_cells.f90"
    # BUG FIX: the original concatenated mdcodes directly onto "-I" with no
    # separating space, producing "...md_recvsend_cells.f90-I<path>" and a
    # broken compile command.
    bldmd = ("mpif90 " + mdcodes + " -I" + os.environ["CPL_PATH"]
             + "/include -L" + os.environ["CPL_PATH"] + "/lib "
             + "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./md")
    cfdcodes = "array_stuff.f90 cfd_sendrecv_cells.f90"
    bldcfd = ("mpif90 " + cfdcodes + " -I" + os.environ["CPL_PATH"]
              + "/include " + " -L" + os.environ["CPL_PATH"] + "/lib "
              + "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./cfd")
    with cd(TEST_DIR):
        try:
            out = sp.check_output("rm -f md cfd", shell=True)
            out = sp.check_output(bldmd, shell=True)
            out = sp.check_output(bldcfd, shell=True)
        except sp.CalledProcessError as e:
            # BUG FIX: on Python 3, e.output is bytes and
            # bytes.startswith(str) raises TypeError; decode first.
            output = _decode_output(e.output)
            if output.startswith('error: {'):
                get_subprocess_error(output)


def test_memory_leak():
    """Run the coupled MD/CFD case under valgrind and assert that no
    'definitely lost' bytes are reported in any per-rank log file."""
    #Try to run code
    cmd = ("mpiexec -n 4 valgrind --leak-check=full --log-file='vg_md.%q{PMI_RANK}' ./md "
           + ": -n 2 valgrind --leak-check=full --log-file='vg_cfd.%q{PMI_RANK}' ./cfd")
    with cd(TEST_DIR):
        try:
            out = sp.check_output("rm -f vg_*", shell=True)
            out = sp.check_output(cmd, shell=True)
        except sp.CalledProcessError as e:
            # BUG FIX: decode bytes before the str prefix comparison.
            output = _decode_output(e.output)
            if output.startswith('error: {'):
                get_subprocess_error(output)

        #Check error
        files = glob.glob("vg_*")
        for filename in files:
            with open(filename, 'r') as f:
                filestr = f.read()
            findstr = "definitely lost:"
            indx = filestr.find(findstr)
            line = filestr[indx + len(findstr):].split("\n")[0]
            print(line)
            # valgrind prints e.g. "definitely lost: 0 bytes in 0 blocks"
            assert int(line.split(" ")[1]) == 0


#@pytest.fixture()
#def prepare_config_fix(tmpdir):
#    prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
#    #Build code
#    try:
#        check_output("./build.sh", stderr=STDOUT, shell=True)
#    except:
#        raise

#@pytest.mark.parametrize("cfdprocs, mdprocs, err_msg", [
#                         ((1, 2, 1), (2, 2, 1), "")])
#def test_valgrind(prepare_config_fix, cfdprocs, mdprocs, err_msg):
#    MD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0}
#    MD_PARAMS["npx"], MD_PARAMS["npy"], MD_PARAMS["npz"] = mdprocs
#    CFD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0,
#                  "ncx": 24, "ncy": 24, "ncz": 24,
#                  "which_test": "cell_test"}
#    CFD_PARAMS["npx"], CFD_PARAMS["npy"], CFD_PARAMS["npz"] = cfdprocs
#    CONFIG_PARAMS = {"cfd_bcx": 1, "cfd_bcy": 1, "cfd_bcz": 1,
#                     "olap_xlo": 1, "olap_xhi": 24,
#                     "olap_ylo": 1, "olap_yhi": 4,
#                     "olap_zlo": 1, "olap_zhi": 24,
#                     "cnst_xlo": 1, "cnst_xhi": 1,
#                     "cnst_ylo": 1, "cnst_yhi": 1,
#                     "cnst_zlo": 1, "cnst_zhi": 1,
#                     "tstep_ratio": 50, }
#    parametrizeConfig(template_dir, config_params)
', priority=100, condition=lambda: not current_user.is_anonymous) @bp.route('/') @login_required def root(): return angular.template('tooltool.html', url_for('.static', filename='tooltool.js'), url_for('.static', filename='tooltool.css')) @bp.route('/upload') @api.apimethod([types.UploadBatch], unicode) def search_batches(q): """Search upload batches. The required query parameter ``q`` can match a substring of an author's email or a batch message.""" tbl = tables.Batch q = tbl.query.filter(sa.or_( tbl.author.contains(q), tbl.message.contains(q))) return [row.to_json() for row in q.all()] @bp.route('/upload/<int:id>') @api.apimethod(types.UploadBatch, int) def get_batch(id): """Get a specific upload batch by id.""" row = tables.Batch.query.filter(tables.Batch.id == id).first() if not row: raise NotFound return row.to_json() @bp.route('/upload', methods=['POST']) @api.apimethod(types.UploadBatch, unicode, body=types.UploadBatch) def upload_batch(region=None, body=None): """Create a new upload batch. The response object will contain a ``put_url`` for each file which needs to be uploaded -- which may not be all! The caller is then responsible for uploading to those URLs. The resulting signed URLs are valid for one hour, so uploads should begin within that timeframe. Consider using Amazon's MD5-verification capabilities to ensure that the uploaded files are transferred correctly, although the tooltool server will verify the integrity anyway. The upload must have the header ``Content-Type: application/octet-stream```. The query argument ``region=us-west-1`` indicates a preference for URLs in that region, although if the region is not available then URLs in other regions may be returned. The returned URLs are only valid for 60 seconds, so all upload requests must begin within that timeframe. Clients should therefore perform all uploads in parallel, rather than sequentially. This limitation is in place to prevent malicious modificatio
n of files after they have been verified.""" region, bucket = get_region_and_bucket(region) if not body.message: raise BadRequest("message must be non-empty") if not body.files: raise BadRequest("a batch must include at least one file") if body.author: raise BadRequest("Author must not be specified for upload") try: body.author = current_user.aut
henticated_email except AttributeError: # no authenticated_email -> use the stringified user (probably a token # ID) body.author = str(current_user) # verify permissions based on visibilities visibilities = set(f.visibility for f in body.files.itervalues()) for v in visibilities: prm = p.get('tooltool.upload.{}'.format(v)) if not prm or not prm.can(): raise Forbidden("no permission to upload {} files".format(v)) session = g.db.session(tables.DB_DECLARATIVE_BASE) batch = tables.Batch( uploaded=time.now(), author=body.author, message=body.message) s3 = current_app.aws.connect_to('s3', region) for filename, info in body.files.iteritems(): log = logger.bind(tooltool_sha512=info.digest, tooltool_operation='upload', tooltool_batch_id=batch.id, mozdef=True) if info.algorithm != 'sha512': raise BadRequest("'sha512' is the only allowed digest algorithm") if not is_valid_sha512(info.digest): raise BadRequest("Invalid sha512 digest") digest = info.digest file = tables.File.query.filter(tables.File.sha512 == digest).first() if file and file.visibility != info.visibility: raise BadRequest("Cannot change a file's visibility level") if file and file.instances != []: if file.size != info.size: raise BadRequest("Size mismatch for {}".format(filename)) else: if not file: file = tables.File( sha512=digest, visibility=info.visibility, size=info.size) session.add(file) log.info("generating signed S3 PUT URL to {} for {}; expiring in {}s".format( info.digest[:10], current_user, UPLOAD_EXPIRES_IN)) info.put_url = s3.generate_url( method='PUT', expires_in=UPLOAD_EXPIRES_IN, bucket=bucket, key=util.keyname(info.digest), headers={'Content-Type': 'application/octet-stream'}) # The PendingUpload row needs to reflect the updated expiration # time, even if there's an existing pending upload that expires # earlier. The `merge` method does a SELECT and then either UPDATEs # or INSERTs the row. 
However, merge needs the file_id, rather than # just a reference to the file object; and for that, we need to flush # the inserted file. session.flush() pu = tables.PendingUpload( file_id=file.id, region=region, expires=time.now() + datetime.timedelta(seconds=UPLOAD_EXPIRES_IN)) session.merge(pu) session.add(tables.BatchFile(filename=filename, file=file, batch=batch)) session.add(batch) session.commit() body.id = batch.id return body @bp.route('/upload/complete/sha512/<digest>') @api.apimethod(unicode, unicode, status_code=202) def upload_complete(digest): """Signal that a file has been uploaded and the server should begin validating it. This is merely an optimization: the server also polls occasionally for uploads and validates them when they appear. Uploads cannot be safely validated until the upload URL has expired, which occurs a short time after the URL is generated (currently 60 seconds but subject to change). If the upload URL has expired, then the response is an HTTP 202 indicating that the signal has been accepted. If the URL has not expired, then the response is an HTTP 409, and the ``X-Retry-After`` header gives a time, in seconds, that the client should wait before trying again.""" if not is_valid_sha512(digest): raise BadRequest("Invalid sha512 digest") # if the pending upload is still valid, then we can't check this file # yet, so return 409 Conflict. If there is no PU, or it's expired, # then we can proceed. 
file = tables.File.query.filter(tables.File.sha512 == digest).first() if file: for pu in file.pending_uploads: until = pu.expires - time.now() if until > datetime.timedelta(0): # add 1 second to avoid rounding / skew errors hdr = {'X-Retry-After': str(1 + int(until.total_seconds()))} return Response(status=409, headers=hdr) # start a celery task in the background and return immediately grooming.check_file_pending_uploads.delay(digest) return '{}', 202 @bp.route('/file') @api.apimethod([types.File], unicode) def search_files(q): """Search for files matching the query ``q``. The query matches against prefixes of hashes (at least 8 characters) or against filenames.""" session = g.db.session(tables.DB_DECLARATIVE_BASE) query = session.query(tables.File).join(tables.BatchFile) query = query.filter(sa.or_( tables.BatchFile.filename.contains(q), tables.File.sha512.startswith(q))) return [row.to_json() for row in query.all()] @bp.route('/file/sha512/<digest>') @api.apimethod(types.File, unicode, unicode) def get_file(digest): """Get a single file, by its digest. Filenames are associated with upload batches, not directly with files, so use ``GET /uploads`` to find files by filename. The returned File instance contains an ``instances`` attribute showing the regions in which the file exists.""" row = tables.File.query.filter(tables.File.sha512 == digest).first() if not
###############################################################################
## File       : b64decode.py
## Description: Base64 decode a supplied list of strings
##            :
## Created_On : Wed Sep 26 12:33:16 2012
## Created_By : Rich Smith (rich@kyr.us)
## Modified_On: Tue Jan 29 16:42:41 2013
## Modified_By: Rich Smith (rich@kyr.us)
## License    : BSD-3
##
###############################################################################
import base64

__author__ = "rich@kyr.us"
__version__ = 1.0
__updated__ = "26/09/2012"
__help__ = "Module for decoding a string from Base64 representation"

__alias__ = ["b64d"]


def Command(pymyo, name, *args):
    """
    Base64 decode each argument supplied, sending the decoded bytes to the
    console object.

    pymyo - console object providing .output() and .error()
    name  - name the command was invoked as (unused here)
    args  - base64-encoded strings to decode
    """
    for s in args:
        try:
            # BUG FIX: base64.decodestring() was deprecated and removed in
            # Python 3.9; decodebytes() is the supported equivalent.  It
            # requires bytes, so str arguments are encoded first.
            raw = s.encode() if isinstance(s, str) else s
            pymyo.output(base64.decodebytes(raw))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; binascii.Error covers bad padding.
            pymyo.error("Error decoding %s" % (s))
desFromObjects(oObjList): return fileNodesFromShaders(shadersFromObjects(oObjList)) def fileNodesFromShaders(oMatList): oFileNodeList = set() for oMat in oMatList: oFileNodeList.update(oMat.listHistory(type="file")) return list(oFileNodeList) def shadersFromObjects(objList, connectedTo=""): sAttrName = connectedTo if not objList: return [] oMatSgList = shadingGroupsFromObjects(objList) oMatList = [] for oMatSg in oMatSgList: sName = oMatSg.attr(sAttrName).name() if connectedTo else oMatSg.name() oMatList.extend(pm.ls(listForNone(mc.listConnections(sName, source=True, destination=False)), type=mc.listNodeTypes('shader', ex="texture"))) return oMatList def shadingGroupsFromObjects(objList): oShdGrpList = set() for obj in objList: oObj = obj if isinstance(obj, pm.PyNode) else pm.PyNode(obj) oShdGrpList.update(shadingGroupsForObject(oObj)) return list(oShdGrpList) def shadingGroupsForObject(oObj, warn=True): oShdGrpList = [] oShape = None if isinstance(oObj, pm.general.MeshFace): indiceList = oObj.indices() for oShdEng in oObj.listHistory(type="shadingEngine"): if set(indiceList).intersection(set(oShdEng.members()[0].indices())): oShdGrpList.append(oShdEng) elif isinstance(oObj, pm.general.NurbsSurfaceFace): oShape = oObj.node() elif isinstance(oObj, pm.nt.Transform): oShape = oObj.getShape() elif isinstance(oObj, (pm.nt.Mesh, pm.nt.NurbsSurface)): oShape = oObj elif warn: logMsg("Can't get shading groups from {}".format(repr(oObj)) , warning=True) if not oShdGrpList: if oShape: oShdGrpList = oShape.shadingGroups() if not oShdGrpList: oShdGrpList = oShape.connections(type="shadingEngine") return oShdGrpList def conformShadingNetworkToNamespace(oMeshList, sNamespaceToMatch , **kwargs): bForce = kwargs.get("force", False) oShadingGroupMembersDct = {} oMatNotConformList = [] for oShape in oMeshList: # print "\nfor shape: ", oShape oMatSGList = shadingGroupsForObject(oShape) for oMatSG in oMatSGList: # print "for shadingGroup: ", oMatSG oMatList = pm.ls(oMatSG.inputs(), 
type=mc.listNodeTypes('shader', ex="texture")) oMat = oMatList[0] ##ignore shadingGroups where materials are defaultNode if oMat.isDefaultNode(): continue ##ignore shadingGroups where materials are already in namespace to match sMatNamespace = oMat.namespace() # print "sMatNamespace", sMatNamespace # print "sNamespaceToMatch", sNamespaceToMatch if sMatNamespace == sNamespaceToMatch: continue else: oMatNotConformList.append(oMat) oMembers = oMatSG.members() for oMember in oMembers: # print "member :", oMember if oMember.node() == oShape: oShadingGroupMembersDct.setdefault(oMatSG, []).append(oMember) # for k, v in oShadingGroupMembersDct.iteritems(): # print "for shadingGroup: ", k, ", specific members are: ", v if oMatNotConformList: if bForce: pass else: result = pm.confirmDialog(title='Materials not conform to Namespace...' , message="Found materials not conform to Namespace,\nCopy Shading Network, Conform to Namespace & Assign ?" , button=["OK", 'Cancel'] , defaultButton='Cancel' , cancelButton='Cancel' , dismissString='Cancel') if result == "Cancel": pm.warning("Materials Namespace conformation cancelled.") return bForce else: bForce = True else: if sNamespaceToMatch: logMsg('Materials already conformed to Namespace: "{0}"'.format(sNamespaceToMatch) , warning=True) return bForce ##Force current namespace to the one to match to duplicate in this namespace mc.namespace(set=":") mc.namesp
ace(set=sNamespaceToMatch if sNamespaceToMatch else ":") oMatNotConformList = [] oShapeAssignedList = [] for oMatSG, oMembers in oShadingGroupMembersDct.iteritems(): oNewMatSGs = pm.duplicate(oMatS
G, rr=True, un=True) oNewMatSG = oNewMatSGs[0] # print "old shadingGroup: ", oMatSG # print "new shadingGroup: ", oNewMatSGs[0] # print "oMembers", oMembers # print oMembers[0] for oMember in oMembers: oShape = oMember.node() if oShape not in oShapeAssignedList: oShapeAssignedList.append(oShape) try: pm.sets(oNewMatSG, e=True, forceElement=oShape) logMsg('Material "{0}" assigned first to: "{1}"'.format(oNewMatSG, oShape) , warning=True) except: logMsg('Could not assign material "{0}" first to: "{1}"'.format(oNewMatSG, oShape) , warning=True) try: pm.sets(oNewMatSG, e=True, forceElement=oMembers) logMsg('Material "{0}" assigned to: "{1}"'.format(oNewMatSG, oMembers) , warning=True) except: logMsg('Could not assign material "{0}" to: "{1}"'.format(oNewMatSG, oMembers) , warning=True) mc.namespace(set=":") return bForce def transferUvAndShaders(oSrcGrp, oDestGrp): notCompatibleShapeList = [] sSourceNameSpace = oSrcGrp.namespace() notFoundList = [] transferList = [] oTargetList = pm.ls(oDestGrp, dag=True, tr=True) #searchCount = len(oTargetList) for oTargetXfm in oTargetList: oShape = oTargetXfm.getShape(ni=True) if isinstance(oShape, pm.nt.Mesh): sXfmName = oTargetXfm.nodeName() sSourceName = sSourceNameSpace + sXfmName oSourceXfm = pm.PyNode(sSourceName) if oSourceXfm: transferList.append((oSourceXfm, oTargetXfm)) # print oSourceXfm, oTargetXfm else: notFoundList.append(oTargetXfm) print 'No match found for "{0}"'.format(sXfmName) print "Searching... 
{0}".format(oTargetXfm.nodeName()) # oSet = fncTools.checkSet("noMatchFound") # if notFoundList: # pm.sets(oSet, addElement=notFoundList) result = pm.confirmDialog(title='Transfer Uvs', message='Found {0}/{1} mismatches :'.format(len(notFoundList), len(transferList)), button=['Ok', 'Cancel'], defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel') if result == 'Cancel': return else : for oSourceXfm, oTargetXfm in transferList: oSourceShape = oSourceXfm.getShape(ni=True) oHistList = oTargetXfm.listHistory() oShapeList = pm.ls(oHistList, type="mesh") oTargetShape = None bShapeOrig = False oTargetCurrentShape = oTargetXfm.getShape(ni=True) if len(oShapeList) > 1: for oShape in oShapeList: if oShape.getAttr("intermediateObject") and oShape.attr("worldMesh").outputs(): bShapeOrig = True oShape.setAttr("intermediateObject", False) oTargetShape = oShape break else: oTargetShape = oTargetCurrentShape if oTargetShape: try: print ('transferring uvs and shaders from "{0}" to "{1}"' .format(oSourceShape, oTargetShape)) if oTargetCurrentShape.numVertices() != o
#!/usr/bin/python
import os
import json


def main():
    """Demo post-script: print every filename handed over via MH_FILES.

    The MH_FILES environment variable is expected to hold a JSON-encoded
    list of file paths.
    """
    print("Sample Post Script")
    raw_json = os.environ.get('MH_FILES')
    for entry in json.loads(raw_json):
        print(entry)


if __name__ == "__main__":
    main()
from kompromatron.core import app from kompromatron.views.base i
mport base # app.register_blueprint(entitie
s) # app.register_blueprint(relations) #app.register_blueprint(base)
-min=LONGITUDE_MIN #                        The min longitude (float in the interval [-180 ; 180]) #  -X LONGITUDE_MAX, --longitude-max=LONGITUDE_MAX #                        The max longitude (float in the interval [-180 ; 180]) #  -z DEPTH_MIN, --depth-min=DEPTH_MIN #                        The min depth (float in the interval [0 ; 2e31] or #                        string 'Surface') #  -Z DEPTH_MAX, --depth-max=DEPTH_MAX #                        The max depth (float in the interval [0 ; 2e31] or #                        string 'Surface') # Area : x east-west longitude, y north-south latitude, z depth xmin_longitude = "-45" xmax_longitude = "-20" ymin_latitude = "57" ymax_latitude = "61" z
min_depth = "0.494" zmax_depth = "0.4942" # Date - Timerange yyyystart = 2
007 mmstart = 01 yyyyend = 2007 mmend = 12 hhstart = " 12:00:00" hhend = " 12:00:00" dd = 1 # Output files out_path= "C:\Users\Sam\Downloads\glorys_data" pre_name= "TestPythonExtr_" #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #                     Main Program # #          Motu Client Call through Python Loop # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Specific comment For WINDOWS USER: # If you're using this script for the first time, you # shouldn't be worried by the following. Just save your # script (ctrl + s), quit (alt + F4) and launch it # (WinKey + R then input cmd then ENTER) by typing # "C:\Python27\python script_name.py" # # For users, be careful if you have to modify the lines below. # CMEMS Central Service Desk will be happy to help you # either via email (servicedesk.cmems@mercator-ocean.eu) # or via the CMEMS Forum (http://bit.ly/1L1Iy5f) # Get PYTHON PATH depending on OS if platform.system() == "Windows":     PYTHON = "C:/Python27/python.exe" else:     PYTHON = "/usr/bin/python"   # Check motu-client.py file exists if not os.path.exists(motu_cl):     print "\n[ERROR] Path to motu-client.py cannot be found: %s\n\n[INFO] Please correct value of 'motu_cl' variable."%motu_cl     print "\n\n[INFO] If you haven't downloaded the motu-client-python yet, get the latest version here:\nhttps://github.com/clstoulouse/motu-client-python/releases/latest\n"     sys.exit() # Check if output directory is well formated and if it exists, otherwise create it absolute_path_substring = ['/home/', 'C:\\'] if local_storage_directory_name[-1] != '/':     local_storage_directory_name = local_storage_directory_name + "/" if not any(x in local_storage_directory_name for x in absolute_path_substring):     local_storage_directory_name = os.path.expanduser('~') + "/" + local_storage_directory_name if not os.path.exists(local_storage_directory_name):     os.makedirs(local_storage_directory_name) # Flags to let the server clears the buffer - 
better to be respectful when retrieving OPEN data buffer_flag = False cmd_flag = False # Error Handle on dates (to illustrate an if statement >) if yyyystart>yyyyend:     print "[ERROR] in [Date Parameters]"     print """Please double check your date parameters, specifically the "yyyystart" which is currently greater than "yyyyend."""     print """End of data extraction service."""     sys.exit() # Other variable definitions to be compatible with deprecated script versions still available on the Internet pre_name = "CMEMS_" + (serv_id.split()[1]).split("-")[0] + "_" log_cmems = "-u " + username_cmems pwd_cmems = "-p " + password_cmems motu_id = "-m " + motu_serv_id serv_id = "-s " + service_prod_id pre_fic_cmd = "-f "+ pre_name out_cmd = "-o " + local_storage_directory_name proxy_user = "--proxy-user " + proxy_user_login proxy_pwd = "--proxy-pwd " + proxy_user_password proxy_server = "--proxy-server " + proxy_server_url + ":" + proxy_server_port xmin = "-x " + xmin_longitude xmax = "-X " + xmax_longitude ymin = "-y " + ymin_latitude ymax = "-Y " + ymax_latitude zmin = "-z " + zmin_depth zmax = "-Z " + zmax_depth # To illustrate a simple Error Handle to delete a file when desired try:     os.remove(out_cmd.split()[1] + logfile) except OSError:     print "" print"\n+----------------------------+\n| ! 
- CONNEXION TO CMEMS HUB |\n+----------------------------+\n\n" # To illustrate a For_Loop in order to generate download requests for several datasets held in a product for key, value in dict_id.iteritems():          if buffer_flag:         print "Little pause to let the server clearing the buffer, it will automatically resume once it's completed.\nNot mandatory but server-friendly <span class="Emoticon Emoticon1"><span>:-)</span></span>\n"         time.sleep(2)         buffer_flag = False     # Date declaration     date_start = dt.datetime(yyyystart,mmstart,dd,0,0)     date_end = dt.datetime(yyyyend,mmend,dd,0,0)     # To illustrate a While_Loop in order to extract dailymean data, packed by month (Jan., Fev., Mar. etc...),     # for as many download requests as number of months available in the timerange.     while (date_start<=date_end):         date_end_cmd = (dt.datetime(date_start.year, date_start.month,\         calendar.monthrange(date_start.year, date_start.month)[1]))         date_cmd = ' -t \"' + date_start.strftime("%Y-%m-%d") + hhstart + '\"'\         +' -T \"' + date_end_cmd.strftime("%Y-%m-%d") + hhend + '\"'         fic_cmd = pre_fic_cmd + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"         ficout = pre_name + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"         print "----------------------------------\n- ! 
- Processing dataset request : %s"%ficout         print "----------------------------------\n"         if not os.path.exists(out_cmd.split()[1] + ficout):             if proxy_flag:                 if not zmin_depth:                     cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\                                 motu_id, serv_id, value[1],\                                 xmin, xmax, ymin, ymax,\                                 date_cmd, value[0], out_cmd, fic_cmd,\                                 proxy_server, proxy_user, proxy_pwd, "-q"])                 else:                     cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\                                 motu_id, serv_id, value[1],\                                 xmin, xmax, ymin, ymax, zmin, zmax,\                                 date_cmd, value[0], out_cmd, fic_cmd,\                                 proxy_server, proxy_user, proxy_pwd, "-q"])             else:                 if not zmin_depth:                     cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\                                 motu_id, serv_id, value[1],\                                 xmin, xmax, ymin, ymax,\                                 date_cmd, value[0], out_cmd, fic_cmd, "-q"])                 else:                     cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\                                 motu_id, serv_id, value[1],\                                 xmin, xmax, ymin, ymax, zmin, zmax,\                                 date_cmd, value[0], out_cmd, fic_cmd, "-q"])             print "## MOTU API COMMAND ##"             print cmd             print "\n[INFO] CMEMS server is checking both your credentials and command syntax. If successful, it will extract the data and create your dataset on the fly. Please wait. 
\n"             subpro=subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)             message,erreur = subpro.communicate()             stat = subpro.returncode             if stat != 0:                     print "-- ERROR Incorrect Credentials :\n %s"%message                     with open(out_cmd.split()[1] + logfile,'a') as mylog:                         mylog.write("Error : %s NOK\nDue to : %s"%(ficout,message))                     if 'HTTP Error 400' in message:                         print '''[INFO] Copernicus Marine USERNAME ('username_cmems') and/or PASSWORD ('password_cmems') are incorrect.\n\n[INFO] To execute the MOTU API COMMAND from your shell/terminal, please note the following rules:\n                         On *nix OS, you must use the single quote, otherwise it may expand specif
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm

from trainer.models import Language


class AddWordForm(forms.Form):
    """Pick a language and enter a single word to add."""
    language = forms.ModelChoiceField(queryset=Language.objects.all())
    word = forms.CharField(required=True)


class CreateSetForm(forms.Form):
    """Name a new word set."""
    # BUG FIX: the original read `models.CharField(default="")` -- `models`
    # was never imported (NameError at import time), and form fields take
    # `initial`, not `default`.
    name = forms.CharField(initial="")


class UserCreateForm(UserCreationForm):
    """Registration form that additionally requires email and real name."""
    email = forms.EmailField(required=True)
    first_name = forms.CharField(required=True)
    last_name = forms.CharField(required=True)

    class Meta:
        model = User
        fields = ("username", "email", "first_name", "last_name",
                  "password1", "password2")

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the User before saving."""
        user = super(UserCreateForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        # BUG FIX: the original assigned to `user.name` / `user.prename`,
        # which are not fields on django.contrib.auth.models.User, so the
        # entered names were silently dropped instead of persisted.
        user.first_name = self.cleaned_data["first_name"]
        user.last_name = self.cleaned_data["last_name"]
        if commit:
            user.save()
        return user


class LoginForm(forms.Form):
    """Simple username/password login form."""
    username = forms.CharField(required=True)
    password = forms.CharField(widget=forms.PasswordInput())


class UploadFileForm(forms.Form):
    """Upload a word-list file for a given language."""
    language = forms.ModelChoiceField(label='Language',
                                      queryset=Language.objects.all(),
                                      required=True)
    file = forms.FileField(required=True)
import logging
from flask import (
    Flask,
    request,
    Response
)
import requests

app = Flask(__name__)


@app.route('/<path:url>', methods=['GET', 'POST', 'PUT', 'PATCH'])
def proxy(url):
    """Forward the incoming request to an upstream host and relay its reply.

    The request method, headers, body (raw data or form) and cookies are
    copied onto a new outgoing request; the upstream's body, status code,
    headers and cookies are copied back onto the Flask response.

    :param url: path portion captured from the route; appended to the target.
    :returns: a Flask response mirroring the upstream response.
    """
    # Special case: socket.io polling traffic is not redirected upstream.
    # NOTE(review): using request.base_url points the request back at this
    # proxy itself — confirm that is the intended socket.io handling.
    if url == "socket.io/":
        target = request.base_url
    else:
        # target = f"http://localhost:80/{url}"
        target = f"http://www.google.com/{url}"
    payload = request.data or request.form
    logging.debug(f'url: {url}, target: {target}')
    upstream_request = requests.Request(method=request.method,
                                        url=target,
                                        headers=request.headers,
                                        data=payload,
                                        cookies=request.cookies)
    resp = requests.Session().send(upstream_request.prepare())
    logging.debug(resp.content)
    response = app.make_response((resp.content, resp.status_code, resp.headers.items()))
    # BUG FIX: iterating a RequestsCookieJar directly yields Cookie objects,
    # so the old ``for k, v in resp.cookies`` failed to unpack whenever the
    # upstream set a cookie. The jar's dict-like .items() yields name/value
    # pairs, which is what set_cookie expects.
    for name, value in resp.cookies.items():
        response.set_cookie(name, value)
    return response


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=9999)
""" WSGI config for SysuLesson project. It exposes the WSGI callable as a module-level variable named ``application``. For more infor
mation on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi im
port get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SysuLesson.settings") application = get_wsgi_application()
from django.contrib import admin
# NOTE(review): everything below is disabled code parked inside a module-level
# string literal — a no-op at import time, so none of these ModelAdmin classes
# are registered. Kept untouched; delete or re-enable it deliberately.
'''from tester.models import Club,Member,Signup,Event

class admin_club(admin.ModelAdmin):
    list_display=["club_name"]

class admin_event(admin.ModelAdmin):
    list_display=["event_name"]

class admin_student(admin.ModelAdmin):
    list_display=["usn","name"]

class admin_member(admin.ModelAdmin):
    list_display=["club_id","usn"]

admin.site.register(Club,admin_club)
admin.site.register(Member,admin_member)
admin.site.register(Signup,admin_student)
admin.site.register(Event,admin_event)
'''
State['t'], minThreshold = self.activationThreshold) if i is None: continue # Turn on the predicted state for the best matching cell and queue # the pertinent segment up for an update, which will get processed if # the cell receives bottom up in the future. self.lrnPredictedState['t'][c, i] = 1 if readOnly: continue # Queue up this segment for updating segUpdate = self.getSegmentActiveSynapses( c, i, s, activeState=self.lrnActiveState['t'], newSynapses=(numActive < self.newSynapseCount)) s.totalActivations += 1 # increment totalActivations self.addToSegmentUpdates(c, i, segUpdate) if self.doPooling: # creates a new pooling segment if no best matching segment found # sum(all synapses) >= minThreshold, "weak" activation predSegment = self.getBestMatchingSegment(c, i, self.lrnActiveState['t-1']) segUpdate = self.getSegmentActiveSynapses(c, i, predSegment, self.lrnActiveState['t-1'], newSynapses=True) self.addToSegmentUpdates(c, i, segUpdate) def updateLearningState(self, activeColumns): """ Update the learning state. 
Called from compute() on every iteration @param activeColumns List of active column indices """ # Copy predicted and active states into t-1 self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :] self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :] # Update our learning input history if self.maxLrnBacktrack > 0: if len(self._prevLrnPatterns) > self.maxLrnBacktrack: self._prevLrnPatterns.pop(0) self._prevLrnPatterns.append(activeColumns) if self.verbosity >= 4: print "Previous learn patterns: \n" print self._prevLrnPatterns # Process queued up segment updates, now that we have bottom-up, we # can update the permanences on the cells that we predicted to turn on # and did receive bottom-up self.processSegmentUpdates(activeColumns) # Decrement the PAM counter if it is running and increment our learned # sequence length if self.pamCounter > 0: self.pamCounter -= 1 self.learnedSeqLength += 1 # Phase 1 - turn on the predicted cell in each column that received # bottom-up. If there was no predicted cell, pick one to learn to. if not self.resetCalled: # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1'] # computes lrnActiveState['t'] inSequence = self.learnPhase1(activeColumns) # Reset our PAM counter if we are in sequence if inSequence: self.pamCounter = self.pamLength # Print status of PAM counter, learned sequence length if self.verbosity >= 3: print "pamCounter = ", self.pamCounter, "seqLength = ", \ self.learnedSeqLength # Start over on start cells if any of the following occur: # 1.) A reset was just called # 2.) We have been loo long out of sequence (the pamCounter has expired) # 3.) We have reached maximum allowed sequence length. # # Note that, unless we are following a reset, we also just learned or # re-enforced connections to the current set of active columns because # this input is still a valid prediction to learn. 
# # It is especially helpful to learn the connections to this input when # you have a maxSeqLength constraint in place. Otherwise, you will have # no continuity at all between sub-sequences of length maxSeqLength. if (self.resetCalled or self.pamCounter == 0 or (self.maxSeqLength != 0 and self.learnedSeqLength >= self.maxSeqLength)): if self.verbosity >= 3: if self.resetCalled: print "Starting over:", activeColumns, "(reset was called)" elif self.pamCounter == 0: print "Starting over:", activeColumns, "(PAM counter expired)" else: print "Starting over:", activeColumns, "(reached maxSeqLength)" # Update average learned sequence length - this is a diagnostic statistic if self.pamCounter =
= 0: seqLength = self.learnedSeqLength - self.pamLength else: seqLength = self.learnedSeqLength if self.verbosity >= 3: print " learned sequence length was:", seqLength self._updateAvgLearnedSeqLength(seqLength) # Backtrack to an earlier
starting point, if we find one backSteps = 0 if not self.resetCalled: backSteps = self.learnBacktrack() # Start over in the current time step if reset was called, or we couldn't # backtrack. if self.resetCalled or backSteps == 0: self.lrnActiveState['t'].fill(0) for c in activeColumns: self.lrnActiveState['t'][c, 0] = 1 # Remove any old input history patterns self._prevLrnPatterns = [] # Reset PAM counter self.pamCounter = self.pamLength self.learnedSeqLength = backSteps # Clear out any old segment updates from prior sequences self.segmentUpdates = {} # Phase 2 - Compute new predicted state. When computing predictions for # phase 2, we predict at most one cell per column (the one with the best # matching segment). self.learnPhase2() def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): """ Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @param enableLearn If true, perform learning @param computeInfOutput If None, default behavior is to disable the inference output when enableLearn is on. 
If true, compute the inference output If false, do not compute the inference output @returns TODO: document It is an error to have both enableLearn and computeInfOutput set to False By default, we don't compute the inference output when learning because it slows things down, but you can override this by passing in True for computeInfOutput """ # As a speed optimization for now (until we need online learning), skip # computing the inference output while learning if computeInfOutput is None: if enableLearn: computeInfOutput = False else: computeInfOutput = True assert (enableLearn or computeInfOutput) # Get the list of columns that have bottom-up activeColumns = bottomUpInput.nonzero()[0] if enableLearn: self.lrnIterationIdx += 1 self.iterationIdx += 1 if self.verbosity >= 3: print "\n==== PY Iteration: %d =====" % (self.iterationIdx) print "Active cols:", activeColumns # Update segment duty cycles if we are crossing a "tier" # We determine if it's time to update the segment duty cycles. Since the # duty cycle calculation is a moving average based on a tiered alpha, it is # important that we update all segments on each tier boundary if enableLearn: if self.lrnIterationIdx in Segment.dutyCycleTiers: for c, i in itertools.product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)): for segment in self.cells[c][i]: segment.dutyCycle() # Update the average input density if self.avgInputDensity is None: self.avgInputDensity = len(activeColumns) else: self.avgInputDensity = (0.99 * self.avgInputDensity + 0.01 * len(activeColumns)) # First, update the inference state # As a speed optimization for now (until we need online learning), skip # computing the inference output while learning if computeInfOutput: self.updateInferenceState(activeColumns) # Next, update the learning state if enableLearn: self.updateLearningState(activeColumns) # Apply global decay, and remove synapses and/or segments. # Synapses are removed if their permanence value is <= 0. 
# Segments are removed when they don't have synapses anymore. # Removal of synap
'role_id': self.roles.first().id, 'confirm_password': 'four'} res = self.client.post(USER_CREATE_URL, formData) self.assertFormError( res, "form", 'password', ['Password must be between 8 and 18 characters.']) @test.create_stubs({api.keystone: ('get_default_domain', 'tenant_list', 'role_list', 'get_default_role')}) def test_create_validation_for_password_too_long(self): user = self.users.get(id="1") domain = self._get_default_domain() domain_id = domain.id api.keystone.get_default_domain(IgnoreArg()) \ .MultipleTimes().AndReturn(domain) api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \ .AndReturn([self.tenants.list(), False]) api.keys
tone.role_list(IgnoreArg()).AndReturn(self.roles.list()) api.keystone.get_defa
ult_role(IgnoreArg()) \ .AndReturn(self.roles.first()) self.mox.ReplayAll() # check password min-len verification formData = {'method': 'CreateUserForm', 'domain_id': domain_id, 'name': user.name, 'email': user.email, 'password': 'MoreThanEighteenChars', 'project': self.tenant.id, 'role_id': self.roles.first().id, 'confirm_password': 'MoreThanEighteenChars'} res = self.client.post(USER_CREATE_URL, formData) self.assertFormError( res, "form", 'password', ['Password must be between 8 and 18 characters.']) @test.create_stubs({api.keystone: ('user_get', 'domain_get', 'tenant_list', 'user_update_tenant', 'user_update_password', 'user_update', 'roles_for_user', )}) def _update(self, user): user = self.users.get(id="1") domain_id = user.domain_id domain = self.domains.get(id=domain_id) test_password = 'normalpwd' email = getattr(user, 'email', '') api.keystone.user_get(IsA(http.HttpRequest), '1', admin=True).AndReturn(user) api.keystone.domain_get(IsA(http.HttpRequest), domain_id).AndReturn(domain) api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \ .AndReturn([self.tenants.list(), False]) api.keystone.user_update(IsA(http.HttpRequest), user.id, email=email, name=u'test_user', password=test_password, project=self.tenant.id).AndReturn(None) self.mox.ReplayAll() formData = {'method': 'UpdateUserForm', 'id': user.id, 'name': user.name, 'email': email, 'password': test_password, 'project': self.tenant.id, 'confirm_password': test_password} res = self.client.post(USER_UPDATE_URL, formData) self.assertNoFormErrors(res) @test.create_stubs({api.keystone: ('user_get', 'domain_get', 'tenant_list', 'user_update_tenant', 'user_update_password', 'user_update', 'roles_for_user', )}) def test_update_with_no_email_attribute(self): user = self.users.get(id="5") domain_id = user.domain_id domain = self.domains.get(id=domain_id) api.keystone.user_get(IsA(http.HttpRequest), '1', admin=True).AndReturn(user) api.keystone.domain_get(IsA(http.HttpRequest), 
domain_id).AndReturn(domain) api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \ .AndReturn([self.tenants.list(), False]) api.keystone.user_update(IsA(http.HttpRequest), user.id, email=user.email, name=user.name, password=user.password, project=self.tenant.id).AndReturn(None) self.mox.ReplayAll() formData = {'method': 'UpdateUserForm', 'id': user.id, 'name': user.name, 'email': "", 'password': user.password, 'project': self.tenant.id, 'confirm_password': user.password} res = self.client.post(USER_UPDATE_URL, formData) self.assertNoFormErrors(res) @test.create_stubs({api.keystone: ('user_get', 'domain_get', 'tenant_list', 'user_update_tenant', 'keystone_can_edit_user', 'roles_for_user', )}) def test_update_with_keystone_can_edit_user_false(self): user = self.users.get(id="1") domain_id = user.domain_id domain = self.domains.get(id=domain_id) api.keystone.user_get(IsA(http.HttpRequest), '1', admin=True).AndReturn(user) api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \ .AndReturn(domain) api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \ .AndReturn([self.tenants.list(), False]) api.keystone.keystone_can_edit_user().AndReturn(False) api.keystone.keystone_can_edit_user().AndReturn(False) self.mox.ReplayAll() formData = {'method': 'UpdateUserForm', 'id': user.id, 'name': user.name, 'project': self.tenant.id, } res = self.client.post(USER_UPDATE_URL, formData) self.assertNoFormErrors(res) self.assertMessageCount(error=1) @test.create_stubs({api.keystone: ('domain_get', 'user_get', 'tenant_list')}) def test_update_validation_for_password_too_short(self): user = self.users.get(id="1") domain_id = user.domain_id domain = self.domains.get(id=domain_id) api.keystone.user_get(IsA(http.HttpRequest), '1', admin=True).AndReturn(user) api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \ .AndReturn(domain) api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \ .AndReturn([self.tenants.list(), False]) 
self.mox.ReplayAll() formData = {'method': 'UpdateUserForm', 'id': user.id, 'name': user.name, 'email': user.email, 'password': 't', 'project': self.tenant.id, 'confirm_password': 't'} res = self.client.post(USER_UPDATE_URL, formData) self.assertFormError( res, "form", 'password', ['Password must be between 8 and 18 characters.']) @test.create_stubs({api.keystone: ('domain_get', 'user_get', 'tenant_list')}) def test_update_validation_for_password_too_long(self): user = self.users.get(id="1
import subprocess as sp


def matches(text):
    """Return True when *text* is a shell-command line, i.e. starts with '#'."""
    return text[:1] == '#'


def process(text):
    """Run the text after the leading '#' as a shell command.

    Returns the command's stdout, UTF-8 decoded, with trailing whitespace
    stripped and literal backslash-n sequences expanded into real newlines.
    """
    command = text[1:]
    raw = sp.check_output(command, shell=True)
    output = raw.decode('utf-8').rstrip()
    return output.replace('\\n', '\n')
import os
import sys

from direct.showbase.ShowBase import ShowBase
import panda3d.core as p3d

import blenderpanda
import inputmapper
from nitrogen import gamestates

# Resolve the application root for both frozen (packaged executable) builds
# and a plain source checkout.
if hasattr(sys, 'frozen'):
    APP_ROOT_DIR = os.path.dirname(sys.executable)
else:
    APP_ROOT_DIR = os.path.dirname(__file__)
if not APP_ROOT_DIR:
    # dirname(__file__) is '' when launched as "python main.py" from the same
    # directory; bail out rather than build config paths off an unknown cwd.
    print("empty app_root_dir")
    sys.exit()

# prc files to load sorted by load order
CONFIG_ROOT_DIR = os.path.join(APP_ROOT_DIR, 'config')
CONFIG_FILES = [
    os.path.join(CONFIG_ROOT_DIR, 'game.prc'),
    os.path.join(CONFIG_ROOT_DIR, 'user.prc'),
]

# Feed each existing prc file to Panda3D; a missing file is reported but not
# fatal (user.prc in particular may legitimately be absent).
for config_file in CONFIG_FILES:
    if os.path.exists(config_file):
        print("Loading config file:", config_file)
        config_file = p3d.Filename.from_os_specific(config_file)
        p3d.load_prc_file(config_file)
    else:
        print("Could not find config file", config_file)


class GameApp(ShowBase):
    # Main application: sets up rendering, input mapping, mouse capture and
    # drives the active game state once per frame.
    def __init__(self):
        ShowBase.__init__(self)
        blenderpanda.init(self)
        self.input_mapper = inputmapper.InputMapper(os.path.join(CONFIG_ROOT_DIR, 'input.conf'))
        self.accept('quit', sys.exit)
        # Turn off ShowBase's default mouse-driven camera control.
        self.disableMouse()

        # Center the pointer in the window, then confine the mouse to it.
        winprops = self.win.get_properties()
        self.win.move_pointer(0, winprops.get_x_size() // 2, winprops.get_y_size() // 2)
        winprops = p3d.WindowProperties()
        winprops.set_mouse_mode(p3d.WindowProperties.M_confined)
        self.win.request_properties(winprops)

        self.current_state = gamestates.MainState()

        def update_gamestate(task):
            # Advance the active game state by the global clock's frame dt.
            self.current_state.update(p3d.ClockObject.get_global_clock().get_dt())
            return task.cont
        self.taskMgr.add(update_gamestate, 'GameState')

    def change_state(self, next_state):
        # Tear down the active state, then instantiate *next_state* (a state
        # class, not an instance) as the new current state.
        self.current_state.cleanup()
        self.current_state = next_state()


def main():
    # Entry point: build the app and hand control to Panda3D's main loop.
    app = GameApp()
    app.run()


if __name__ == '__main__':
    main()
# Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import slack.http_client
from slack.exception import SlackError, \
                            InvalidAuthError, \
                            NotAuthedError, \
                            AccountInactiveError, \
                            ChannelNotFoundError, \
                            ChannelArchivedError, \
                            NotInChannelError, \
                            RateLimitedError


class TestRaiseErrorClient(unittest.TestCase):
    """Checks that _raise_error_if_not_ok maps each Slack API error string to
    the matching exception type, and stays silent on an ok response."""

    def _assert_raises_for(self, error_name, expected_exception):
        # Shared driver: a not-ok payload carrying *error_name* must raise
        # *expected_exception*. (Leading underscore keeps unittest from
        # collecting this helper as a test.)
        payload = { 'ok': False, 'error': error_name }
        with self.assertRaises(expected_exception):
            slack.http_client._raise_error_if_not_ok(payload)

    def test_ok_response(self):
        # An ok payload must pass through without raising anything.
        slack.http_client._raise_error_if_not_ok({ 'ok': True })

    def test_invalid_auth(self):
        self._assert_raises_for('invalid_auth', InvalidAuthError)

    def test_not_authed(self):
        self._assert_raises_for('not_authed', NotAuthedError)

    def test_account_inactive(self):
        self._assert_raises_for('account_inactive', AccountInactiveError)

    def test_channel_not_found(self):
        self._assert_raises_for('channel_not_found', ChannelNotFoundError)

    def test_is_archived(self):
        self._assert_raises_for('is_archived', ChannelArchivedError)

    def test_not_in_channel(self):
        self._assert_raises_for('not_in_channel', NotInChannelError)

    def test_rate_limited(self):
        self._assert_raises_for('rate_limited', RateLimitedError)

    def test_slack_error(self):
        # Any unrecognized error string falls back to the base SlackError.
        self._assert_raises_for('unknown_error', SlackError)
from __future__ import division
import state
import time
import csv
import random
import sys

# GA parameters: population size; 28 non-attacking pairs is the perfect
# 8-queens fitness; single-letter flags accepted on the command line.
POPULATION_SIZE = 100
MAX_COLLISION = 28
VALID_ARGS = "emg"


class FitnessListener():
    # Counts how many times fitness was evaluated (incremented via log()).
    def __init__(self, qtd=0):
        self._qtd = qtd

    def log(self):
        # Record one fitness evaluation.
        self._qtd += 1

    def retrive_qtd(self):
        # NOTE(review): name is a typo for "retrieve_qtd"; kept because
        # callers elsewhere may depend on it.
        return self._qtd

    def reset(self):
        self._qtd = 0

    def copy(self):
        # Independent listener carrying the same count.
        return FitnessListener(self._qtd)


def choose_method(string):
    """Resolve a (possibly abbreviated) method name to 'roulette' or
    'tourney'; exits the process on anything else.

    NOTE(review): an empty string matches 'roulette' first, since
    "roulette".startswith("") is True.
    """
    if "roulette".startswith(string):
        method = "roulette"
    elif "tourney".startswith(string):
        method = "tourney"
    else:
        sys.exit(string + " is not a valid population generation method.")
    return method


def choose_generations(string):
    """Parse the --generations argument as an int; exits on a non-integer."""
    try:
        generations = int(string)
    except ValueError:
        sys.exit("Argument " + string + " is not an integer.\nThe argument provided with --generations must be an integer.")
    else:
        return generations


def make_config(arguments):
    """Parse command-line arguments (sys.argv style, program name first).

    Long options: --elitist, --method NAME, --generations N, --mutation RATE.
    Short flags (combinable): -e, -m NAME, -g N.

    Returns (elitist, method, max_generations, mutation_rate); any option not
    supplied is left at its default (None / 0.8).
    NOTE(review): --mutation's value is taken as-is from argv (a string) and
    never converted to float here — confirm state.State accepts that.
    """
    elitist = None
    method = None
    max_generations = None
    mutation_rate = 0.8
    # Flag that lets "invalid-looking" tokens pass when they are the value
    # of the immediately preceding option (e.g. the NAME after --method).
    next_is_literal_argument = False
    err = False
    for index, arg in enumerate(arguments[1:]):
        index += 1  # re-align index with the full arguments list
        if arg[:2] == "--":
            argstr = arg[2:]
            if argstr == "elitist":
                elitist = True
            elif argstr == "method":
                if len(arguments) > index+1:
                    methodstr = arguments[index+1]
                    method = choose_method(methodstr)
                    next_is_literal_argument = True
                else:
                    sys.exit("--method used, but no method specified for population generation")
            elif argstr == "generations":
                if len(arguments) > index+1:
                    genstr = arguments[index+1]
                    max_generations = choose_generations(genstr)
                    next_is_literal_argument = True
                else:
                    sys.exit("--generations used, but no number of generations specified")
            elif argstr == "mutation":
                mutation_rate = arguments[index+1]
                next_is_literal_argument = True
            else:
                sys.exit("argument " + argstr + " is invalid")
        elif arg[:1] == "-":
            argstr = arg[1:]
            err = False
            # Every character of a short-flag bundle must be a known flag.
            for c in argstr:
                if c not in VALID_ARGS:
                    print "Unknown command-line argument", c
                    err = True
            if not err:
                if 'e' in argstr:
                    elitist = True
                if 'm' in argstr:
                    # 'm' must be the last flag in the bundle so its value
                    # can follow as the next argv token.
                    if 'm' not in argstr[:-1] and len(arguments) > index+1:
                        methodstr = arguments[index+1]
                        method = choose_method(methodstr)
                        next_is_literal_argument = True
                    elif 'm' in argstr[:-1]:
                        sys.exit("-m option must be immediately followed by method name")
                    else:
                        sys.exit("-m used, but no method specified for population generation")
                if 'g' in argstr:
                    # Same last-position rule for 'g' and its numeric value.
                    if 'g' not in argstr[:-1] and len(arguments) > index+1:
                        genstr = arguments[index+1]
                        max_generations = choose_generations(genstr)
                        next_is_literal_argument = True
                    elif 'g' in argstr[:-1]:
                        sys.exit("-g option must be immediately followed by number of generations")
                    else:
                        sys.exit("-g used, but no number of generations specified")
            else:
                sys.exit(1)
        # If the token was not an option, flag an error — unless the
        # accept-literal flag is up (it is an option's value).
        elif not next_is_literal_argument:
            print "Unknown command-line argument", arg
            err = True
        # The accept-literal flag was up: consume it and lower the flag.
        else:
            next_is_literal_argument = False
    if err:
        sys.exit(1)
    else:
        return elitist, method, max_generations, mutation_rate


def register_loop(population,generation,results_file):
    """Print max/average fitness for this generation and append a CSV row."""
    maxfitness = max([x.fitness() for x in population])
    print "Generation %d, Max fitness: %d" % (generation, max([x.fitness() for x in population]))
    avgfitness = sum([x.fitness() for x in population])/len(population)
    print "Average fitness:", avgfitness
    results_file.writerow([generation, maxfitness, avgfitness])


if __name__ == '__main__':
    # Evolve until a perfect individual (MAX_COLLISION) appears or the
    # optional generation cap is hit; log each generation to a CSV file.
    random.seed(time.time())
    generation = 1
    listener = FitnessListener()
    elitist, method, max_generations, mutation_rate = make_config(sys.argv)
    population = [state.State(listener=listener, crossover_rate = 1.0, mutation_rate = mutation_rate) for x in range(POPULATION_SIZE)]
    # Fall back to defaults for options the command line did not set.
    if elitist == None:
        elitist = False
    if method == None:
        method = "roulette"
    with open('results' + str(int(time.time())) + '.csv', 'w+') as csvfile:
        results_file = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
        results_file.writerow(['Generation', 'Max Fitness', 'Avg Fitness'])
        while not MAX_COLLISION in [x.fitness() for x in population] and ((generation <= max_generations) if max_generations else True):
            register_loop(population = population,generation = generation,results_file = results_file)
            population = state.generateNextPopulation(listener=listener, population=population, n=POPULATION_SIZE, method=method, elitist=elitist)
            generation += 1
        # Log the final generation (the one that ended the loop) as well.
        register_loop(population = population,generation = generation,results_file = results_file)
        for x in population:
            if x.fitness() == MAX_COLLISION:
                print x.state
"""!event [num]: Displays the next upcoming H@B event.""" __
match__ = r"!event( .*)"
# This doesn't work- not updated with eventmaster.py updates
# TODO: Fix This :)

# Import Libraries
import eventmaster
import time
import random
import sys
import unittest
import sys  # NOTE(review): duplicate import of sys; harmless but redundant

class InputsTestCase(unittest.TestCase):
    # Integration tests for switcher input naming.
    # NOTE(review): E2S3 is referenced below but never imported — consistent
    # with the header's warning that this file is out of date; presumably it
    # should be a name from eventmaster. Confirm before running.
    def setUp(self):
        # Connect to a local switcher instance and wait until it is ready.
        self.s3 = E2S3.E2S3Switcher()
        self.s3.set_verbose(0)
        self.s3.set_CommsXML_IP("127.0.0.1")
        self.s3.set_CommsXML_Port(9876)
        if not self.s3.connect():
            # NOTE(review): unittest ignores setUp return values, so this
            # does not actually skip or fail the tests on connect failure.
            return -1
        while self.s3.is_ready() != 1:
            time.sleep(1)

    def test_set_valid_name_on_invalid_input(self):
        # Naming a non-existent input (index 99) must raise ValueError.
        test_str = "PYTEST-{0!s}".format(random.randint(1,10))
        self.assertRaises(ValueError, lambda: self.s3.get_input(99).set_Name(test_str))

    def test_set_valid_name_on_valid_input(self):
        # Set a name on input 0, poll until the switcher processed the
        # command, then read the name back.
        test_str = "PYTEST-{0!s}".format(random.randint(1,10))
        while(self.s3.has_been_processed(self.s3.get_input(0).set_Name(test_str))==0):
            time.sleep(1)
        time.sleep(1)
        self.assertEqual(test_str, self.s3.get_input(0).get_Name())

    def test_set_invalid_name_on_valid_input(self):
        # A non-string name should be rejected: set_Name returns None.
        MyObject = type('MyObject', (object,), {})
        self.assertEqual(self.s3.get_input(0).set_Name(MyObject), None)

# Python 2 print statement: runs the suite and echoes its result object.
print unittest.main()
sys.exit()
#!/usr/bin/python
import psycopg2
import sys
import pprint
import geocoder


def printProgress(iteration, total, prefix='', suffix='', decimals=2, barLength=100):
    """Render a one-line, carriage-return-updated text progress bar.

    :param iteration: number of items completed so far.
    :param total: total number of items.
    :param prefix: text printed before the bar.
    :param suffix: text printed after the percentage.
    :param decimals: decimal places shown in the percentage.
    :param barLength: width of the bar in characters.
    """
    if total <= 0:
        # Robustness: an empty work list previously crashed with
        # ZeroDivisionError; treat it as a single already-done unit.
        total = 1
    filledLength = int(round(barLength * iteration / float(total)))
    percents = round(100.00 * (iteration / float(total)), decimals)
    bar = '#' * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix))
    sys.stdout.flush()
    if iteration == total:
        print("\n")


def main():
    """Delete duplicate users2 rows, keeping only the row with the maximum
    friends_count for each user_id, then commit once at the end."""
    # NOTE(review): this connection string is unused — the actual connection
    # below is the hard-coded twitterdb one. Confirm before deleting.
    conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'"

    # An exception propagates here if the connection cannot be made.
    conn = psycopg2.connect("dbname='twitterdb' user='test' host='localhost' password='test'")
    cursor = conn.cursor()
    cursor.execute("SELECT user_id FROM users2 ")
    rows = cursor.fetchall()
    total = len(rows)
    printProgress(0, total, prefix='Progress:', suffix='Complete', barLength=50)
    # Hoisted out of the loop: one cursor reused for every delete instead of
    # creating a fresh cursor per row.
    delete_cursor = conn.cursor()
    for done, r in enumerate(rows, start=1):
        print(r[0])
        delete_cursor.execute(
            "delete from users2 where user_id=(%s) and friends_count!="
            "(select max(friends_count) from users2 where user_id=(%s))",
            (str(r[0]), str(r[0])))
        # BUG FIX: the old code printed the bar with the counter value from
        # *before* the increment, so it never reached 100%; report progress
        # after each completed row instead.
        printProgress(done, total, prefix='Progress:', suffix='Complete', barLength=50)
    conn.commit()


if __name__ == "__main__":
    # BUG FIX: main() used to run unconditionally at module level, so merely
    # importing this file connected to the database and deleted rows.
    main()
"""Functions for downloading and reading MNIST data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import urllib import numpy from six.moves import xrange # pylint: disable=redefined-builtin SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' def maybe_download(filename, work_directory): """Download the data from Yann's website, unless it's already here.""" if not os.path.exists(work_directory): os.mkdir(work_directory) filepath = os.path.join(work_directory, filename) if not os.path.exists(filepath): filepath, _ = urllib.urlretriev
e(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return numpy.frombuffer(bytestream.read(4), dtype=dt) def extra
ct_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, filename)) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) data = data.reshape(num_images, rows, cols, 1) return data def dense_to_one_hot(labels_dense, num_classes=10): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] index_offset = numpy.arange(num_labels) * num_classes labels_one_hot = numpy.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot def extract_labels(filename, one_hot=False): """Extract the labels into a 1D uint8 numpy array [index].""" print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, filename)) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) if one_hot: return dense_to_one_hot(labels) return labels class DataSet(object): def __init__(self, images, labels, fake_data=False): if fake_data: self._num_examples = 10000 else: assert images.shape[0] == labels.shape[0], ( "images.shape: %s labels.shape: %s" % (images.shape, labels.shape)) self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) assert images.shape[3] == 1 images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]) # Convert from [0, 255] -> [0.0, 1.0]. 
images = images.astype(numpy.float32) images = numpy.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 self._index_in_epoch = 0 @property def images(self): return self._images @property def labels(self): return self._labels @property def num_examples(self): return self._num_examples @property def epochs_completed(self): return self._epochs_completed def next_batch(self, batch_size, fake_data=False): """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1.0 for _ in xrange(784)] fake_label = 0 return [fake_image for _ in xrange(batch_size)], [ fake_label for _ in xrange(batch_size)] start = self._index_in_epoch self._index_in_epoch += batch_size if self._index_in_epoch > self._num_examples: # Finished epoch self._epochs_completed += 1 # Shuffle the data perm = numpy.arange(self._num_examples) numpy.random.shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] # Start next epoch start = 0 self._index_in_epoch = batch_size assert batch_size <= self._num_examples end = self._index_in_epoch return self._images[start:end], self._labels[start:end] def read_data_sets(train_dir, fake_data=False, one_hot=False): class DataSets(object): pass data_sets = DataSets() if fake_data: data_sets.train = DataSet([], [], fake_data=True) data_sets.validation = DataSet([], [], fake_data=True) data_sets.test = DataSet([], [], fake_data=True) return data_sets TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = maybe_download(TRAIN_IMAGES, train_dir) train_images = extract_images(local_file) local_file = maybe_download(TRAIN_LABELS, train_dir) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = maybe_download(TEST_IMAGES, train_dir) test_images = extract_images(local_file) local_file = 
maybe_download(TEST_LABELS, train_dir) test_labels = extract_labels(local_file, one_hot=one_hot) validation_images = train_images[:VALIDATION_SIZE] validation_labels = train_labels[:VALIDATION_SIZE] train_images = train_images[VALIDATION_SIZE:] train_labels = train_labels[VALIDATION_SIZE:] data_sets.train = DataSet(train_images, train_labels) data_sets.validation = DataSet(validation_images, validation_labels) data_sets.test = DataSet(test_images, test_labels) return data_sets
ause: {"name": the name of model's field, "desc": reverse sort on this field if True} Returns: ([joins], order) - a tuple of joins required for this ordering to work and ordering clause itself; join is None if no join required or [(aliased entity, relationship field)] if joins required. """ def by_similarity(): """Join similar_objects subquery, order by weight from it.""" join_target = flask.g.similar_objects_query.subquery() join_condition = model.id == join_target.c.id joins = [(join_target, join_condition)] order = join_target.c.weight return joins, order def by_ca(): """Join fulltext index table, order by indexed CA value.""" alias = sa.orm.aliased(Record, name=u"fulltext_{}".format(self._count)) joins = [(alias, sa.and_( alias.key == model.id, alias.type == model.__name__, alias.property == key) )] order = alias.content return joins, order def by_foreign_key(): """Join the related model, order by title or name/email.""" related_model = attr.property.mapper.class_ if issubclass(related_model, models.mixins.Titled): joins = [(alias, _)] = [(sa.orm.aliased(attr), attr)] order = alias.title elif issubclass(related_model, models.Person): joins = [(alias, _)] = [(sa.orm.aliased(attr), attr)] order = sorting_field_for_person(alias) else: raise NotImplementedError(u"Sorting by {model.__name__} is " u"not implemented yet." .format(model=related_model)) return joins, order def by_m2m(): """Join the Person model, order by name/email. Implemented only for ObjectOwner mapping. 
""" if issubclass(attr.target_class, models.object_owner.ObjectOwner): # NOTE: In the current implementation we sort only by the first # assigned owner if multiple owners defined oo_alias_1 = sa.orm.aliased(models.object_owner.ObjectOwner) oo_alias_2 = sa.orm.aliased(models.object_owner.ObjectOwner) oo_subq = db.session.query( oo_alias_1.ownable_id, oo_alias_1.ownable_type, oo_alias_1.person_id, ).filter( oo_alias_1.ownable_type == model.__name__, ~sa.exists().where(sa.and_( oo_alias_2.ownable_id == oo_alias_1.ownable_id, oo_alias_2.ownable_type == oo_alias_1.ownable_type, oo_alias_2.id < oo_alias_1.id, )), ).subquery() owner = sa.orm.aliased(models.Person, name="owner") joins = [ (oo_subq, sa.and_(model.__name__ == oo_subq.c.ownable_type, model.id == oo_subq.c.ownable_id)), (owner, oo_subq.c.person_id == owner.id), ] order = sorting_field_for_person(owner) else: raise NotImplementedError(u"Sorting by m2m-field '{key}' " u"is not implemented yet." .format(key=key)) return joins, order # transform clause["name"] into a model's field name key = clause["name"].lower() if key == "__similarity__": # special case if hasattr(flask.g, "similar_objects_query"): joins, order = by_similarity() else: raise BadQueryException("Can't order by '__similarity__' when no ", "'similar' filter was applied.") else: key, _ = self.attr_name_map[model].get(key, (key, None)) attr = getattr(model, key.encode('utf-8'), None) if attr is None: # non object attributes are treated as custom attributes self._count += 1 joins, order = by_ca() elif (isinstance(attr, sa.orm.attributes.InstrumentedAttribute) and isinstance(attr.property, sa.orm.properties.RelationshipProperty)): joins, order = by_foreign_key() elif isinstance(attr, sa.ext.associationproxy.AssociationProxy): joins, order = by_m2m() else: # a simple attribute joins, order = Non
e, attr if clause.get("desc", False): order = order.desc() return
joins, order join_lists, orders = zip(*[joins_and_order(clause) for clause in order_by]) for join_list in join_lists: if join_list is not None: for join in join_list: query = query.outerjoin(*join) return query.order_by(*orders) def _build_expression(self, exp, object_class): """Make an SQLAlchemy filtering expression from exp expression tree.""" if "op" not in exp: return None def autocast(o_key, operator_name, value): """Try to guess the type of `value` and parse it from the string. Args: o_key (basestring): the name of the field being compared; the `value` is converted to the type of that field. operator_name: the name of the operator being applied. value: the value being compared. Returns: a list of one or several possible meanings of `value` type compliant with `getattr(object_class, o_key)`. """ def has_date_or_non_date_cad(title, definition_type): """Check if there is a date and a non-date CA named title. Returns: (bool, bool) - flags indicating the presence of date and non-date CA. """ cad_query = db.session.query(CustomAttributeDefinition).filter( CustomAttributeDefinition.title == title, CustomAttributeDefinition.definition_type == definition_type, ) date_cad = bool(cad_query.filter( CustomAttributeDefinition. attribute_type == CustomAttributeDefinition.ValidTypes.DATE, ).count()) non_date_cad = bool(cad_query.filter( CustomAttributeDefinition. 
attribute_type != CustomAttributeDefinition.ValidTypes.DATE, ).count()) return date_cad, non_date_cad if not isinstance(o_key, basestring): return [value] key, custom_filter = (self.attr_name_map[object_class] .get(o_key, (o_key, None))) date_attr = date_cad = non_date_cad = False try: attr_type = getattr(object_class, key).property.columns[0].type except AttributeError: date_cad, non_date_cad = has_date_or_non_date_cad( title=key, definition_type=object_class.__name__, ) if not (date_cad or non_date_cad) and not custom_filter: # TODO: this logic fails on CA search for Snapshots pass # no CA with this name and no custom filter for the field # raise BadQueryException(u"Model {} has no field or CA {}" # .format(object_class.__name__, o_key)) else: if isinstance(attr_type, sa.sql.sqltypes.Date): date_attr = True converted_date = None if (date_attr or date_cad) and isinstance(value, basestring): try: converted_date = convert_date_format( value, CustomAttributeValue.DATE_FORMAT_JSON, CustomAttributeValue.DATE_FORMAT_DB, ) except (TypeError, ValueError): # wrong format or not a date if not non_date_cad: # o_key is not a non-date CA raise BadQueryException(u"Field '{}' expects a '{}' date" .format( o_key, CustomAttributeValue.DATE_FORMAT_JSON, )) if date_attr or (date_cad and not non_date_cad): # Filter by converted date return [converted_date] elif da
class BinaryTree:
    """A binary tree node holding a root value and optional subtrees.

    Follows the classic textbook interface: inserting a child when one
    already exists pushes the existing subtree one level down under the
    newly inserted node.
    """

    def __init__(self, rootObj):
        self.key = rootObj        # payload stored at this node
        self.leftChild = None     # left subtree (BinaryTree or None)
        self.rightChild = None    # right subtree (BinaryTree or None)

    def insertLeft(self, newNode):
        """Insert newNode as the left child.

        Any existing left subtree becomes the left child of the new node.
        """
        # PEP 8: compare against None with `is`, not `==`.
        if self.leftChild is None:
            self.leftChild = BinaryTree(newNode)
        else:
            t = BinaryTree(newNode)
            t.leftChild = self.leftChild
            self.leftChild = t

    def insertRight(self, newNode):
        """Insert newNode as the right child.

        Any existing right subtree becomes the right child of the new node.
        """
        if self.rightChild is None:
            self.rightChild = BinaryTree(newNode)
        else:
            t = BinaryTree(newNode)
            t.rightChild = self.rightChild
            self.rightChild = t

    def getRightChild(self):
        """Return the right subtree, or None if absent."""
        return self.rightChild

    def getLeftChild(self):
        """Return the left subtree, or None if absent."""
        return self.leftChild

    def setRootVal(self, obj):
        """Replace the value stored at this node."""
        self.key = obj

    def getRootVal(self):
        """Return the value stored at this node."""
        return self.key
from sys import version, exit

from setuptools import setup

# BUG FIX: requirements.txt was opened without ever being closed (a file
# handle leak); read it under a context manager like README.md below.
with open("requirements.txt") as f:
    requirements = f.read().split()

with open("README.md") as f:
    long_description = f.read()

setup(
    name='bagcat',
    version='0.0.6',
    url='https://github.com/umd-mith/bagcat/',
    author='Ed Summers',
    author_email='ehs@pobox.com',
    py_modules=['bagcat', ],
    # Runtime dependencies come straight from requirements.txt above.
    install_requires=requirements,
    description="A command line utility for managing BagIt packages in Amazon S3",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Installs a `bagcat` console command that dispatches to bagcat.main().
    entry_points={"console_scripts": ["bagcat=bagcat:main"]},
)
""" tests.test_component_demo ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests demo component. """ import unittest import homeassistant.core as ha import homeassistant.components.automation as automation import homeassistant.components.automation.event as event from homeassistant.const import CONF_PLATFORM, ATTR_ENTITY_ID class TestAutomationEvent(unittest.TestCase): """ Test the event automation. """ def setUp(self): # pylint: disable=invalid-name self.hass = ha.HomeAssistant() self.calls = [] def record_call(service): self.calls.append(service) self.hass.services.register('test', 'automation', record_call) def tearDown(self): # pylint: disable=invalid-name """ Stop down stuff we started. """ self.hass.stop() def test_setup_fails_if_unknown_platform(self): self.assertFalse(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'i_do_not_exist' } })) def test_service_data_not_a_dict(self): automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'event', event.CONF_EVENT_TYPE: 'test_event', automation.CONF_SERVICE: 'test.automation', automation.CONF_SERVICE_DATA: 100 } }) self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_service_specify_data(self): automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'event', event.CONF_EVENT_TYPE: 'test_event', automation.CONF_SERVICE: 'test.automation', automation.CONF_SERVICE_DATA: {'some': 'data'} } }) self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual('data'
, self.calls[0].data['some']) def test_service_specify_entity_id(self): automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'event', event.CONF_EVENT_TYPE: 'test_event', automation.CONF_SERVICE: 'test.automation', automation.CONF_SERVICE_ENTITY_ID: 'hello.world' } }) self.hass.bus.fire
('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) self.assertEqual(['hello.world'], self.calls[0].data[ATTR_ENTITY_ID])
from typing import List


class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum of a rotated ascending array.

        Scanning left to right, the first element smaller than nums[0]
        is the rotation point and therefore the minimum; if no element
        is smaller, the array was not rotated and nums[0] is already
        the minimum.
        """
        pivot = nums[0]
        return next((value for value in nums if value < pivot), pivot)
values.pop('info_cache', None) if info_cache is not None: instance_ref['info_cache'].update(info_cache) security_groups = values.pop('security_groups', []) instance_ref['extra'] = models.InstanceExtra() instance_ref['extra'].update( {'numa_topology': None, 'pci_requests': None, 'vcpu_model': None, }) instance_ref['extra'].update(values.pop('extra', {})) instance_ref.update(values) def _get_sec_group_models(session, security_groups): models = [] def
ault_group = _security_group_ensure_default(context, session) if 'default' in security_groups: models.append(default_group) # Generate a new list, so we don't modify the original security_groups = [x for x in security_groups if x != 'default'] if security_groups: models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups)) return models session = get_session() with session.begin(): if 'hostname' in values: _validate_unique_server_name(context, session, values['hostname']) instance_ref.security_groups = _get_sec_group_models(session, security_groups) session.add(instance_ref) # create the instance uuid to ec2_id mapping entry for instance ec2_instance_create(context, instance_ref['uuid']) return instance_ref def _instance_data_get_for_user(context, project_id, user_id, session=None): result = model_query(context, models.Instance, ( func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), ), session=session).\ filter_by(project_id=project_id) if user_id: result = result.filter_by(user_id=user_id).first() else: result = result.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def instance_destroy(context, instance_uuid, constraint=None): session = get_session() with session.begin(): if uuidutils.is_uuid_like(instance_uuid): instance_ref = _instance_get_by_uuid(context, instance_uuid, session=session) else: raise exception.InvalidUUID(instance_uuid) query = model_query(context, models.Instance, session=session).\ filter_by(uuid=instance_uuid) if constraint is not None: query = constraint.apply(models.Instance, query) count = query.soft_delete() if count == 0: raise exception.ConstraintNotMet() model_query(context, models.SecurityGroupInstanceAssociation, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceInfoCache, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceMetadata, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceFault, session=session).\ filter_by(instance_uuid=instance_uuid).\ 
soft_delete() model_query(context, models.InstanceExtra, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceSystemMetadata, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() # NOTE(snikitin): We can't use model_query here, because there is no # column 'deleted' in 'tags' table. session.query(models.Tag).filter_by(resource_id=instance_uuid).delete() return instance_ref @require_context def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): return _instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, use_slave=use_slave) def _instance_get_by_uuid(context, uuid, session=None, columns_to_join=None, use_slave=False): result = _build_instance_get(context, session=session, columns_to_join=columns_to_join, use_slave=use_slave).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, columns_to_join=None): try: result = _build_instance_get(context, columns_to_join=columns_to_join ).filter_by(id=instance_id).first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result except db_exc.DBError: # NOTE(sdague): catch all in case the db engine chokes on the # id because it's too long of an int to store. msg = _("Invalid instance id %s in request") % instance_id LOG.warn(msg) raise exception.InvalidID(id=instance_id) def _build_instance_get(context, session=None, columns_to_join=None, use_slave=False): query = model_query(context, models.Instance, session=session, project_only=True, use_slave=use_slave).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')) if columns_to_join is None: columns_to_join = ['metadata', 'system_metadata'] for column in columns_to_join: if column in ['info_cache', 'security_groups']: # Already always joined above continue if 'extra.' 
in column: query = query.options(undefer(column)) else: query = query.options(joinedload(column)) # NOTE(alaski) Stop lazy loading of columns not needed. for col in ['metadata', 'system_metadata']: if col not in columns_to_join: query = query.options(noload(col)) return query def _instances_fill_metadata(context, instances, manual_joins=None, use_slave=False): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. :param context: security context :param instances: list of instances to fill :param manual_joins: list of tables to manually join (can be any combination of 'metadata' and 'system_metadata' or None to take the default of both) """ uuids = [inst['uuid'] for inst in instances] if manual_joins is None: manual_joins = ['metadata', 'system_metadata'] meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids, use_slave=use_slave): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids, use_slave=use_slave): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) if 'pci_devices' in manual_joins: for row in _instance_pcidevs_get_multi(context, uuids): pcidevs[row['instance_uuid']].append(row) filled_instances = [] for inst in instances: inst = dict(inst) inst['system_metadata'] = sys_meta[inst['uuid']] inst['metadata'] = meta[inst['uuid']] if 'pci_devices' in manual_joins: inst['pci_devices'] = pcidevs[inst['uuid']] filled_instances.append(inst) return filled_instances def _manual_join_
#!/usr/bin/env python
import os.path
import unittest

import pep8

# Root of the source tree to lint (two levels above this test file).
SRC_PATH = os.path.dirname(os.path.dirname(__file__))
# Directories / bundled eggs that must not be linted.
EXCLUDE = ['.svn', 'CVS', '.bzr', '.hg', '.git',
           'Paste-1.7.5.1-py2.6.egg',
           'PasteDeploy-1.5.0-py2.6.egg',
           'data']


class AdhocracyStyleGuide(pep8.StyleGuide):
    """StyleGuide that skips the pep8 checks this project does not enforce."""

    # PERF: this list used to be rebuilt inside ignore_code() on every
    # single call; it is now a class-level frozenset, built once, with
    # O(1) membership tests.
    IGNORED = frozenset([
        'E111',  # indentation is not a multiple of four
        'E121',  # continuation line indentation is not a multiple of four
        'E122',  # continuation line missing indentation or outdented
        'E123',  # closing bracket does not match indentation of opening
                 # bracket
        'E124',  # closing bracket does not match visual indentation
        'E126',  # continuation line over-indented for hanging indent
        'E127',  # continuation line over-indented for visual indent
        'E128',  # continuation line under-indented for visual indent
        'E225',  # missing whitespace around operator
        'E226',  # missing optional whitespace around operator
        'E231',  # missing whitespace after comma/colon
        'E241',  # multiple spaces after comma
        'E251',  # no spaces around keyword/parameter equals
        'E261',  # at least two spaces before inline comment
        'E301',  # expected 1 blank line
        'E302',  # expected 2 blank lines
        'E303',  # too many blank lines
        'E501',  # line too long
        'E701',  # multiple statements on one line
        'E702',  # multiple statements on one line
        'E711',  # comparison to None should be 'if cond is None:'
        'E712',  # comparison to True should be 'if cond is True:' or
                 # 'if cond:'
        'W291',  # trailing whitespace
        'W292',  # no newline at end of file
        'W293',  # blank line contains whitespace
        'W391',  # blank line at end of file
    ])

    def ignore_code(self, code):
        """Return True for checks this project deliberately ignores."""
        return code in self.IGNORED


class TestPep8(unittest.TestCase):
    def test_pep8(self):
        """The whole source tree must be clean under the relaxed guide."""
        sg = AdhocracyStyleGuide(exclude=EXCLUDE)
        sg.input_dir(SRC_PATH)
        self.assertEqual(sg.options.report.get_count(), 0)
# 0 1 2 4 6 7 9 # / \ | # 3 8 5 # # 02:04 -- union(9, 4) -------- # WAS: id[] 0 1 2 4 4 6 6 7 4 9 # NOW: id[] 0 1 2 4 4 6 6 7 4 4 # . . . . . . . . . X # # 0 1 2 4 6 7 # /|\ | # 3 8 9 5 # # # 02:12 -- union(2, 1) -------- # WAS: id[] 0 1 2 4 4 6 6 7 4 4 # NOW: id[] 0 2 2 4 4 6 6 7 4 4 # . X . . . . . . . . # # 0 2 4 6 7 # | /|\ | # 1 3 8 9 5 # # # 02:17 -- union(5, 0) -------- # WAS: id[] 0 1 2 4 4 6 6 7 4 4 # NOW: id[] 6 2 2 4 4 6 6 7 4 4 # X . . . . . . . . . # # 2 4 6 7 # | /|\ / \ # 1 3 8 9 0 5 # # # 02:29 -- union(7, 2) -------- # WAS: id[] 6 2 2 4 4 6 6 7 4 4 # NOW: id[] 6 2 2 4 4 6 6 2 4 4 # . . . . . . . X . . # # 2 4 6 # / \ /|\ / \ # 1 7 3 8 9 0 5 # # # 02:37 -- union(6, 1) -------- # WAS: id[] 6 2 2 4 4 6 6 2 4 4 # NOW: id[] 6 2 6 4 4 6 6 2 4 4 # . . X . . . . . . . # # 2 4 6 # / \ /|\ /|\ # 1 7 3 8 9 0 2 5 # / \ # 1 7 # # # 02:37 -- union(6, 1) -------- # WAS: id[] 6 2 2 4 4 6 6 2 4 4 # NOW: id[] 6 2 6 4 4 6 6 2 4 4 # . . X . . . . . . . # # 4 6 # /|\ /|\ # 3 8 9 0 2 5 # / \ # 1 7 # # 02:50 -- union(7, 3) -------- # WAS: id[] 6 2 6 4 4 6 6 2 4 4 # NOW: id[] 6 2 6 4 6 6 6 2 4 4 # . . . . X . . . . . # # +----6 # / /|\ # 4 0 2 5 # /|\ / \ # 3 8 9 1 7 # ## Quick-union defect: ## * Union too expensive (N array accesses) ## * Trees are flat, but too expensive to keep them flat. ## ## Quick-union defect: ## * Trees can get tall. ## * Find too expensive (could be N array accesses). ## #-------------------------------------------------------------------------- # 05:28 WEIGHTED QUICK-UNION ANALYSIS # # 05:38 RUNNING TIME # * FIND: takes time proportional to depth of p and q. # * UNION: takes constant time, given roots. 
# # 05:45 PROPOSTION: Depth of any node x is at most lg N (lg = log_2(N)) # The cost scales: # Ex: N = 1,000 depth is 10 # Ex: N = 1,000,000 depth is 20 # Ex: N = 1,000,000,000 depth is 30 # depth for 10 objects <= lg(10) = 3.322 # depth for 100 objects <= lg(100) = 6.644 # depth for 1001 objects <= lg(1000) = 9.966 # # 06:23 PROOF: When does depth of x increase? # # Increases by 1 when tree T1 containing x is merged into another tree T2. # * The size of the tree containing x at least doubles since |T2| >= |T1| # * Size of tree containing x can double at most lg(N) times. Why? # When the depth of x increases, the size of its tree size at least doubles # Cost model init union union # quick-find N N 1 # quick-union N N N <- worst case, if tree is tall # weighter QU N lg(N) lg(N) <- includes cost of finding roots # Q: Stop at guaranteed acceptable performance? # A: No, easy to improve further. #-------------------------------------------------------------------------- # 08:26 IMPROVEMENT 2: PATH COMPRESSION # # QUICK UNION WITH PATH COMPRESSION. # Just after computing the root of p, set the id of each examined node to point to that root. # # 10:01 WEIGHTED QUICK-UNION WITH PATH COMPRESSION: AMORTIZED ANALYSIS # # PROPOSITION: [Hopcroft-Ulman, Tarjan] Starting from an N lg*N (iterate log fnc) # empty data structure, ny sequence of M union-find ops ------- ---- # on N objects makes <= c(N + M lg* N) array accesses. 1 0 # * Analysis can be improved to N + M alpha(M, N). 2 1 # * Simple algorithm with fascinating mathematics. 4 2 # 16 3 # 65536 4 # 2^65536 5 # ITERATIVE LOG FUNCTION: # log*N function is the number of times you have to take the log of N to get 1. # REAL WORLD: Think of it as a number less than 5 # 11:23 QUESTION: IS THERE A SIMPLE ALGORITHM THAT IS LINEAR (This one is so close) # ANSWER: No (Fredman and Sacks) #-------------------------------------------------------------------------- # 12:31 SUMMARY # BOTTOM LINE. 
Weighted quick union (with path compression) makes it # possible to solve problems that could not otherwise be addressed. # # $ M union-find operations on a set of N objects # # $ algorithm worst-case time # $ ------------------------------ --------------------- # $ quick-find M * N # $ quick-union M * N # $ weighted QU N + M log N # $ QU + path compression N + M log N # $ weighted QU + path compression N + M lg*N # # EX. [10^9 union and finds with 10^9 objects] # * WQUPC reduces time from 30 years to 6 seconds. # * Supercomputer won't help much; good algorithm enables solution. #-------------------------------------------------------------------------- # LECTURE QUESTION: # Suppose that the id[] array during the weightes quick union algorithm is # __0__ 8 # 0 1 2 3 4 5 6 7 8 9 / /|\ \ |\ # 0 0 0 0 0 0 7 8 8 8 1 2 3 4 5 7 9 # | # 6 # ANSWER Which id[] entry changes with union(3,6)? ID[8] # # EXPLANATION: In weighted quick union, we make the root of the smaller tree # points to the root of the larger tree. In this example, the algorithm sets id[8]=0 # # Be careful not to confuse union-by-size with union-by-height - the former # uses the **size** of the tree (number of nodes) while the latter uses # the **height** of the tree (number of links on longest path from the root # of the tree to a leaf node). Both variants guarantee logarithmic height. # There is a third variant known as "union-by-rank" that is also widely used. ########################################################################### # Lecture Week 1 Union-Find Applications (9:22) ########################################################################### # UNION-FIND APPLICATIONS: (00:27) Week 1 Lecture "Union-Find Applications" (1:22) # * Percolation # * Games (Go, Hex) # X Dynamic connectivity # * Least common ancestor # * Equivalence of finite state automata # * Hoshen-Kopelman algorithm in physics. # * Hinley-Milner polymorphic type inference. # * Kruskal's minimum spanning tree algorithm. 
# Graph processing algorithm which uses Union-Find as a sub-routine # * Compiling equivalence statements in Fortran. # * Morphological attribute openings and closings. # ** Matlab's bwlabel() function in **image processing. # How to label area in images # 02:13 A MODEL FOR MANY PHYSICAL SYSTEMS: # * N-by-N grid of sites. # * Each site is open with probability p (or blocked with probability 1-p). # * Syst
em percolates iff top and bottom are connected by open sites. # # model system vacant site occupied site percolates # ------------------ ---------- ----------- ------------- ---------- # electricity material conductor insulated conducts # fluid flow material empty blocked porous # social interaction population person empty communicates # # Goes on to descri
be percolation... # 08:12 SUBTEXT OF TODAY'S LECTURE (AND THIS COURSE) # # STEPS TO DEVELOPING A USABLE ALGORITHM. # * Model the problem. # * Find an algorithm to solve it. # * Fast enough? Fits in memory? # * If not, figure out why. # * Find a way to address the problem. # * Iterate until satisfied. # 09:15 QUESTION # When opening one new site in the percolation simulation, how many times is union() called? # ANSWER: 0, 1, 2, 3, or 4 # EXPLANATION: It is called for each neighboring site that is already open. # There are 4 possible neighbors, but some of them may not already be open. ########################################################################### # Question 3 # Which of the followint id[] arrays(s) could be thr result of running # the weightes quick union algorit
from pychess.Utils.const import *


class Rating:
    """One rating category (blitz, standard, ...) for a single player.

    Every numeric field is an int (or None); ``deviation`` defaults to
    DEVIATION_NONE from the pychess constants.
    """

    def __init__(self, ratingtype, elo, deviation=DEVIATION_NONE,
                 wins=0, losses=0, draws=0, bestElo=0, bestTime=0):
        self.type = ratingtype
        for v in (elo, deviation, wins, losses, draws, bestElo, bestTime):
            # PEP 8: identity test for None, isinstance for type checks
            # (was `v == None or type(v) == int`).
            assert v is None or isinstance(v, int), v
        self.elo = elo
        self.deviation = deviation
        self.wins = wins
        self.losses = losses
        self.draws = draws
        self.bestElo = bestElo
        self.bestTime = bestTime

    def get_elo(self):
        # BUG FIX: this used to return self._elo, which nothing ever
        # assigned (__init__ stores the value as self.elo), so calling
        # it raised AttributeError; delegate to the real attribute.
        return self.elo

    def set_elo(self, elo):
        self.elo = elo

    def __repr__(self):
        r = "type=%s, elo=%s" % (self.type, self.elo)
        # Only mention optional fields that carry information.
        if self.deviation is not None:
            r += ", deviation=%s" % str(self.deviation)
        if self.wins > 0:
            r += ", wins=%s" % str(self.wins)
        if self.losses > 0:
            r += ", losses=%s" % str(self.losses)
        if self.draws > 0:
            r += ", draws=%s" % str(self.draws)
        if self.bestElo > 0:
            r += ", bestElo=%s" % str(self.bestElo)
        if self.bestTime > 0:
            r += ", bestTime=%s" % str(self.bestTime)
        return r

    def copy(self):
        """Return an independent Rating with identical field values."""
        return Rating(self.type, self.elo, deviation=self.deviation,
                      wins=self.wins, losses=self.losses, draws=self.draws,
                      bestElo=self.bestElo, bestTime=self.bestTime)

    def update(self, rating):
        """Copy every differing field from *rating*.

        Raises:
            TypeError: if the two ratings are of different categories.
        """
        if self.type != rating.type:
            raise TypeError
        # BUG FIX: this was an elif-chain, so at most ONE differing field
        # was copied per call; every changed field must be synchronized.
        if self.elo != rating.elo:
            self.elo = rating.elo
        if self.deviation != rating.deviation:
            self.deviation = rating.deviation
        if self.wins != rating.wins:
            self.wins = rating.wins
        if self.losses != rating.losses:
            self.losses = rating.losses
        if self.draws != rating.draws:
            self.draws = rating.draws
        if self.bestElo != rating.bestElo:
            self.bestElo = rating.bestElo
        if self.bestTime != rating.bestTime:
            self.bestTime = rating.bestTime
from flask import Flask, request, abort
import json
import ndb_util
import model
from google.appengine.api import users
from google.appengine.ext import ndb
from flask_restful import Resource

#TODO auth stuff

class OrganizationApi(Resource):
    """REST resource for Organization entities stored in App Engine NDB.

    The datastore key id is the organization's own user id; handlers
    respond 401 both for missing entities and for failed authorization.
    """

    def get(self, id=None):
        """Return the organization as JSON.

        Allowed for the organization account itself, or for a user whose
        key appears in the organization's ``workers`` list.
        """
        id = str(id)
        # NOTE(review): str(None) == 'None', so this check can never
        # trigger -- it would need to run before the str() call above.
        if id is None:
            print "soo id is None"
            abort(401)
        org_key = ndb.Key('Organization', id)
        org = org_key.get()
        if org is None:
            print 'org doesnt exists'
            abort(401)
        client_id = users.get_current_user().user_id()
        # The caller may also be a user that works for this organization.
        user_key = ndb.Key('User', client_id)
        if client_id != id and user_key not in org.workers:
            abort(401)
        print str(type(org.workers)) + ' ' + str(org.workers) + ' ' + str(user_key)
        return org.to_json()

    def put(self, id=None):
        """Update an existing organization from the JSON request body.

        Only the organization's own account may update it (401 otherwise).
        """
        id = str(id)
        client_id = users.get_current_user().user_id()
        if id is None or client_id != id:
            print id + ' ' + client_id
            print "first one"
            abort(401)
        org_key = ndb.Key('Organization', id)
        org = org_key.get()
        print org
        if org is None:
            print "second one"
            abort(401)
        body = request.get_json(force=True)
        body['id'] = id
        # Replace raw worker ids with NDB User keys before hydration.
        # NOTE(review): `body['workers'] > 0` compares a list with an int
        # (in Python 2 this is True for empty and non-empty lists alike);
        # presumably meant to test the list is non-empty -- confirm.
        if body['workers'] > 0:
            body['workers'] = self._generate_kind_keys(body['workers'], 'User')
        # entity_from_dict returns False when the payload is invalid.
        org = org.entity_from_dict(body)
        if org is False:
            print "third one"
            abort(401)
        else:
            key = org.put()
            print key
        return org.to_json()

    def post(self):
        """Create a new organization keyed by the caller's user id.

        Refuses (401) when an organization with that id already exists.
        """
        body = request.get_json(force=True)
        body['id'] = users.get_current_user().user_id()
        org_key = ndb.Key('Organization', body['id'])
        if org_key.get() != None:
            abort(401)
        org = model.Organization()
        org = org.entity_from_dict(body)
        print org
        if org is False:
            abort()
        else:
            org.put()
        return org.to_json()

    def delete(self, id=None):
        """Delete the caller's own organization; 401 for anyone else."""
        id = str(id)
        client_id = users.get_current_user().user_id()
        if id is None or client_id != id:
            abort(401)
        org_key = ndb.Key('Organization', id)
        org_key.delete()
        return '', 200

    def _generate_kind_keys(self, ids, kind):
        """Build an ndb.Key of the given *kind* for every id in *ids*."""
        keys = []
        for id in ids:
            keys.append(ndb.Key(kind, id))
        return keys
Copyright (C) 2008-2017 Olivier Aubert <contact@olivieraubert.net> # # Advene is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Advene is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Advene; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # """Transcription view. """ import logging logger = logging.getLogger(__name__) import sys import re import os import operator from gi.repository import Gdk from gi.repository import Gtk from gi.repository import Pango try: from gi.repository import GtkSource except ImportError: GtkSource=None import urllib.request, urllib.parse, urllib.error import advene.core.config as config # Advene part from advene.model.package import Package from advene.model.schema import AnnotationType import advene.util.importer import advene.util.helper as helper from advene.util.tools import unescape_string, is_uri from gettext import gettext as _ from advene.gui.views import AdhocView from advene.gui.util import dialog, get_pixmap_button, get_small_stock_button, name2color from advene.gui.util import decode_drop_parameters from advene.gui.edit.properties import EditWidget from advene.gui.util.completer import Completer from advene.gui.widget import TimestampRepresentation name="Note-taking view plugin" def register(controller): controller.register_viewclass(TranscriptionEdit) class TranscriptionImporter(advene.util.importer.GenericImporter): """Transcription importer. """ def __init__(self, transcription_edit=None,
**kw): super(TranscriptionImporter, self).__init__(**kw) self.transcription_edit=transcription_edit self.name = _("Transcription importer") def process_file(self, filename): if filename != 'transcription': return None if self.package is None: self.init_package() self.conve
rt(self.transcription_edit.parse_transcription()) return self.package class TranscriptionEdit(AdhocView): view_name = _("Note taking") view_id = 'transcribe' tooltip = _("Take notes on the fly as a timestamped transcription") def __init__ (self, controller=None, parameters=None, filename=None): super(TranscriptionEdit, self).__init__(controller=controller) self.close_on_package_load = False self.contextual_actions = ( (_("Save view"), self.save_view), (_("Save default options"), self.save_default_options), ) self.controller=controller self.package=controller.package self.sourcefile=None self.empty_re = re.compile(r'^\s*$') self.options = { 'timestamp': True, # _("If checked, click inserts timestamp marks")) 'play-on-scroll': False, 'empty-annotations': True, # _("Do not generate annotations for empty text")) 'delay': config.data.reaction_time, # Marks will be automatically inserted it no keypress occurred in the 3 previous seconds. 'automatic-mark-insertion-delay': 1500, 'insert-on-double-click': True, 'insert-on-single-click': False, 'mark-prefix': "", 'mark-suffix': "", 'autoscroll': True, 'autoinsert': True, 'snapshot-size': 32, 'font-size': 0, 'annotation-type-id': None, } self.colors = { 'default': name2color('lightblue'), 'ignore': name2color('tomato'), 'current': name2color('green'), } self.marks = [] self.current_mark = None opt, arg = self.load_parameters(parameters) self.options.update(opt) self.button_height=20 # When modifying an offset with Control+Scroll, store the last value. 
# If play-on-scroll, then set the destination upon Control release self.timestamp_play = None self.widget=self.build_widget() self.update_font_size() if filename is not None: self.load_transcription(filename=filename) for n, v in arg: if n == 'text': self.load_transcription(buffer=v) def get_element_height(self, element): return self.button_height def get_save_arguments(self): arguments = [ ('text', "".join(self.generate_transcription())) ] return self.options, arguments def edit_preferences(self, *p): cache=dict(self.options) ew=EditWidget(cache.__setitem__, cache.get) ew.set_name(_("Preferences")) ew.add_checkbox(_("Timestamp"), "timestamp", _("Click inserts timestamp marks")) ew.add_checkbox(_("Insert on double-click"), 'insert-on-double-click', _("A double click inserts the mark")) ew.add_checkbox(_("Insert on single-click"), 'insert-on-single-click', _("A single click inserts the mark")) ew.add_entry(_("Mark prefix"), 'mark-prefix', _("Text to insert before a mark (use \\n for newline)")) ew.add_entry(_("Mark suffix"), 'mark-suffix', _("Text to insert after a mark (use \\n for newline)")) ew.add_checkbox(_("Play on scroll"), "play-on-scroll", _("Play the new position upon timestamp modification")) ew.add_checkbox(_("Generate empty annotations"), "empty-annotations", _("If checked, generate annotations for empty text")) ew.add_spin(_("Reaction time"), "delay", _("Reaction time (substracted from current player time, except when paused.)"), -5000, 5000) ew.add_checkbox(_("Auto-insert"), "autoinsert", _("Automatic timestamp mark insertion")) ew.add_spin(_("Automatic insertion delay"), 'automatic-mark-insertion-delay', _("If autoinsert is active, timestamp marks will be automatically inserted when text is entered after no interaction since this delay (in ms).\n1000 is typically a good value."), 0, 100000) ew.add_spin(_("Font size"), "font-size", _("Font size for text (0 for standard size)"), 0, 48) res=ew.popup() if res: if cache['font-size'] != 
self.options['font-size']: # Font-size was changed. Update the textview. self.update_font_size(cache['font-size']) self.options.update(cache) return True def update_font_size(self, size=None): if size is None: size=self.options['font-size'] if size == 0: # Get the default value from a temporary textview t=Gtk.TextView() size=int(t.get_pango_context().get_font_description().get_size() / Pango.SCALE) del t f=self.textview.get_pango_context().get_font_description() f.set_size(size * Pango.SCALE) self.textview.modify_font(f) def show_searchbox(self, *p): self.searchbox.show() self.searchbox.entry.grab_focus() return True def highlight_search_forward(self, searched): """Highlight with the searched_string tag the given string. """ b=self.textview.get_buffer() begin, end=b.get_bounds() # Remove searched_string tag occurences that may be left from # a previous invocation b.remove_tag_by_name("searched_string", begin, end) finished=False while not finished: res=begin.forward_search(searched, Gtk.TextSearchFlags.TEXT_ONLY) if not res: finished=True else: matchStart, matchEnd = res b.apply_tag_by_name("searched_string", matchStart, matchEnd) begin=matchEnd def textview_drag_received(self, widget, context, x, y, selection, targetType, time):
from django.db import models
from django.utils.translation import ugettext_lazy as _

from projects.models import Project


class Necessity(models.Model):
    """Item or service that an organization regularly needs."""

    name = models.CharField(verbose_name=_('Name'), max_length=20)
    satisfied = models.BooleanField(verbose_name=_('Satisfied'), default=False)

    def __repr__(self):
        return '<Necessity({!r}, satisfied={!r})>'.format(self.name,
                                                          self.satisfied)

    def __str__(self):
        return self.name


class Organization(models.Model):
    """Organization that hosts projects and publishes its material needs."""

    name = models.CharField(
        max_length=64,
        verbose_name=_('Name'),
        help_text=_('Organization name')
    )
    description = models.TextField(
        verbose_name=_('Description'),
        help_text=_('Organization description')
    )
    photo = models.ImageField(verbose_name=_('Photo'),
                              upload_to='organization_photos')
    # on_delete made explicit: CASCADE matches the implicit default of
    # Django < 2.0, and the argument is mandatory from Django 2.0 onwards.
    coordinator = models.ForeignKey(
        'contributors.Contributor',
        on_delete=models.CASCADE,
        verbose_name=_('Coordinator'),
        help_text=_('Person responsible for the organization')
    )
    projects = models.ManyToManyField(Project, blank=True)
    necessities = models.ManyToManyField(Necessity, blank=True)
    necessity_description = models.TextField(
        verbose_name=_('Necessity description'),
        help_text=_('Text to explain the organization material needs')
    )
    email = models.EmailField(
        verbose_name=_('Organization email'),
        blank=True,
        help_text=_('Contact email for the organization')
    )
    homepage_url = models.URLField(
        verbose_name=_('Homepage URL'),
        blank=True,
        help_text=_('Organization homepage link'),
    )
    facebook_url = models.URLField(
        verbose_name=_('Facebook URL'),
        blank=True,
        help_text=_('Organization facebook link')
    )
    twitter_url = models.URLField(
        verbose_name=_('Twitter URL'),
        blank=True,
        help_text=_('Organization twitter link')
    )

    def __repr__(self):
        return '<Organization({})>'.format(self.name)

    def __str__(self):
        return self.name
import cgi
from urllib import urlencode
from Rss_channel import Rss_channel
from Rss_item import Rss_item


class Updates_rss( Rss_channel ):
  """RSS channel listing recently updated notes in a Luminotes notebook.

  Python 2 module: relies on u"" literals, unicode() and the legacy
  cgi.escape() / urllib.urlencode() APIs.
  """

  def __init__(
    self,
    recent_notes,
    notebook_id,
    notebook_name,
    https_url,
  ):
    """Build the channel for one notebook.

    recent_notes -- iterable of ( note_id, revision ) tuples for recently
                    updated notes; falsy when the notebook is unknown
    notebook_id -- id of the notebook the updates belong to
    notebook_name -- display name of the notebook (also selects its path)
    https_url -- base https URL of the site
    """
    # Well-known notebooks live at fixed paths; everything else falls
    # under /notebooks/<id>.
    if notebook_name == u"Luminotes":
      notebook_path = u"/"
    elif notebook_name == u"Luminotes user guide":
      notebook_path = u"/guide"
    elif notebook_name == u"Luminotes blog":
      notebook_path = u"/blog"
    else:
      notebook_path = u"/notebooks/%s" % notebook_id

    notebook_path = https_url + notebook_path

    # One Rss_item per ( note_id, revision ) update; if there are no
    # recent notes, emit a single "Unknown notebook" placeholder item.
    # NOTE(review): cgi.escape() without quote = True leaves double quotes
    # unescaped -- presumably acceptable for these payloads; verify.
    Rss_channel.__init__(
      self,
      cgi.escape( notebook_name ),
      notebook_path,
      u"Luminotes notebook",
      recent_notes and [ Rss_item(
        title = u"Note updated",
        link = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
        description = cgi.escape( u'A note in <a href="%s">this notebook</a> has been updated. <a href="%s?note_id=%s">View the note.</a>' % ( notebook_path, notebook_path, note_id ) ),
        date = revision.strftime( "%Y-%m-%dT%H:%M:%SZ" ),
        guid = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
      ) for ( note_id, revision ) in recent_notes ] or [ Rss_item(
        title = u"Unknown notebook",
        link = None,
        description = cgi.escape( u'Sorry, that notebook is unknown.' ),
        date = None,
        guid = None,
      ) ],
    )

  @staticmethod
  def note_link( notebook_id, notebook_name, note_id, revision, https_url ):
    """Return the escaped update-link URL for one note revision."""
    query = urlencode( [
      ( u"notebook_id", notebook_id ),
      ( u"notebook_name", notebook_name.encode( "utf8" ) ),
      ( u"note_id", note_id ),
      ( u"revision", unicode( revision ) ),
    ] )

    return cgi.escape( u"%s/notebooks/get_update_link?%s" % ( https_url, query ) )
# Demonstrates tf.pow(x, y, name=None): element-wise power, raising each
# element of x to the corresponding element of y.  Accepted dtypes:
# float32, float64, int32, int64, complex64, complex128.
# Here the (1, 3) exponent row broadcasts over both rows of the (2, 3) base.
import tensorflow as tf

base = tf.constant([[2, 3, 5], [2, 3, 5]], tf.float64)
exponent = tf.constant([[2, 3, 4]], tf.float64)
powered = tf.pow(base, exponent)

# TF1-style graph execution; the context manager closes the session.
with tf.Session() as session:
    print(session.run(powered))

# Expected output:
# [[   4.   27.  625.]
#  [   4.   27.  625.]]
ort django django.setup() # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Érudit.org' copyright = '2016 Érudit' author = 'David Cormier' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'fr' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. 
cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'ruditorgdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'ruditorg.tex', 'Érudit.org Documentation', 'Érudit', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'ruditorg',
'Érudit.org Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, categor
y) texinfo_documents = [ (master_doc, 'ruditorg', 'Érudit.org Documentation', autho
fro
m .readtime import *
from __future__ import unicode_literals

from reviewboard.hostingsvcs.tests.testcases import ServiceTests


class RedmineTests(ServiceTests):
    """Unit tests for the Redmine hosting service."""

    # Hosting-service slug under test; presumably resolved by ServiceTests
    # into self.service_class -- confirm against the base class.
    service_name = 'redmine'
    fixtures = ['test_scmtools']

    def test_service_support(self):
        """Testing the Redmine service support capabilities"""
        # Redmine acts only as a bug tracker, not as a repository host.
        self.assertTrue(self.service_class.supports_bug_trackers)
        self.assertFalse(self.service_class.supports_repositories)

    def test_bug_tracker_field(self):
        """Testing the Redmine bug tracker field value"""
        # No username is needed to build Redmine bug URLs; the field is a
        # '%s' template derived from the configured redmine_url.
        self.assertFalse(
            self.service_class.get_bug_tracker_requires_username())
        self.assertEqual(
            self.service_class.get_bug_tracker_field(None, {
                'redmine_url': 'http://redmine.example.com',
            }),
            'http://redmine.example.com/issues/%s')
#!/usr/bin/env python

import unittest

from tests.logic_t.layer.LogicLayer.util import generate_ll


class SearchTest(unittest.TestCase):
    """Tests for LogicLayer.search: summary matching and per-user access."""

    def setUp(self):
        # Fresh logic layer and persistence layer per test, plus an admin
        # user that the non-authorization tests search as.
        self.ll = generate_ll()
        self.pl = self.ll.pl
        self.admin = self.pl.create_user('name@example.org', None, True)
        self.pl.add(self.admin)
        self.pl.commit()

    def test_empty_db_yields_no_results(self):
        """Searching an empty database returns an empty, non-None iterable."""
        # when
        results = self.ll.search('something', self.admin)
        # then
        self.assertIsNotNone(results)
        results2 = list(results)
        self.assertEqual([], results2)

    def test_matching_summary_yields_task(self):
        """A task whose summary contains the search term is returned."""
        # given
        task = self.pl.create_task('one two three')
        self.pl.add(task)
        self.pl.commit()
        # when
        results = self.ll.search('two', self.admin)
        # then
        self.assertIsNotNone(results)
        results2 = list(results)
        self.assertEqual([task], results2)

    def test_no_matching_summary_yields_nothing(self):
        """A task whose summary lacks the search term is not returned."""
        # given
        task = self.pl.create_task('one two three')
        self.pl.add(task)
        self.pl.commit()
        # when
        results = self.ll.search('four', self.admin)
        # then
        self.assertIsNotNone(results)
        results2 = list(results)
        self.assertEqual([], results2)

    def test_non_admin_may_access_own_tasks(self):
        """A non-admin user finds tasks they are attached to."""
        # given
        user1 = self.pl.create_user('user1@example.org', None, False)
        self.pl.add(user1)
        task = self.pl.create_task('one two three')
        task.users.append(user1)
        self.pl.add(task)
        self.pl.commit()
        # when
        results = self.ll.search('two', user1)
        # then
        self.assertIsNotNone(results)
        results2 = list(results)
        self.assertEqual([task], results2)

    def test_non_admin_may_not_access_other_tasks(self):
        """A non-admin user does not see tasks attached only to others."""
        # given
        user1 = self.pl.create_user('user1@example.org', None, False)
        self.pl.add(user1)
        user2 = self.pl.create_user('user2@example.org', None, False)
        self.pl.add(user2)
        task = self.pl.create_task('one two three')
        task.users.append(user1)
        self.pl.add(task)
        self.pl.commit()
        # when
        results = self.ll.search('two', user2)
        # then
        self.assertIsNotNone(results)
        results2 = list(results)
        self.assertEqual([], results2)
mmit() food_group = session.\ query(model.FoodGroup).\ filter(model.FoodGroup.name == row_in[3]).\ one() result = { 'long_desc': row_in[0], 'short_desc': row_in[1], 'manufacturer': row_in[2], 'group_id': food_group.id, 'refuse_pct': row_in[4] } return result def process_row_local_food_weight(row_in, args): session = args['session'] food = session.\ query(model.Food).\ filter(model.Food.long_desc == row_in[0]).\ one() session.\ query(model.Weight).\ filter(and_( model.Weight.food_id == food.id, model.Weight.measurement_desc == row_in[2], )).\ delete() session.commit() prev_sequence = session.\ query(func.max(model.Weight.sequence)).\ filter(model.Weight.food_id == food.id).\ scalar() sequence = 1 if prev_sequence: sequence = int(prev_sequence) + 1 return { 'food_id': food.id, 'sequence': sequence, 'amount': row_in[1], 'measurement_desc': row_in[2], 'grams': row_in[3] } def process_row_local_food_weight_alias(row_in, args): session = args['session'] food = session.\ query(model.Food).\ filter(model.Food.long_desc == row_in[0]).\ one() session.\ query(model.Weight).\ filter(and_( model.Weight.food_id == food.id, model.Weight.measurement_desc == row_in[2], )).\ delete() session.commit() weight = session.\ query(model.Weight).\ filter(model.Weight.food_id == food.id).\ filter(model.Weight.measurement_desc == row_in[1]).\ one() prev_sequence = session.\ query(func.max(model.Weight.sequence)).\ filter(model.Weight.food_id == food.id).\ scalar() sequence = 1 if prev_sequence: sequence = int(prev_sequence) + 1 return { 'food_id': food.id, 'sequence': sequence, 'amount': weight.amount, 'measurement_desc': row_in[2], 'grams': weight.grams, 'num_data_points': weight.num_data_points, 'std_dev': weight.std_dev } def db_import_nutrient_category_map_file(engine, session, fname): print("Processing file '{}'".format(fname)) # Sigh. There are two instances of the nutrient, 'Energy', each # with a different unit of measurement: kcal and kJ. Rename # the nutrient before proceeding. 
energies = session.\ query(model.Nutrient).\ filter(model.Nutrient.name == 'Energy') for energy in energies: if energy.units == 'kcal': energy.name = 'Energy (kcal)' elif energy.units == 'kJ': energy.name = 'Energy (kJ)' session.add(energy) session.commit() with open(fname) as f: csvreader = csv.reader(f, delimiter='|') rows_out = [] for row_in in csvreader: nutrient = session.\ query(model.Nutrient).\ filter(model.Nutrient.name == row_in[0]).\ one() category = session.\ query(model.NutrientCategory).\ filter(model.NutrientCategory.name == row_in[1]).\ one() nutrient.category_id = category.id session.add(nutrient) session.commit() def process_row_local_food_nutrient_data(row_in, args): session = args['session'] try: food = session.\ query(model.Food).\ filter(model.Food.long_desc == row_in[0]).\ one() except sqlalchemy.orm.exc.NoResultFound: raise ValueError("Unable to find USDA Food '{}'".format(row_in[0])) except sqlalchemy.orm.exc.MultipleResultsFound: raise ValueError("Multiple results of food '{}'".format(row_in[0])) try: nutrient = session.\ query(model.Nutrient).\ filter(model.Nutrient.name == row_in[1]).\ one() except sqlalchemy.orm.exc.NoResultFound: raise ValueError("Unable to find nutrient '{}'".format(row_in[1])) except sqlalchemy.orm.exc.MultipleResultsFound: raise ValueError("Multiple results of nutrient '{}'".format(row_in[1])) return { 'food_id': food.id, 'nutrient_id': nutrient.id, 'source_code_id': 9, 'value': row_in[2], 'num_data_points': 0 } def process_row_local_food_nutrient_data_alias(row_in, args): session = args['session'] try: dst_food = session.\ query(model.Food).\ filter(model.Food.long_desc == row_in[0]).\ one() except sqlalchemy.orm.exc.NoResultFound: raise ValueError("Unable to find destination food '{}'".format(row_in[0])) except sqlalchemy.orm.exc.MultipleResultsFound: raise ValueError("Multiple results of destination food '{}'".format(row_in[0])) session.\ query(model.FoodNutrientData).\ filter(model.FoodNutrientData.food_id == 
dst_food.id).\ delete() session.commit() try: src_food = session.\ query(model.Food).\ filter(model.Food.long_desc == row_in[1]).\ one() except sqlalchemy.orm.exc.NoResultFound: raise ValueError("Unable to find source food '{}'".format(row_in[1])) except sqlalchemy.orm.exc.MultipleResultsFound: raise ValueError("Multiple results of source food '{}'".format(row_in[1])) src_nutrient_data = session.\ query(model.FoodNutrientData).\ filter(model.FoodNutrientData.food_id == src_food.id).\ all() for nutrient_datum in src_nutrient_data: session.expunge(nutrient_datum) make_transient(nutrient_datum) nutrient_datum.food_id = dst_food.id session.add(nutrient_datum) session.commit() return None def db_import(engine, session, data_dir): # Only drop the USDA tables as the model may be extended by another # module. for name, obj in inspect.getmembers(sys.modules['usdanutrient.model']): if (inspect.isclass(obj) and obj.__module__ == 'usdanutrient.mod
el'): obj.__table__.drop(engine, checkfirst=True) obj.__table__.create(engine) fnames = os.listdir(data_dir) for fname in fnames: table_class = None col_order = [] full_fname = os.path.join(data_dir, fname) if fname == 'DATA_SRC.txt': table_class = model.DataSource col_order = ['id', 'auth
ors', 'title', 'year', 'journal', 'volume_city', 'issue_state', 'start_page', 'end_page'] elif fname == 'DATSRCLN.txt': table_class = model.FoodNutrientDataSourceMap col_order = ['food_id', 'nutrient_id', 'data_source_id'] elif fname == 'DERIV_CD.txt': table_class = model.DerivationCode col_order = ['id', 'desc'] elif fname == 'FD_GROUP.txt': table_class = model.FoodGroup col_order = ['id', 'name'] elif fname == 'FOOD_DES.txt': table_class = model.Food col_order = ['id', 'group_id', 'long_desc', 'short_desc', 'common_name', 'manufacturer', 'has_fndds_profile', 'refuse_desc', 'refuse_pct', 'sci_name', 'nitrogen_protein_factor', 'protein_calories_factor', 'fat_calories_factor', 'carb_calories_factor'] elif fname == 'FOOTNOTE.txt': table_class = model.Footnote col_order = ['food_id', 'orig_id', 'type', 'nutrient_id', 'desc'] elif fname == 'LANGDESC.txt': table_class = model.Langual col_order = ['id', 'desc'] elif fname == 'LANGUAL.txt': table_class = model.FoodLangualMap col_order = ['food_id', 'langual_id'] elif fname == 'NUT_DATA.txt': table_class = model.FoodNutrientData col_o
self._pack_algo = pack_algo self._algo_kwargs = kwargs self._algo_args = args self._ref_bin = None # Reference bin used to calculate fitness self._bid = kwargs.get("bid", None) def _create_bin(self): return self._pack_algo(self._width, self._height, *self._algo_args, **self._algo_kwargs) def is_empty(self): return self._count<1 def fitness(self, width, height): if not self._ref_bin: self._ref_bin = self._create_bin() return self._ref_bin.fitness(width, height) def fits_inside(self, width, height): # Determine if rectangle widthxheight will fit into empty bin if not self._ref_bin: self._ref_bin = self._create_bin() return self._ref_bin._fits_surface(width, height) def new_bin(self): if self._count > 0: self._count -= 1 return self._create_bin() else: return None def __eq__(self, other): return self._width*self._height == other._width*other._height def __lt__(self, other): return self._width*self._height < other._width*other._height def __str__(self): return "Bin: {} {} {}".format(self._width, self._height, self._count) class PackerBNFMixin(object): """ BNF (Bin Next Fit): Only one open bin at a time. If the rectangle doesn't fit, close the current bin and go to the next. """ def add_rect(self, width, height, rid=None): while True: # if there are no open bins, try to open a new one if len(self._open_bins)==0: # can we find an unopened bin that will hold this rect? 
new_bin = self._new_open_bin(width, height, rid=rid) if new_bin is None: return None # we have at least one open bin, so check if it can hold this rect rect = self._open_bins[0].add_rect(width, height, rid=rid) if rect is not None: return rect # since the rect doesn't fit, close this bin and try again closed_bin = self._open_bins.popleft() self._closed_bins.append(closed_bin) class PackerBFFMixin(object): """ BFF (Bin First Fit): Pack rectangle in first bin it fits """ def add_rect(self, width, height, rid=None): # see if this rect will fit in any of the open bins for b in self._open_bins: rect = b.add_rect(width, height, rid=rid) if rect is not None: return rect while True: # can we find an unopened bin that will hold this rect? new_bin = self._new_open_bin(width, height, rid=rid) if new_bin is None: return None # _new_open_bin may return a bin that's too small, # so we have to double-check rect = new_bin.add_rect(width, height, rid=rid) if rect is not None: return rect class PackerBBFMixin(object): """ BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness """ # only create this getter once first_item = operator.itemgetter(0) def add_rect(self, width, height, rid=None): # Try packing into open bins fit = ((b.fitness(width, height), b) for b in self._open_bins) fit = (b for b in fit if b[0] is not None) try: _, best_bin = min(fit, key=self.first_item) best_bin.add_rect(width, height, rid) return True except ValueError: pass # Try packing into one of the empty bins while True: # can we find an unopened bin that will hold this rect? new_bin = self._new_open_bin(width, height, rid=rid) if new_bin is None: return False # _new_open_bin may return a bin that's too small, # so we have to double-check if new_bin.add_rect(width, height, rid): return True class PackerOnline(object): """ Rectangles are packed as soon are they are added """ def __init__(self, pack_algo=MaxRectsBssf, rotation=True): """ Arguments: pack_algo (PackingAlgorith
m): What packing algo to use rotatio
n (bool): Enable/Disable rectangle rotation """ self._rotation = rotation self._pack_algo = pack_algo self.reset() def __iter__(self): return itertools.chain(self._closed_bins, self._open_bins) def __len__(self): return len(self._closed_bins)+len(self._open_bins) def __getitem__(self, key): """ Return bin in selected position. (excluding empty bins) """ if not isinstance(key, int): raise TypeError("Indices must be integers") size = len(self) # avoid recalulations if key < 0: key += size if not 0 <= key < size: raise IndexError("Index out of range") if key < len(self._closed_bins): return self._closed_bins[key] else: return self._open_bins[key-len(self._closed_bins)] def _new_open_bin(self, width=None, height=None, rid=None): """ Extract the next empty bin and append it to open bins Returns: PackingAlgorithm: Initialized empty packing bin. None: No bin big enough for the rectangle was found """ factories_to_delete = set() # new_bin = None for key, binfac in self._empty_bins.items(): # Only return the new bin if the rect fits. # (If width or height is None, caller doesn't know the size.) 
if not binfac.fits_inside(width, height): continue # Create bin and add to open_bins new_bin = binfac.new_bin() if new_bin is None: continue self._open_bins.append(new_bin) # If the factory was depleted mark for deletion if binfac.is_empty(): factories_to_delete.add(key) break # Delete marked factories for f in factories_to_delete: del self._empty_bins[f] return new_bin def add_bin(self, width, height, count=1, **kwargs): # accept the same parameters as PackingAlgorithm objects kwargs['rot'] = self._rotation bin_factory = BinFactory(width, height, count, self._pack_algo, **kwargs) self._empty_bins[next(self._bin_count)] = bin_factory def rect_list(self): rectangles = [] bin_count = 0 for abin in self: for rect in abin: rectangles.append((bin_count, rect.x, rect.y, rect.width, rect.height, rect.rid)) bin_count += 1 return rectangles def bin_list(self): """ Return a list of the dimmensions of the bins in use, that is closed or open containing at least one rectangle """ return [(b.width, b.height) for b in self] def validate_packing(self): for b in self: b.validate_packing() def reset(self): # Bins fully packed and closed. self._closed_bins = collections.deque() # Bins ready to pack rectangles self._open_bins = collections.deque() # User provided bins not in current use self._empty_bins = collections.OrderedDict() # O(1) deletion of arbitrary elem self._bin_count = itertools.count() class Packer(PackerOnline): """ Rectangles aren't packed untils pack() is called """ def __init__(self, pack_algo=MaxRectsBssf, sort_algo=SORT_NONE, rotation=True): """ """ super(Packer, self).__init__(pack_algo=pack_algo, rotation=rotation) self._sort_algo = sort_algo # User provided bins and Rectangles self._avail_bins = collections.deque() self._avail_rect = collections.deque() # Aux vars used during packing se
#
#
#   File to test behaviour of the Golgi Cell.
#
#   To execute this type of file, type '..\..\..\nC.bat -python XXX.py' (Windows)
#   or '../../../nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
#   NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
#   Author: Padraig Gleeson
#
#   This file has been developed as part of the neuroConstruct project
#   This work has been funded by the Medical Research Council and the
#   Wellcome Trust
#
#

# NOTE: Jython/Python 2 script (print statements, java.io imports); it is
# run inside neuroConstruct's bundled interpreter, not standalone CPython.
import sys
import os

try:
    from java.io import File
except ImportError:
    print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
    print "See http://www.neuroconstruct.org/docs/python.html for more details"
    quit()

sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")

import ncutils as nc

# Project file, relative to this script's directory.
projFile = File("../Cerebellum.ncx")

##############  Main settings  ##################

# Simulation configurations (defined in the project) to run.
simConfigs = []

#simConfigs.append("Default Simulation Configuration")

simConfigs.append("Single Golgi Cell")

# Simulation timestep, in ms.
simDt = 0.001

simulators = ["NEURON", "GENESIS_PHYS", "GENESIS_SI"]  # Note: nernst object isn't implemented in MOOSE yet

varTimestepNeuron = True
varTimestepTolerance = 0.00001

plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = False

#############################################


def testAll(argv=None):
    """Run the configured simulations on every simulator and check that
    the recorded spike times match the reference values below.

    Returns the report produced by SimulationManager.checkSims().
    """
    if argv is None:
        argv = sys.argv

    print "Loading project from "+ projFile.getCanonicalPath()

    simManager = nc.SimulationManager(projFile,
                                      verbose = verbose)

    simManager.runMultipleSims(simConfigs =           simConfigs,
                               simDt =                simDt,
                               simulators =           simulators,
                               runInBackground =      runInBackground,
                               varTimestepNeuron =    varTimestepNeuron,
                               varTimestepTolerance = varTimestepTolerance)

    simManager.reloadSims(plotVoltageOnly =   plotVoltageOnly,
                          analyseSims =       analyseSims)

    # These were discovered using analyseSims = True above.
    # They need to hold for all simulators
    spikeTimesToCheck = {'SingleGolgi_0': [12.2, 33.5, 93.0, 197.4, 310.1, 424.8, 508.0, 529.3, 564.5, 613.8, 668.3, 724.1, 780.2, 836.6, 893.0, 949.5, 1157.6, 1277.6, 1394.4]}

    # Allowed deviation per spike, in ms. Note run time of 1500 ms...
    spikeTimeAccuracy = 1

    report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
                                  spikeTimeAccuracy = spikeTimeAccuracy)

    print report

    return report


if __name__ == "__main__":
    testAll()
# -*- coding: utf-8 -*-

from flask_restful import reqparse

from app.mod_profiles.validators.generic_validators import is_valid_id

# Base parser: the fields required by every request on this resource.
parser = reqparse.RequestParser()
for _field, _type in (
        ('username', str),
        ('email', str),
        ('password', str),
        ('profile_id', is_valid_id),
):
    parser.add_argument(_field, type=_type, required=True)

# Parser for the POST resource (same required fields as the base parser).
parser_post = parser.copy()

# Parser for the PUT resource: password becomes optional on update.
parser_put = parser.copy()
parser_put.remove_argument('password')
parser_put.add_argument('password', type=str)
"""Models for SQLAlchemy. This file contains the original models definitions before schema tracking was implemented. It is used to test the schema migration logic. """ import json from datetime import datetime import logging from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Index, Integer, String, Text, distinct) from sqlalchemy.ext.declarative import declarative_base import homeassistant.util.dt as dt_util from homeassistant.core import Event, EventOrigin, State, split_entity_id from homeassistant.remote import JSONEncoder # SQLAlchemy Schema # pylint: disable=invalid-name Base = declarative_base() _LOGGER = logging.getLogger(__name__) class Events(Base): # type: ignore """Event history data.""" __tablename__ = 'events' event_id = Column(Integer, primary_key=True) event_type = Column(String(32), index=True) event_data = Column(Text) origin = Column(String(32)) time_fired = Column(DateTime(timezone=True)) created = Column(DateTime(timezone=True), default=datetime.utcnow) @staticmethod def from_event(event): """Create an event database object from a native event.""" return Events(event_type=event.event_type, event_data=json.dumps(event.data, cls=JSONEncoder), origin=str(event.origin), time_fired=event.time_fired) def to_native(self): """Convert to a natve HA Event.""" try: return Event( self.event_type, json.loads(self.event_data), Ev
entOrigin(self.origin), _process_timestamp(self.time_fired) ) except ValueError: # When json.loads fails _LOGGER.exception("Error converting to event: %s", self) return None class States(Base): # type: ignore """State change history.""" __tablename__ = 'states' stat
e_id = Column(Integer, primary_key=True) domain = Column(String(64)) entity_id = Column(String(255)) state = Column(String(255)) attributes = Column(Text) event_id = Column(Integer, ForeignKey('events.event_id')) last_changed = Column(DateTime(timezone=True), default=datetime.utcnow) last_updated = Column(DateTime(timezone=True), default=datetime.utcnow) created = Column(DateTime(timezone=True), default=datetime.utcnow) __table_args__ = (Index('states__state_changes', 'last_changed', 'last_updated', 'entity_id'), Index('states__significant_changes', 'domain', 'last_updated', 'entity_id'), ) @staticmethod def from_event(event): """Create object from a state_changed event.""" entity_id = event.data['entity_id'] state = event.data.get('new_state') dbstate = States(entity_id=entity_id) # State got deleted if state is None: dbstate.state = '' dbstate.domain = split_entity_id(entity_id)[0] dbstate.attributes = '{}' dbstate.last_changed = event.time_fired dbstate.last_updated = event.time_fired else: dbstate.domain = state.domain dbstate.state = state.state dbstate.attributes = json.dumps(dict(state.attributes), cls=JSONEncoder) dbstate.last_changed = state.last_changed dbstate.last_updated = state.last_updated return dbstate def to_native(self): """Convert to an HA state object.""" try: return State( self.entity_id, self.state, json.loads(self.attributes), _process_timestamp(self.last_changed), _process_timestamp(self.last_updated) ) except ValueError: # When json.loads fails _LOGGER.exception("Error converting row to state: %s", self) return None class RecorderRuns(Base): # type: ignore """Representation of recorder run.""" __tablename__ = 'recorder_runs' run_id = Column(Integer, primary_key=True) start = Column(DateTime(timezone=True), default=datetime.utcnow) end = Column(DateTime(timezone=True)) closed_incorrect = Column(Boolean, default=False) created = Column(DateTime(timezone=True), default=datetime.utcnow) def entity_ids(self, point_in_time=None): """Return the 
entity ids that existed in this run. Specify point_in_time if you want to know which existed at that point in time inside the run. """ from sqlalchemy.orm.session import Session session = Session.object_session(self) assert session is not None, 'RecorderRuns need to be persisted' query = session.query(distinct(States.entity_id)).filter( States.last_updated >= self.start) if point_in_time is not None: query = query.filter(States.last_updated < point_in_time) elif self.end is not None: query = query.filter(States.last_updated < self.end) return [row[0] for row in query] def to_native(self): """Return self, native format is this model.""" return self def _process_timestamp(ts): """Process a timestamp into datetime object.""" if ts is None: return None elif ts.tzinfo is None: return dt_util.UTC.localize(ts) else: return dt_util.as_utc(ts)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import unittest
from unittest import mock

from airflow.utils.python_virtualenv import prepare_virtualenv


class TestPrepareVirtualenv(unittest.TestCase):
    """Tests for airflow.utils.python_virtualenv.prepare_virtualenv."""

    @mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
    def test_should_create_virtualenv(self, mock_execute):
        """A plain venv is created and its python interpreter path returned."""
        python_bin = prepare_virtualenv(
            venv_directory="/VENV",
            python_bin="pythonVER",
            system_site_packages=False,
            requirements=[],
        )

        self.assertEqual("/VENV/bin/python", python_bin)
        expected_cmd = ['virtualenv', '/VENV', '--python=pythonVER']
        mock_execute.assert_called_once_with(expected_cmd)

    @mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
    def test_should_create_virtualenv_with_system_packages(self, mock_execute):
        """system_site_packages=True adds the corresponding virtualenv flag."""
        python_bin = prepare_virtualenv(
            venv_directory="/VENV",
            python_bin="pythonVER",
            system_site_packages=True,
            requirements=[],
        )

        self.assertEqual("/VENV/bin/python", python_bin)
        expected_cmd = [
            'virtualenv', '/VENV', '--system-site-packages', '--python=pythonVER'
        ]
        mock_execute.assert_called_once_with(expected_cmd)

    @mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
    def test_should_create_virtualenv_with_extra_packages(self, mock_execute):
        """Requirements are pip-installed into the venv after it is created."""
        python_bin = prepare_virtualenv(
            venv_directory="/VENV",
            python_bin="pythonVER",
            system_site_packages=False,
            requirements=['apache-beam[gcp]'],
        )

        self.assertEqual("/VENV/bin/python", python_bin)
        # First call creates the venv; the final call installs requirements.
        mock_execute.assert_any_call(['virtualenv', '/VENV', '--python=pythonVER'])
        mock_execute.assert_called_with(['/VENV/bin/pip', 'install', 'apache-beam[gcp]'])
# -*- coding: utf-8 -*-
###############################################################################
#
#  Copyright (C) 2013-Today  Carlos Eduardo Vercelino - CLVsol
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU Affero General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU Affero General Public License for more details.
#
#  You should have received a copy of the GNU Affero General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

from datetime import datetime

from dateutil.relativedelta import relativedelta

from openerp import api, fields, models
from openerp.exceptions import UserError


class PersonManagement(models.Model):
    """Management record for a person (identification, filiation, status)."""

    _name = 'myo.person.mng'

    name = fields.Char('Name', required=True)
    alias = fields.Char('Alias', help='Common name that the Person is referred.')
    code = fields.Char(string='Person Code', required=False)
    notes = fields.Text(string='Notes')
    date_inclusion = fields.Datetime("Inclusion Date", required=False, readonly=False,
                                     default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    batch_name = fields.Char('Batch Name', required=False)
    country_id_2 = fields.Many2one('res.country', 'Nationality')
    birthday = fields.Date("Date of Birth")
    # Computed from birthday; stored so it can be searched/grouped.
    age = fields.Char(
        string='Age',
        compute='_compute_age',
        store=True
    )
    estimated_age = fields.Char(string='Estimated Age', required=False)
    spouse_name = fields.Char('Spouse Name')
    spouse_id = fields.Many2one('myo.person', 'Spouse', ondelete='restrict')
    father_name = fields.Char('Father Name')
    father_id = fields.Many2one('myo.person', 'Father', ondelete='restrict')
    mother_name = fields.Char('Mother Name')
    mother_id = fields.Many2one('myo.person', 'Mother', ondelete='restrict')
    responsible_name = fields.Char('Responsible Name')
    responsible_id = fields.Many2one('myo.person', 'Responsible', ondelete='restrict')
    identification_id = fields.Char('Person ID')
    otherid = fields.Char('Other ID')
    gender = fields.Selection(
        [('M', 'Male'),
         ('F', 'Female')
         ], 'Gender'
    )
    marital = fields.Selection(
        [('single', 'Single'),
         ('married', 'Married'),
         ('widower', 'Widower'),
         ('divorced', 'Divorced'),
         ], 'Marital Status'
    )
    active = fields.Boolean('Active',
                            help="If unchecked, it will allow you to hide the person without removing it.",
                            default=1)
    person_id = fields.Many2one('myo.person', 'Person')

    _order = 'name'

    _sql_constraints = [
        ('code_uniq',
         'UNIQUE(code)',
         u'Error! The Person Code must be unique!'
         )
    ]

    @api.multi
    @api.constrains('birthday')
    def _check_birthday(self):
        """Reject a Date of Birth that lies in the future.

        The birthday field is optional: when it is unset it holds False, so
        it must be guarded before comparing against today's date (comparing
        False with a date string is meaningless and raises on Python 3).
        """
        for person in self:
            if person.birthday and person.birthday > fields.Date.today():
                raise UserError(u'Error! Date of Birth must be in the past!')

    @api.one
    @api.depends('birthday')
    def _compute_age(self):
        """Compute the person's age in whole years from the birthday.

        Falls back to an explanatory string when no Date of Birth is set.
        """
        now = datetime.now()
        if self.birthday:
            dob = datetime.strptime(self.birthday, '%Y-%m-%d')
            delta = relativedelta(now, dob)
            # self.age = str(delta.years) + "y " + str(delta.months) + "m " + str(delta.days) + "d"
            self.age = str(delta.years)
        else:
            self.age = "No Date of Birth!"
#!/usr/bin/env python

"""
This script accepts .csv pipeline output and gives a .ps file with a basic tree structure
"""

__author__ = "Paul Donovan"
__maintainer__ = "Paul Donovan"
__email__ = "pauldonovandonegal@gmail.com"

import sys
import argparse
from ete3 import NCBITaxa

#Display help and usage
# NOTE(review): the parsed 'args' object is never used below -- the script
# reads sys.argv[1]/sys.argv[2] directly; argparse is only used for --help.
parser = argparse.ArgumentParser(description="Incorrect number of command line arguments")
parser.add_argument('Sorted-LCA.csv')
parser.add_argument('Output.gv')
if len(sys.argv[1:]) == 0:
    parser.print_help()
    parser.exit()
args = parser.parse_args()

ncbi = NCBITaxa()

#The number of species you want to create the tree with
NumberOfSpecies = 10

#Read CSV results into list, remove all but the top 10 most abundant taxonomies
# assumes the CSV is sorted by abundance with a single header line -- TODO confirm
ResultsList = list(line.strip().split(",") for line in open(sys.argv[1]))
ResultsList = ResultsList[0:int(NumberOfSpecies) + 1]   #Take first n items in list (+1 is to negate the header line)

#Open output file for writing
Output = open(sys.argv[2], "w")

#Write header line in dot format
Output.write('digraph G {\n\tsize="8,5!";\n')

#Define lists, dicts and variables
ResultTaxids = list()
TreeList = list()
BadChars = "()[]{}/|"   # characters that would break the quoted dot node names
TaxidFreqDict = {}
Counter = 0   # NOTE(review): never used below

#Re-open CSV file, create a dictionary with taxid as key and number of reads as value
# presumably columns are: name, taxid, read-count -- verify against pipeline output
with open(sys.argv[1]) as f:
    for line in f:
        if not line.startswith("#"):
            tok = line.strip().split(",")
            TaxidFreqDict[tok[1]] = tok[2]

#Build the dot script
# For each result taxid, walk its full NCBI lineage and emit one
# parent -> child edge per adjacent pair, de-duplicating via TreeList.
for line in ResultsList:
    if line[0].startswith("#"):
        pass
    else:
        ResultTaxid = line[1]
        ResultTaxids.append(ResultTaxid)
        lineage = ncbi.get_lineage(ResultTaxid)
        for index, taxid in enumerate(lineage):
            name = ncbi.get_taxid_translator([str(taxid)])
            name = name[taxid]
            for char in name:
                if char in BadChars:
                    name = name.replace(str(char),"_") #Replace ugly strings
            NextIndex = int(index) + 1
            if NextIndex == len(lineage):
                # Reached the last taxid in the lineage; no child edge to emit.
                pass
            else:
                NextTaxid = lineage[NextIndex]
                NextName = ncbi.get_taxid_translator([str(NextTaxid)])
                NextName = NextName[NextTaxid]
                for char in NextName:
                    if char in BadChars:
                        NextName = NextName.replace(str(char),"_") #Replace ugly strings
                NodeToNode = str('\t"' + str(name) + '" -> "' + str(NextName) + '";\n')
                if any(NodeToNode in s for s in TreeList):
                    # Edge already written; skip duplicates.
                    pass
                else:
                    Output.write(NodeToNode)
                    TreeList.append(NodeToNode)
                if str(NextTaxid) in TaxidFreqDict:
                    #If there is information available about number of reads for this taxid, use it
                    value = TaxidFreqDict[str(NextTaxid)]
                    Freq = format(int(value), ",d") #Adds commas to numbers to make them more human-readable
                    Output.write(str('\t"' + str(NextName) + '" [xlabel="' + str(Freq) + ' reads"];\n'))
Output.write("}")
Output.close()
('netcdf-c~mpi', when='~mpi') depends_on('netcdf-c+mpi', when='+mpi') depends_on('netcdf-cxx') depends_on('libpng') depends_on('libtiff') depends_on('zlib') depends_on('eigen', when='@8.2.0:'
) depends_on('double-conversion', when='@8.2.0:') depends_on('sqlite', when='@8.2.0:') # For finding Fujitsu-MPI wrapper commands patch('find_fujitsu_mpi.patch', when='@:8.2.0%fj') def url_for_version(self, version): url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz" return url.format(version.up_to(2), version) def setup_bui
ld_environment(self, env): # VTK has some trouble finding freetype unless it is set in # the environment env.set('FREETYPE_DIR', self.spec['freetype'].prefix) def cmake_args(self): spec = self.spec opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '') cmake_args = [ '-DBUILD_SHARED_LIBS=ON', '-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver), # In general, we disable use of VTK "ThirdParty" libs, preferring # spack-built versions whenever possible '-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON', # However, in a few cases we can't do without them yet '-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF', '-DVTK_USE_SYSTEM_LIBHARU=OFF', '-DNETCDF_DIR={0}'.format(spec['netcdf-c'].prefix), '-DNETCDF_C_ROOT={0}'.format(spec['netcdf-c'].prefix), '-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix), # Allow downstream codes (e.g. VisIt) to override VTK's classes '-DVTK_ALL_NEW_OBJECT_FACTORY:BOOL=ON', # Disable wrappers for other languages. '-DVTK_WRAP_JAVA=OFF', '-DVTK_WRAP_TCL=OFF', ] # Some variable names have changed if spec.satisfies('@8.2.0:'): cmake_args.extend([ '-DVTK_USE_SYSTEM_OGG:BOOL=OFF', '-DVTK_USE_SYSTEM_THEORA:BOOL=OFF', '-DVTK_USE_SYSTEM_LIBPROJ:BOOL=OFF', '-DVTK_USE_SYSTEM_PUGIXML:BOOL=OFF', ]) else: cmake_args.extend([ '-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF', '-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF', ]) if '+mpi' in spec: if spec.satisfies('@:8.2.0'): cmake_args.extend([ '-DVTK_Group_MPI:BOOL=ON', '-DVTK_USE_SYSTEM_DIY2:BOOL=OFF' ]) else: cmake_args.extend([ '-DVTK_USE_MPI=ON' ]) if '+ffmpeg' in spec: cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON']) # Enable/Disable wrappers for Python. 
if '+python' in spec: cmake_args.extend([ '-DVTK_WRAP_PYTHON=ON', '-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path), ]) if '+mpi' in spec: cmake_args.append('-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON') if spec.satisfies('@9.0.0: ^python@3:'): cmake_args.append('-DVTK_PYTHON_VERSION=3') else: cmake_args.append('-DVTK_WRAP_PYTHON=OFF') if 'darwin' in spec.architecture: cmake_args.extend([ '-DCMAKE_MACOSX_RPATH=ON' ]) if '+qt' in spec: qt_ver = spec['qt'].version.up_to(1) qt_bin = spec['qt'].prefix.bin qmake_exe = os.path.join(qt_bin, 'qmake') cmake_args.extend([ # Enable Qt support here. '-DVTK_QT_VERSION:STRING={0}'.format(qt_ver), '-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe), '-DVTK_Group_Qt:BOOL=ON', ]) # NOTE: The following definitions are required in order to allow # VTK to build with qt~webkit versions (see the documentation for # more info: http://www.vtk.org/Wiki/VTK/Tutorials/QtSetup). if '~webkit' in spec['qt']: cmake_args.extend([ '-DVTK_Group_Qt:BOOL=OFF', '-DModule_vtkGUISupportQt:BOOL=ON', '-DModule_vtkGUISupportQtOpenGL:BOOL=ON', ]) if '+xdmf' in spec: if spec.satisfies('^cmake@3.12:'): # This policy exists only for CMake >= 3.12 cmake_args.extend(["-DCMAKE_POLICY_DEFAULT_CMP0074=NEW"]) cmake_args.extend([ # Enable XDMF Support here "-DModule_vtkIOXdmf2:BOOL=ON", "-DModule_vtkIOXdmf3:BOOL=ON", "-DBOOST_ROOT={0}".format(spec['boost'].prefix), "-DBOOST_LIBRARY_DIR={0}".format(spec['boost'].prefix.lib), "-DBOOST_INCLUDE_DIR={0}".format(spec['boost'].prefix.include), "-DBOOST_NO_SYSTEM_PATHS:BOOL=ON", # This is needed because VTK has multiple FindBoost # and they stick to system boost if there's a system boost # installed with CMake "-DBoost_NO_BOOST_CMAKE:BOOL=ON", "-DHDF5_ROOT={0}".format(spec['hdf5'].prefix), # The xdmf project does not export any CMake file... 
"-DVTK_USE_SYSTEM_XDMF3:BOOL=OFF", "-DVTK_USE_SYSTEM_XDMF2:BOOL=OFF" ]) if '+mpi' in spec: cmake_args.extend(["-DModule_vtkIOParallelXdmf3:BOOL=ON"]) cmake_args.append('-DVTK_RENDERING_BACKEND:STRING=' + opengl_ver) if spec.satisfies('@:8.1.0'): cmake_args.append('-DVTK_USE_SYSTEM_GLEW:BOOL=ON') if '+osmesa' in spec: cmake_args.extend([ '-DVTK_USE_X:BOOL=OFF', '-DVTK_USE_COCOA:BOOL=OFF', '-DVTK_OPENGL_HAS_OSMESA:BOOL=ON']) else: cmake_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=OFF') if spec.satisfies('@:7.9.9'): # This option is gone in VTK 8.1.2 cmake_args.append('-DOpenGL_GL_PREFERENCE:STRING=LEGACY') if 'darwin' in spec.architecture: cmake_args.extend([ '-DVTK_USE_X:BOOL=OFF', '-DVTK_USE_COCOA:BOOL=ON']) elif 'linux' in spec.architecture: cmake_args.extend([ '-DVTK_USE_X:BOOL=ON', '-DVTK_USE_COCOA:BOOL=OFF']) if spec.satisfies('@:6.1.0'): cmake_args.extend([ '-DCMAKE_C_FLAGS=-DGLX_GLXEXT_LEGACY', '-DCMAKE_CXX_FLAGS=-DGLX_GLXEXT_LEGACY' ]) # VTK 6.1.0 (and possibly earlier) does not use # NETCDF_CXX_ROOT to detect NetCDF C++ bindings, so # NETCDF_CXX_INCLUDE_DIR and NETCDF_CXX_LIBRARY must be # used instead to detect these bindings netcdf_cxx_lib = spec['netcdf-cxx'].libs.joined() cmake_args.extend([ '-DNETCDF_CXX_INCLUDE_DIR={0}'.format( spec['netcdf-cxx'].prefix.include), '-DNETCDF_CXX_LIBRARY={0}'.format(netcdf_cxx_lib), ]) # Garbage collection is unsupported in Xcode starting with # version 5.1; if the Apple clang version of the compiler # is 5.1.0 or later, unset the required Objective-C flags # to remove the garbage collection flags. Versions of VTK # after 6.1.0 set VTK_REQUIRED_OBJCXX_FLAGS to the empty # string. 
This fix was recommended on the VTK mailing list # in March 2014 (see # https://public.kitware.com/pipermail/vtkusers/2014-March/083368.html) if self.spec.satisfies('%apple-clang@5.1.0:'): cmake_args.extend(['-DVTK_REQUIRED_OBJCXX_FLAGS=']) # A bug in tao pegtl causes build failures with intel compilers if '%intel' in spec and spec.version >= Version('8.2'): cmake_args.append( '-DVTK_MODU
"""Utility functions for plots.""" from functools import wraps from os.path import join as pjoin import matplotlib.pyplot as plt ################################################################################################### ################################################################################################### def check_ax(ax, figsize=None): """Check whether a figure axes object is defined, define if not. Parameters ---------- ax : matplotlib.Axes or None Axes object to check if is defined. Returns ------- ax : matplotlib.Axes Figure axes object to use. """ if not ax: _, ax = plt.subplots(figsize=figsize) return ax def savefig(func): """Decorator function to save out figures.""" @wraps(func) def decorated(*args, **kwargs): # Grab file name and path arguments, if they are in kwargs file_name = kwargs.pop('file_name', None) file_path = kwargs.
pop('file_path', None) # Check for an explicit argument for whether to save figure or not # Defaults to saving when file name given (since bool(str)->True; bool(None)->False) save_fig = kwargs.pop('save_fig', bool(file_name)) # Check any collect any other plot keywords save_kwargs = kwargs.pop('save_kwargs', {}) save_kwarg
s.setdefault('bbox_inches', 'tight') # Check and collect whether to close the plot close = kwargs.pop('close', None) func(*args, **kwargs) if save_fig: full_path = pjoin(file_path, file_name) if file_path else file_name plt.savefig(full_path, **save_kwargs) if close: plt.close() return decorated
#!/usr/bin/python
# gen_numerics.py: generate numerics.h
# NOTE: Python 2 script (uses print statements); the emitted text is a C
# header built from the master definitions in numerics.py.

import numerics

print """/*
	quIRC - simple terminal-based IRC client
	Copyright (C) 2010-13 Edward Cree

	See quirc.c for license information
	numeric: IRC numeric replies
*/

/*** This file is generated by gen_numerics.py from masters in numerics.py.  Do not make edits directly to this file!  Edit the masters instead. ***/

/* A symbolic name defined here does not necessarily imply recognition or decoding of that numeric reply.  Some numeric replies are non-normative; that is, they are not defined in the original RFC1459 or its superseding RFC2812, but instead are either defined in other, non-normative documents, or are entirely experimental.  These are denoted with an X before the name (of the form RPL_X_BOGOSITY); where a numeric is being identified purely on the basis of usage "in the wild", the symbolic name will be completely arbitrary and may not align with usage elsewhere. */

/* Error replies */"""

# Emit error numerics first; each entry's str() is its C #define line.
errs = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericError)]
for e in errs:
    print str(e)

print """
/* Command responses */"""
# Then the command-response numerics.
rpls = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericReply)]
for r in rpls:
    print str(r)
import django_filters
from rest_framework import filters


class CaseInsensitiveBooleanFilter(django_filters.Filter):
    # The default django_filters boolean filter *only* accepts True and False
    # which is problematic when dealing with non-Python clients. This allows
    # the lower case variants, as well as 0 and 1.

    def filter(self, qs, value):
        """Filter the queryset on a case-insensitively parsed boolean value.

        Accepts "true"/"false" in any case as well as "1"/"0"; any other
        non-None value is passed through to the queryset filter unchanged.
        A None value leaves the queryset unfiltered.
        """
        if value is not None:
            lc_value = value.lower()
            if lc_value in ["true", "1"]:
                value = True
            elif lc_value in ["false", "0"]:
                value = False
            return qs.filter(**{self.field_name: value})
        return qs


class AliasedOrderingFilter(filters.OrderingFilter):
    """OrderingFilter that lets subclasses expose aliases for ordering fields.

    Subclasses populate `aliases` mapping an exposed alias to a
    (real_field_name, label) pair.
    """

    # alias -> (actual ordering field, label) -- override in subclasses.
    aliases = {}

    def get_valid_fields(self, *args, **kwargs):
        """Extend the base valid fields with each alias and its label."""
        valid_fields = super().get_valid_fields(*args, **kwargs)
        for alias, mapping in self.aliases.items():
            valid_fields.append((alias, mapping[1]))
        return valid_fields

    def get_ordering(self, *args, **kwargs):
        """Resolve aliases in the requested ordering to real field names."""
        ordering = super().get_ordering(*args, **kwargs)
        if ordering is not None:
            return list(map(self.replace_alias, ordering))
        return ordering

    def replace_alias(self, term):
        """Map one ordering term through `aliases`, preserving a '-' prefix."""
        field = term.lstrip("-")
        if field in self.aliases:
            modifier = "-" if term.startswith("-") else ""
            return modifier + self.aliases[field][0]
        return term
""" W
SGI config for mysite project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os, sys from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evewspace.settings") applicatio
n = get_wsgi_application()
whom the Software is furnished to do # so, subject to the following conditions: # # The abov
e copyright notice and this permission notice shall be included in all # copies or substantial portions of the Sof
tware. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Set to False to test alternative image processor use_PIL = True # The port to use with IPC for opening/saving image files nopil_port = 4859 import string from multiprocessing import Process import imp import os import tempfile import subprocess from time import sleep import atexit import socket # Remove our temporary files when the module is unloaded temp_files = [] def cleanup_temp(): for filename in temp_files: os.remove(filename) atexit.register(cleanup_temp) try: # Do not attempt an import here # Tkinter can't be loaded in a process and its subprocesses simultaneously imp.find_module('Tkinter') _has_Tk = True except: _has_Tk = False def _pil_open(filename): image = PILImage.open(filename) data = image.getdata() # Only get the RGB components in case the image is ARGB data = [tuple(color[len(color) - 3:]) for color in data] return (data, image.size) def _nopil_open_pipe(filename): # Run a java utility to print out the pixels of the image to stdout command = ['java', '-jar', 'ImagePiper.jar', 'read', filename] image_piper = subprocess.Popen(command, stdout=subprocess.PIPE) # Read the output from ImagePiper stdout, stderr = image_piper.communicate() lines = stdout.splitlines() # Read the encoding from the first line of output radix = int(lines.pop(0)) # Read the width and the height from the second line of output w, h = tuple(int(x, radix) for x in lines.pop(0).split()) # Read the pixels line by line, with each line corresponding to a line from the image data = 
[Color.int_to_rgb(int(pixel, radix)) for line in lines for pixel in line.split()] return (data, (w, h)) def _bytes_to_int(bs): return sum(ord(bs[i]) << (8 * (len(bs) - i - 1)) for i in xrange(len(bs))) def _bytes_to_rgb(bs): return tuple(ord(bs[i]) for i in xrange(1, 4)) def _nopil_open_socket(filename): # Listen on a local IPv4-style socket to receive image data s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('localhost', nopil_port)) s.listen(1) # Run a java utility to send the pixels of the image over our socket command = ['java', '-jar', 'ImagePiper.jar', 'send', filename] subprocess.Popen(command) # Wait for the java utility to connect and move sockets conn, addr = s.accept() s.close() # Read the width and the height size = conn.recv(8) size = [_bytes_to_int(size[i*4:i*4+4]) for i in xrange(2)] w, h = size # Read entire lines in from the socket lines = [conn.recv(4 * w) for line in xrange(h)] data = [_bytes_to_rgb(lines[line][i*4:i*4+4]) for line in xrange(h) for i in xrange(w)] # Close the connection conn.close() return (data, (w, h)) def _pil_save(image, filename): w, h = image.size pil_image = PILImage.new("RGB", (w, h)) pil_image.putdata(image.data) pil_image.save(filename, "png") def _nopil_save(image, filename): # Run a java utility to read in the pixels of the image and save them to a file command = ['java', '-jar', 'ImagePiper.jar', 'write', filename] image_piper = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE) # Read the encoding from ImagePiper and create a codec for it radix = int(image_piper.stdout.readline()) codec = IntegerCodec() # Write the width and the height w, h = image.size image_piper.stdin.write("%s %s\n" % (codec.encode(w, radix), codec.encode(h, radix))) # Write the pixels line by line pixels = map(lambda pixel: codec.encode(Color.rgb_to_int(pixel), radix), image.data) lines = (" ".join(pixels[image._get_index((0, line)):image._get_index((w, line))]) for line in xrange(h)) 
image_piper.stdin.write("\n".join(lines)) # Flush the writes image_piper.communicate() try: from PIL import Image as PILImage _has_PIL = True except: _has_PIL = False _nopil_open = _nopil_open_socket class IntegerCodec: def __init__(self): self._base_list = string.digits + string.letters + '_@' def decode(self, int_string, radix): return int(int_string, radix) def encode(self, integer, radix): # Only encode absolute value of integer sign = '' if integer < 0: sign = '-' integer = abs(integer) int_string = '' while integer != 0: int_string = self._base_list[integer % radix] + int_string integer /= radix return sign + int_string class Color: def __init__(self, color): if type(color) is type(0): self.color = Color.int_to_rgb(color) else: self.color = color def as_int(self): return Color.rgb_to_int(self.color) def as_rgb(self): return self.color @staticmethod def int_to_rgb(rgb_int): r = (rgb_int >> 16) & 255 g = (rgb_int >> 8) & 255 b = rgb_int & 255 return (r, g, b) @staticmethod def rgb_to_int(rgb): r, g, b = rgb rgb_int = r rgb_int = (rgb_int << 8) + g rgb_int = (rgb_int << 8) + b return rgb_int def squared_euclidean_distance(self, other): return sum((self.color[i] - other.color[i])**2 for i in xrange(len(self.color))) class Image: def __init__(self, *args): if type(args[0]) is type("string"): # Assume we were passed a filename self._open(args[0]) elif type(args[0]) is type(self): # Assume we were passed another image self._copy(args[0]) else: # Assume we were passed a size tuple and possibly a color self._create(*args) def _open(self, filename): if _has_PIL and use_PIL: _opener = _pil_open else: _opener = _nopil_open self.data, self.size = _opener(filename) def _create(self, size, color = (0, 0, 0)): size = tuple(int(x) for x in size) w, h = self.size = size self.data = [color] * w * h def _copy(self, image): self.size = image.size self.data = image.data[:] def _get_index(self, loc): # Convert an (x, y) pair to a 1-dimensional index loc = tuple(int(x) for x in loc) 
x, y = loc w, h = self.size return y * w + x def getpixel(self, loc): return self.data[self._get_index(loc)] def putpixel(self, loc, color): color = tuple(min(x, 255) for x in color) self.data[self._get_index(loc)] = color def temp_file(self): handle, filename = tempfile.mkstemp() self.save(filename) os.close(handle) temp_files.append(filename) return filename def _show_in_os(self): # Save the image to a temporary file for another process to read filename = self.temp_file() if os.name == 'nt': os.startfile(filename) else: # Assume we are on a mac and attempt to use the open command retcode = subprocess.call(['open', filename]) if retcode is not 0: # The open command failed, so assume we are on Linux subprocess.call(['xdg-open', filename]) def show(self, default=False, wait=False): # Open the image using the user's default imaging viewing application, cannot wait if default or not _has_Tk: self._show_in_os() else: # Open the file using our own image viewer viewer = ImageViewer(self, wait) def save(self, filename): if _has_PIL and use_PIL: _saver = _pil_save else: _saver = _nopil_save _saver(self, filename) @staticmethod def new(mode, size, color = (0, 0, 0)): #ignore mode for now return Image(size, color) def copy(self): return Image(self) def __ne__(self, other): w1, h1 = self.size w2, h2 = other.size if w1 != w2 or h1 != h2: return True for i
import datetime

from django.test import TestCase
from django.utils import timezone

from schedule.models import Event, Rule, Calendar
from schedule.utils import EventListManager


class TestEventListManager(TestCase):
    """Exercise EventListManager.occurrences_after() with one weekly and
    one daily recurring event sharing a calendar."""

    def setUp(self):
        # One rule per recurrence frequency, one shared calendar.
        weekly = Rule.objects.create(frequency="WEEKLY")
        daily = Rule.objects.create(frequency="DAILY")
        cal = Calendar.objects.create(name="MyCal")
        self.default_tzinfo = timezone.get_default_timezone()

        # Weekly event: recurs from 2009-04-01 until 2009-10-05.
        self.event1 = Event(**{
            'title': 'Weekly Event',
            'start': datetime.datetime(2009, 4, 1, 8, 0, tzinfo=self.default_tzinfo),
            'end': datetime.datetime(2009, 4, 1, 9, 0, tzinfo=self.default_tzinfo),
            'end_recurring_period': datetime.datetime(2009, 10, 5, 0, 0, tzinfo=self.default_tzinfo),
            'rule': weekly,
            'calendar': cal
        })
        self.event1.save()
        # Daily event: recurs from 2008-01-05 until 2009-05-05.
        self.event2 = Event(**{
            'title': 'Recent Event',
            'start': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=self.default_tzinfo),
            'end': datetime.datetime(2008, 1, 5, 10, 0, tzinfo=self.default_tzinfo),
            'end_recurring_period': datetime.datetime(2009, 5, 5, 0, 0, tzinfo=self.default_tzinfo),
            'rule': daily,
            'calendar': cal
        })
        self.event2.save()

    def test_occurrences_after(self):
        eml = EventListManager([self.event1, self.event2])
        # From 2009-04-01 the occurrences of both events are expected to be
        # yielded interleaved in chronological order: the weekly occurrence
        # first, then a run of daily occurrences, then the next weekly one.
        occurrences = eml.occurrences_after(datetime.datetime(2009, 4, 1, 0, 0, tzinfo=self.default_tzinfo))
        self.assertEqual(next(occurrences).event, self.event1)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event2)
        self.assertEqual(next(occurrences).event, self.event1)
        # With no "after" argument the manager starts from now; both events'
        # recurring periods ended in 2009, so nothing is yielded.
        occurrences = eml.occurrences_after()
        self.assertEqual(list(occurrences), [])
import re
import time
import readline
import os

# CONVERT shell colors to the same curses palette
SHELL_COLORS = {
    "wr": '\033[1;37;41m',  # white on red
    "wo": '\033[1;37;43m',  # white on orange
    "wm": '\033[1;37;45m',  # white on magenta
    "wb": '\033[1;37;46m',  # white on blue
    "bw": '\033[1;37;40m',  # black on white
    "lblue": '\033[1;34m',  # light blue
    "lred": '\033[1;31m',  # light red
    "lgreen": '\033[1;32m',  # light green
    "yellow": '\033[1;33m',  # yellow
    "cyan": '\033[36m',  # cyan
    "blue": '\033[34m',  # blue
    "green": '\033[32m',  # green
    "orange": '\033[33m',  # orange
    "red": '\033[31m',  # red
    "magenta": "\033[35m",  # magenta
    "white": "\033[0m",  # white
    None: "\033[0m",  # end
}


def color(string, color):
    # Wrap `string` in the ANSI escape for `color` and reset afterwards.
    return "%s%s%s" % (SHELL_COLORS[color], string, SHELL_COLORS[None])


def clear_color(string):
    # Strip every ANSI color escape sequence from `string`.
    return re.sub("\\033\[[^m]+m", "", string)


def print_table(items, header=None, wrap=True, max_col_width=20,
                wrap_style="wrap", row_line=False, fix_col_width=False):
    ''' Prints a matrix of data as a human readable table. Matrix
    should be a list of lists containing any type of values that can
    be converted into text strings.

    Two different column adjustment methods are supported through
    the *wrap_style* argument:

       wrap: it will wrap values to fit max_col_width
             (by extending cell height)
       cut: it will strip values to max_col_width

    If the *wrap* argument is set to False, column widths are set to fit all
    values in each column.

    This code is free software. Updates can be found at
    https://gist.github.com/jhcepas/5884168

    # print_table([[3,2, {"whatever":1, "bla":[1,2]}],
    #              [5,"this is a test\n of wrapping text\n with the new function",777],
    #              [1,1,1]],
    #             header=[ "This is column number 1", "Column number 2", "col3"],
    #             wrap=True, max_col_width=15, wrap_style='wrap',
    #             row_line=True, fix_col_width=True)
    # This is column  | Column number   | col3
    # number 1        |  2              |
    # =============== | =============== | ===============
    # 3               | 2               | {'bla': [1, 2],
    #                 |                 |  'whatever': 1}
    # --------------- | --------------- | ---------------
    # 5               | this is a test  | 777
    #                 |  of             |
    #                 | wrapping text   |
    #                 |  with the new   |
    #                 | function        |
    # --------------- | --------------- | ---------------
    # 1               | 1               | 1
    # =============== | =============== | ===============
    '''

    def safelen(string):
        # Visible length: ignore ANSI color escapes when measuring cells.
        return len(clear_color(string))

    # c2maxw maps column index -> maximum printable width for that column.
    if isinstance(fix_col_width, list):
        # Per-column fixed widths supplied by the caller.
        c2maxw = dict([(i, fix_col_width[i]) for i in xrange(len(items[0]))])
        wrap = True
    elif fix_col_width == True:
        # Every column capped at max_col_width.
        c2maxw = dict([(i, max_col_width) for i in xrange(len(items[0]))])
        wrap = True
    elif not wrap:
        # Fit the widest value of each column exactly.
        c2maxw = dict([(i, max([safelen(str(e[i])) for e in items]))
                       for i in xrange(len(items[0]))])
    else:
        # Fit content but never exceed max_col_width.
        c2maxw = dict([(i, min(max_col_width, max([safelen(str(e[i])) for e in items])))
                       for i in xrange(len(items[0]))])

    if header:
        # current_item == -1 marks "we are printing the header row".
        current_item = -1
        row = header
        if wrap and not fix_col_width:
            # Widen columns to fit header labels (still capped when wrapping).
            for col, maxw in c2maxw.iteritems():
                c2maxw[col] = max(maxw, safelen(header[col]))
                if wrap:
                    c2maxw[col] = min(c2maxw[col], max_col_width)
    else:
        current_item = 0
        row = items[current_item]

    # Main loop: `row` is either a data/header row or an `extra_line`
    # continuation row produced by wrapping a too-long cell.
    while row:
        is_extra = False
        values = []
        extra_line = [""] * len(row)
        for col, val in enumerate(row):
            cwidth = c2maxw[col]
            wrap_width = cwidth
            val = clear_color(str(val))
            try:
                newline_i = val.index("\n")
            except ValueError:
                pass
            else:
                # Break at the first embedded newline instead of the column edge.
                wrap_width = min(newline_i + 1, wrap_width)
                val = val.replace("\n", " ", 1)
            if wrap and safelen(val) > wrap_width:
                if wrap_style == "cut":
                    # Truncate and flag with a trailing '+'.
                    val = val[:wrap_width - 1] + "+"
                elif wrap_style == "wrap":
                    # Carry the overflow into the next (extra) line.
                    extra_line[col] = val[wrap_width:]
                    val = val[:wrap_width]
            val = val.ljust(cwidth)
            values.append(val)
        print ' | '.join(values)
        if not set(extra_line) - set(['']):
            # No cell overflowed: advance to the next real row.
            if header and current_item == -1:
                # '=' rule right under the header.
                print ' | '.join(['=' * c2maxw[col] for col in xrange(len(row))])
            current_item += 1
            try:
                row = items[current_item]
            except IndexError:
                row = None
        else:
            # Print the wrapped remainders as an extra line of the same row.
            row = extra_line
            is_extra = True
        if row_line and not is_extra and not (header and current_item == 0):
            if row:
                # '-' separator between data rows.
                print ' | '.join(['-' * c2maxw[col] for col in xrange(len(row))])
            else:
                # '=' rule closing the table.
                print ' | '.join(['=' * c2maxw[col] for col in xrange(len(extra_line))])


def ask_filename(text):
    # Prompt (with readline completion disabled) until an existing path is given.
    readline.set_completer(None)
    fname = ""
    while not os.path.exists(fname):
        fname = raw_input(text)
    return fname


def ask(string, valid_values, default=-1, case_sensitive=False):
    """ Asks for a keyboard answer.

    Re-prompts until the reply is one of *valid_values*; an empty reply
    selects valid_values[default] when default >= 0.
    """
    v = None
    if not case_sensitive:
        valid_values = [value.lower() for value in valid_values]
    while v not in valid_values:
        v = raw_input("%s [%s]" % (string, ','.join(valid_values)))
        if v == '' and default >= 0:
            v = valid_values[default]
        if not case_sensitive:
            v = v.lower()
    return v


def timeit(f):
    # Decorator: print the wall-clock time taken by each call to `f`.
    def a_wrapper_accepting_arguments(*args, **kargs):
        t1 = time.time()
        r = f(*args, **kargs)
        print " ", f.func_name, time.time() - t1, "seconds"
        return r
    return a_wrapper_accepting_arguments
from __future__ import unicode_literals

import frappe
from frappe import _
from erpnext.setup.setup_wizard.operations.install_fixtures import add_market_segments


def execute():
    """Reload the Market Segment doctype and install the default
    market-segment fixture records.

    NOTE(review): shaped like a frappe migration patch (module-level
    ``execute`` entry point) — confirm it is registered in patches.txt.
    """
    frappe.reload_doc('crm', 'doctype', 'market_segment')
    # Install the fixtures in the site's configured language (fallback: en).
    frappe.local.lang = frappe.db.get_default("lang") or 'en'
    add_market_segments()
# Demo/gallery script: builds one window showing every pkinter widget
# alongside a standard tkinter menubar. Purely illustrative — widgets are
# created, laid out on a grid, and left with their default behavior.
import tkinter as tk
from tkinter import ttk
import pkinter as pk

root = tk.Tk()

# --- Standard tkinter menubar with File and Help menus. ---
menu = tk.Menu(root, type="menubar")

filemenu = tk.Menu(menu)
filemenu.add_command(label="New")
filemenu.add_command(label="Save")
menu.add_cascade(label="File", menu=filemenu)

helpmenu = tk.Menu(menu)
helpmenu.add_checkbutton(label="About")
helpmenu.add_separator()
helpmenu.add_checkbutton(label="Changelog")
menu.add_cascade(label="Help", menu=helpmenu)

root.configure(menu=menu)

##################################################

# Toolbar across the top with button / checkbutton / radiobutton groups.
toolbar = pk.Toolbar(root)
toolbar.pack(side="top", fill="x")

button = toolbar.add_button(text="Button")
toolbar.add_separator()
checkbutton1 = toolbar.add_checkbutton(text="CheckButton 1")
checkbutton2 = toolbar.add_checkbutton(text="CheckButton 2")
toolbar.add_separator()
radiobutton1 = toolbar.add_radiobutton(text="RadioButton 1", value=0)
radiobutton2 = toolbar.add_radiobutton(text="RadioButton 2", value=1)
radiobutton3 = toolbar.add_radiobutton(text="RadioButton 3", value=2)
toolbar.add_separator()

##################################################

# Statusbar showing context help for toolbar widgets and menus via
# a single shared StringVar.
statusbar = pk.Statusbar(root)
statusbar.pack(side="bottom", fill="x")

variable = tk.StringVar()

statusbar.bind_widget(button, variable, "A Button", "")
statusbar.bind_widget(checkbutton1, variable, "A Checkbutton", "")
statusbar.bind_widget(checkbutton2, variable, "Another Checkbutton", "")
statusbar.bind_widget(radiobutton1, variable, "A Radiobutton", "")
statusbar.bind_widget(radiobutton2, variable, "Another Radiobutton", "")
statusbar.bind_widget(radiobutton3, variable, "A Third Radiobutton", "")

statusbar.bind_menu(menu, variable, ["Open the File menu.", "Open the Help menu."])
statusbar.bind_menu(filemenu, variable, ["Tear-off the menu.", "Create a new file.", "Save the current file."])
statusbar.bind_menu(helpmenu, variable, ["Tear-off the menu.", "Open the About window.", "", "Open the Changelog."])

statusbar.add_variable(variable=variable)

##################################################

# Main content area: an 8x3 grid, one pkinter widget per cell.
frame = ttk.Frame(root)
frame.pack(fill="both")

##################################################

tlf = pk.ToggledLabelFrame(frame)
tlf.grid(row=0, column=0)

##################################################

for i in range(5):
    ttk.Button(tlf.frame).pack()

ls = pk.LabeledSeparator(frame, text="LabeledSeparator")
ls.grid(row=0, column=1)

##################################################

rs = pk.RoundingScale(frame, from_=0, to=5)
rs.grid(row=0, column=2)

##################################################

et = pk.EntryText(frame, text="EntryText")
et.grid(row=1, column=0)

##################################################

le = pk.LimitedEntry(frame)
le.grid(row=1, column=1)

##################################################

cpb = pk.ColourPickerButton(frame)
cpb.grid(row=1, column=2)

##################################################

el = pk.EditableLabel(frame, text="EditableLabel")
el.grid(row=2, column=0)

##################################################

cp = pk.CollapsiblePane(frame)
cp.grid(row=2, column=1)

for i in range(5):
    ttk.Button(cp.frame).pack()

##################################################

hl = pk.Hyperlink(frame, text="Hyperlink")
hl.grid(row=2, column=2)

##################################################

# PageView with three demo pages.
pv = pk.PageView(frame)
pv.grid(row=3, column=0)

frame1 = ttk.Frame(pv.frame)
for i in range(3):
    ttk.Button(frame1, text=i).pack(side="left")

frame2 = ttk.Frame(pv.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()

frame3 = ttk.Frame(pv.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")

pv.add(child=frame1)
pv.add(child=frame2)
pv.add(child=frame3)

##################################################

def func():
    # Callback fired by the BoundButton below (mouse click or "b" key).
    print("Function")

bb = pk.BoundButton(frame, text="BoundButton", key="b", command=func)
bb.grid(row=3, column=1)

##################################################

ve = pk.ValidEntry(frame, valid_list=["validentry", "validEntry", "Validentry", "ValidEntry"])
ve.grid(row=3, column=2)

##################################################

# ChoiceBook with three labelled pages.
cb = pk.ChoiceBook(frame)
cb.grid(row=4, column=0)

frame1 = ttk.Frame(cb.frame)
for i in range(3):
    ttk.Button(frame1, text=i).pack(side="left")

frame2 = ttk.Frame(cb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()

frame3 = ttk.Frame(cb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")

cb.add(child=frame1, label="Frame1")
cb.add(child=frame2, label="Frame2")
cb.add(child=frame3, label="Frame3")

##################################################

pe = pk.PasswordEntry(frame, cover_character="*")
pe.grid(row=4, column=1)

##################################################

iv = pk.InvalidEntry(frame, invalid_list=["invalidentry", "invalidEntry", "Invalidentry", "InvalidEntry"])
iv.grid(row=4, column=2)

##################################################

# ListBook with three labelled pages.
lb = pk.ListBook(frame)
lb.grid(row=5, column=0)

frame1 = ttk.Frame(lb.frame)
for i in range(3):
    ttk.Button(frame1, text=i).pack(side="left")

frame2 = ttk.Frame(lb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()

frame3 = ttk.Frame(lb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")

lb.add(child=frame1, label="Frame1")
lb.add(child=frame2, label="Frame2")
lb.add(child=frame3, label="Frame3")

##################################################

al = pk.AccelLabel(frame, label_text="AccelLabel", accelerator_text="Ctrl+A")
al.grid(row=5, column=1)

##################################################

ib = pk.InfoBar(frame, title="InfoBar", info="Shows information.")
ib.grid(row=5, column=2)

##################################################

# NOTE(review): `lb` is reused here, shadowing the ListBook reference above;
# harmless in this demo since neither is referenced again afterwards.
lb = pk.LockButton(frame)
lb.grid(row=6, column=0)

##################################################

tb = pk.ToggleButton(frame)
tb.grid(row=6, column=1)

##################################################

ss = pk.ScaleSwitch(frame)
ss.grid(row=6, column=2)

##################################################

bs = pk.ButtonSwitch(frame)
bs.grid(row=7, column=0)

##################################################

fp = pk.FilePicker(frame)
fp.grid(row=7, column=1)

##################################################

dp = pk.DirectoryPicker(frame)
dp.grid(row=7, column=2)

##################################################

pk.center_on_screen(root)

##################################################

tp = tk.Toplevel(root)
pk.center_on_parent(tp)

##################################################

root.mainloop()
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines video dataset handling.
"""


class Video(object):
    """Placeholder for a video dataset.

    Video handling is not implemented yet: constructing an instance
    always raises NotImplementedError.
    """

    def __init__(self):
        raise NotImplementedError()
": "H-301", "attr-address_2": "Street 1", "attr-address_3": "UK", "attr-postcode_zip": "G61 3NR", "attr-date_of_birth": "December 28, 1990", "attr-account_type": "savings", "attr-year_opened": "2000", "attr-account_status": "active" } @pytest.fixture(scope="module") def transcriptClaimMap(): return { 'inviter': 'Faber College', 'name': 'Transcript', 'status': "available (not yet issued)", "version": "1.2", "attr-student_name": "string", "attr-ssn": "string", "attr-degree": "string", "attr-year": "string", "attr-status": "string" } @pytest.fixture(scope="module") def jobCertClaimAttrValueMap(): return { "attr-first_name": "Alice", "attr-last_name": "Garcia", "attr-employee_status": "Permanent", "attr-experience": "3 years", "attr-salary_bracket": "between $50,000 to $100,000" } @pytest.fixture(scope="module") def jobCertificateClaimValueMap(jobCertClaimAttrValueMap): basic = { 'inviter': 'Acme Corp', 'name': 'Job-Certificate', 'status': "available (not yet issued)", "version": "0.2" } basic.update(jobCertClaimAttrValueMap) return basic @pytest.fixture(scope="module") def jobCertificateClaimMap(): return { 'inviter': 'Acme Corp', 'name': 'Job-Certificate', 'status': "available (not yet issued)", "version": "0.2", "attr-first_name": "string", "attr-last_name": "string", "attr-employee_status": "string", "attr-experience": "string", "attr-salary_bracket": "string" } @pytest.fixture(scope="module") def reqClaimOut(): return ["Found claim {name} in connection {inviter}", "Requesting claim {name} from {inviter}..."] # TODO Change name @pytest.fixture(scope="module") def reqClaimOut1(): return ["Found claim {name} in connection {inviter}", "Requesting claim {name} from {inviter}...", "Signature accepted.", 'Received claim "{name}".'] @pytest.fixture(scope="module") def rcvdTranscriptClaimOut(): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: ", "Version: {version}", "Attributes:", "student_name: {attr-student_name}", "ssn: {attr-ssn}", 
"degree: {attr-degree}", "year: {attr-year}", "status: {attr-status}" ] @pytest.fixture(scope="module") def rcvdBankingRelationshipClaimOut(): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: ", "Version: {version}", "Attributes:", "title: {attr-title}", "first_name: {attr-first_name}", "last_name: {attr-last_name}", "address_1: {attr-address_1}", "address_2: {attr-address_2}", "address_3: {attr-address_3}", "postcode_zip: {attr-postcode_zip}", "date_of_birth: {attr-date_of_birth}", "year_opened: {attr-year_opened}", "account_status: {attr-account_status}" ] @pytest.fixture(scope="module") def rcvdJobCertClaimOut(): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: ", "Version: {version}", "Attributes:", "first_name: {attr-first_name}", "last_name: {attr-last_name}", "employee_status: {attr-employee_status}", "experience: {attr-experience}", "salary_bracket: {attr-salary_bracket}" ] @pytest.fixture(scope="module") def showTranscriptClaimOut(nextCommandsToTryUsageLine): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: {status}", "Version: {version}", "Attributes:", "student_name", "ssn", "degree", "year", "status" ] + nextCommandsToTryUsageLine + \ ['request claim "{name}"'] @pytest.fixture(scope="module") def showJobCertClaimOut(nextCommandsToTryUsageLine): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: {status}", "Version: {version}", "Attributes:", "first_name", "last_name", "employee_status", "experience", "salary_bracket" ] + nextCommandsToTryUsageLine + \ ['request claim "{name}"'] @pytest.fixture(scope="module") def showBankingRelationshipClaimOut(nextCommandsToTryUsageLine): return ["Found claim {name} in connection {inviter}", "Name: {name}", "Status: {status}", "Version: {version}", "Attributes:", "title", "first_name", "last_name", "address_1", "address_2", "address_3", "postcode_zip", "date_of_birth", "account_type", "year_opened", 
"account_status" ] + nextCommandsToTryUsageLine + \ ['request claim "{name}"'] @pytest.fixture(scope="module") def showConnectionWithProofRequestsOut(): return ["Proof Request(s): {proof-requests}"] @pytest.fixture(scope="module") def showConnectionWithAvailableClaimsOut(): return ["Available Claim(s): {claims}"] @pytest.fixture(scope="module") def showAcceptedConnectionWithClaimReqsOut( showAcceptedConnectionOut, showConnectionWithProofRequestsOut, showConnectionWithAvailableClaimsOut, showConnectionSuggestion): return showAcceptedConnectionOut + showConnectionWithProofRequestsOut + \ showConnectionWithAvailableClaimsOut + \ showCo
nnectionSuggestion @pytest.fixture(scope="module") def showAcceptedConnectionWithoutAvailableClaimsOut( showAcceptedConnectionOut, showConnectionWithProofRequestsOut): return showAcceptedConnectionOut + showConnectionWithProofRequestsOut @pytest.fixture(scope="module") def showAcceptedConnectionWithAvailableClaimsOut( showAccept
edConnectionOut, showConnectionWithProofRequestsOut, showConnectionWithAvailableClaimsOut): return showAcceptedConnectionOut + showConnectionWithProofRequestsOut + \ showConnectionWithAvailableClaimsOut @pytest.fixture(scope="module") def showConnectionSuggestion(nextCommandsToTryUsageLine): return nextCommandsToTryUsageLine + \ ['show claim "{claims}"', 'request claim "{claims}"'] @pytest.fixture(scope="module") def showAcceptedConnectionOut(): return [ "Connection", "Name: {inviter}", "DID: {DID}", "Verification key: {verkey}", "Remote: {remote}", "Remote Verification key: {remote-verkey}", "Trust anchor: {inviter} (confirmed)", "Request nonce: {nonce}", "Request status: Accepted"] @pytest.fixture(scope="module") def showConnectionOut(nextCommandsToTryUsageLine, connectionNotYetSynced): return [ " Name: {inviter}", " DID: not yet assigned", " Trust anchor: {inviter} (not yet written to Indy)", " Verification key: <empty>", " Signing key: <hidden>", " Remote: {remote}", " Remote endpoint: {endpoint}", " Request nonce: {nonce}", " Request status: not verified, remote verkey unknown", " Last synced: {last_synced}"] + \ [""] + \ nextCommandsToTryUsageLine + \ [' sync "{inviter}"', ' accept request from "{inviter}"', '', ''] @pytest.fixture(scope="module") def showAcceptedSyncedConnectionOut(nextCommandsToTryUsageLine): return [ "Connection", "Name: {inviter}", "Trust anchor: {inviter} (confirmed)", "Verification key: ~", "Signing key: <hidden>",
from __future__ import print_function, division, absolute_import

# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
import logging
import sys

import six
import decorator
import dbus.service
import json
import re

from rhsmlib.dbus import exceptions

log = logging.getLogger(__name__)

__all__ = [
    'dbus_handle_exceptions',
    'dbus_service_method',
    'dbus_service_signal'
]


@decorator.decorator
def dbus_handle_exceptions(func, *args, **kwargs):
    """Decorator to handle exceptions, log them, and wrap them if necessary.

    Any exception escaping the wrapped D-Bus method is logged and re-raised
    as an RHSM1DBusException whose message is a JSON document with
    "exception", "severity" and "message" keys, preserving the original
    traceback. The broad `except Exception` is intentional: this is the
    process boundary where errors must cross the bus in a parseable form.
    """
    try:
        ret = func(*args, **kwargs)
        return ret
    except Exception as err:
        log.exception(err)
        trace = sys.exc_info()[2]

        severity = "error"
        # Remove "HTTP error (...): " string from the messages:
        # \x28 and \x29 are literal '(' and ')' (kept escaped so the pattern
        # itself contains no regex metacharacters).
        pattern = '^HTTP error \x28.*\x29: '
        err_msg = re.sub(pattern, '', str(err))
        # Modify severity of some exception here
        if "Ignoring request to auto-attach. It is disabled for org" in err_msg:
            severity = "warning"
        # An explicit severity attribute on the exception wins over heuristics.
        if hasattr(err, 'severity'):
            severity = err.severity
        # Raise exception string as JSON string. Thus it can be parsed and printed properly.
        error_msg = json.dumps(
            {
                "exception": type(err).__name__,
                "severity": severity,
                "message": err_msg
            }
        )
        # six.reraise keeps the original traceback attached to the new exception.
        six.reraise(exceptions.RHSM1DBusException, exceptions.RHSM1DBusException(error_msg), trace)


def dbus_service_method(*args, **kwargs):
    """Wrapper around dbus.service.method that defaults sender_keyword."""
    # Tell python-dbus that "sender" will be the keyword to use for the sender unless otherwise
    # defined.
    kwargs.setdefault("sender_keyword", "sender")
    return dbus.service.method(*args, **kwargs)


def dbus_service_signal(*args, **kwargs):
    """
    Decorator used for signal; passes straight through to dbus.service.signal.

    :param args: positional arguments for dbus.service.signal
    :param kwargs: keyword arguments for dbus.service.signal
    :return: the dbus signal decorator
    """
    return dbus.service.signal(*args, **kwargs)
# Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Retrieves metrics about services from KairosDB."""

from datetime import datetime, timedelta
import logging

import requests

from zoe_lib.config import get_conf

log = logging.getLogger(__name__)


class KairosDBInMetrics:
    """KairosDB metrics."""
    def __init__(self):
        # Endpoint URLs derived from the configured KairosDB base URL.
        self.base_url = get_conf().kairosdb_url
        # tags_url and list_metrics_url are defined for completeness; the
        # methods below only use metrics_url.
        self.tags_url = self.base_url + '/api/v1/datapoints/query/tags'
        self.metrics_url = self.base_url + '/api/v1/datapoints/query'
        self.list_metrics_url = self.base_url + '/api/v1/metricnames'

    def _prepare_query(self):
        """Return a query skeleton (UTC, empty metric list, recent time range)."""
        query = {
            'time_zone': 'UTC',
            'metrics': []
        }
        self._add_time_range(query)
        return query

    def _add_time_range(self, query, minutes_from_now=10):
        """Restrict `query` to the last `minutes_from_now` minutes.

        NOTE(review): datetime.now() is naive local time while the query
        declares 'UTC' — verify the host clock/timezone assumptions.
        """
        end = datetime.now()
        start = end - timedelta(minutes=minutes_from_now)
        # KairosDB expects millisecond epoch timestamps.
        query['start_absolute'] = int(start.timestamp() * 1000)
        query['end_absolute'] = int(end.timestamp() * 1000)

    def _add_metric(self, query, metric_name: str, tags, aggregators, limit: int):
        """Append one metric entry to `query`; skip optional parts when unset."""
        metric = {
            'name': metric_name,
        }
        if tags is not None:
            metric['tags'] = tags
        if aggregators is not None:
            metric['aggregators'] = aggregators
        if limit > 0:
            metric['limit'] = limit
        query['metrics'].append(metric)

    def get_service_usage(self, service_id):
        """Query the DB for the current usage metrics.

        Builds one query with two metrics (CPU percent, memory bytes) for the
        given Zoe service and POSTs it; returns None when KairosDB is
        unreachable or the query fails.
        """
        query = self._prepare_query()

        tags_cpu = {
            "field": ["usage_percent"],
            "zoe_service_id": service_id
        }
        # Scale from percent to a 0-1 fraction, then sum per minute.
        aggregators_cpu = [
            {"name": "scale", "factor": "0.01"},
            {"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
        ]
        self._add_metric(query, "docker_container_cpu", tags_cpu, aggregators_cpu, limit=0)

        tags_memory = {
            "field": ["usage"],
            "zoe_service_id": service_id
        }
        aggregators_memory = [
            {"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
        ]
        self._add_metric(query, "docker_container_mem", tags_memory, aggregators_memory, limit=0)

        try:
            req = requests.post(self.metrics_url, json=query)
        except requests.exceptions.ConnectionError:
            return None

        return self._extract_data(req)

    def _extract_data(self, response):
        """Parse a KairosDB response into {'cpu_usage', 'mem_usage'} or None.

        The two queries come back in the same order they were submitted
        (CPU first, then memory); the most recent datapoint value is used,
        defaulting to 0 when a query returned no samples.
        """
        if response is None:
            return None
        if response.status_code != 200:
            error_msg = ''
            for error in response.json()['errors']:
                error_msg += ' {}'.format(error)
            log.error('kairosdb query error: {}'.format(error_msg))
            return None
        else:
            data = response.json()
            cpu_results = data['queries'][0]
            mem_results = data['queries'][1]
            if cpu_results['sample_size'] > 0:
                assert len(cpu_results['results']) == 1
                # values entries are [timestamp, value]; take the latest value.
                cpu_usage = cpu_results['results'][0]['values'][-1][1]
            else:
                cpu_usage = 0

            if mem_results['sample_size'] > 0:
                assert len(mem_results['results']) == 1
                mem_usage = mem_results['results'][0]['values'][-1][1]
            else:
                mem_usage = 0

            return {
                'cpu_usage': cpu_usage,
                'mem_usage': mem_usage
            }
f
rom django.contrib import admin from cobra.core.loading import get_model
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for suggestion views.
"""

from weblate.trans.models.unitdata import Suggestion
from weblate.trans.tests.test_views import ViewTestCase


class SuggestionsTest(ViewTestCase):
    """End-to-end tests of adding, deleting, accepting and voting on
    translation suggestions through the translate view."""

    def add_suggestion_1(self):
        # Suggest "Nazdar svete!" for the "Hello, world!" unit.
        return self.edit_unit(
            'Hello, world!\n',
            'Nazdar svete!\n',
            suggest='yes'
        )

    def add_suggestion_2(self):
        # Suggest "Ahoj svete!" for the same unit.
        return self.edit_unit(
            'Hello, world!\n',
            'Ahoj svete!\n',
            suggest='yes'
        )

    def test_add(self):
        translate_url = self.get_translation().get_translate_url()

        # Try empty suggestion (should not be added)
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            suggest='yes'
        )
        # We should stay on same message
        self.assertRedirectsOffset(response, translate_url, 0)

        # Add first suggestion
        response = self.add_suggestion_1()
        # We should get to second message
        self.assertRedirectsOffset(response, translate_url, 1)

        # Add second suggestion
        response = self.add_suggestion_2()
        # We should get to second message
        self.assertRedirectsOffset(response, translate_url, 1)

        # Reload from database
        unit = self.get_unit()
        translation = self.subproject.translation_set.get(
            language_code='cs'
        )

        # Check number of suggestions
        self.assertEqual(translation.have_suggestion, 1)
        self.assertBackend(0)

        # Unit should not be translated
        self.assertEqual(len(unit.checks()), 0)
        self.assertFalse(unit.translated)
        self.assertFalse(unit.fuzzy)
        self.assertEqual(len(self.get_unit().suggestions()), 2)

    def test_delete(self):
        translate_url = self.get_translation().get_translate_url()

        # Create two suggestions
        self.add_suggestion_1()
        self.add_suggestion_2()

        # Get ids of created suggestions
        suggestions = [sug.pk for sug in self.get_unit().suggestions()]
        self.assertEqual(len(suggestions), 2)

        # Delete one of suggestions
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            delete=suggestions[0],
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        # Reload from database
        unit = self.get_unit()
        translation = self.subproject.translation_set.get(
            language_code='cs'
        )

        # Check number of suggestions
        self.assertEqual(translation.have_suggestion, 1)
        self.assertBackend(0)

        # Unit should not be translated
        self.assertEqual(len(unit.checks()), 0)
        self.assertFalse(unit.translated)
        self.assertFalse(unit.fuzzy)
        self.assertEqual(len(self.get_unit().suggestions()), 1)

    def test_accept(self):
        translate_url = self.get_translation().get_translate_url()

        # Create two suggestions
        self.add_suggestion_1()
        self.add_suggestion_2()

        # Get ids of created suggestions
        suggestions = [sug.pk for sug in self.get_unit().suggestions()]
        self.assertEqual(len(suggestions), 2)

        # Accept one of suggestions (the second one: "Ahoj svete!")
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            accept=suggestions[1],
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        # Reload from database
        unit = self.get_unit()
        translation = self.subproject.translation_set.get(
            language_code='cs'
        )

        # Check number of suggestions
        self.assertEqual(translation.have_suggestion, 1)

        # Unit should be translated
        self.assertEqual(len(unit.checks()), 0)
        self.assertTrue(unit.translated)
        self.assertFalse(unit.fuzzy)
        self.assertEqual(unit.target, 'Ahoj svete!\n')
        self.assertBackend(1)

        self.assertEqual(len(self.get_unit().suggestions()), 1)

    def test_accept_anonymous(self):
        translate_url = self.get_translation().get_translate_url()

        # Create the suggestion while logged out, so it has no user attached.
        self.client.logout()
        # Create suggestions
        self.add_suggestion_1()

        self.client.login(username='testuser', password='testpassword')

        # Get ids of created suggestion
        suggestions = list(self.get_unit().suggestions())
        self.assertEqual(len(suggestions), 1)
        self.assertIsNone(suggestions[0].user)

        # Accept one of suggestions
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            accept=suggestions[0].pk,
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        # Reload from database
        unit = self.get_unit()
        translation = self.subproject.translation_set.get(
            language_code='cs'
        )

        # Check number of suggestions
        self.assertEqual(translation.have_suggestion, 0)

        # Unit should be translated
        self.assertEqual(unit.target, 'Nazdar svete!\n')

    def test_vote(self):
        translate_url = self.get_translation().get_translate_url()

        # Enable voting but disable automatic acceptance.
        self.subproject.suggestion_voting = True
        self.subproject.suggestion_autoaccept = 0
        self.subproject.save()

        self.add_suggestion_1()

        suggestion_id = self.get_unit().suggestions()[0].pk

        # An upvote brings the score to +1 ...
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            upvote=suggestion_id,
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        suggestion = Suggestion.objects.get(pk=suggestion_id)
        self.assertEqual(
            suggestion.get_num_votes(), 1
        )

        # ... and a downvote from the same user replaces it, giving -1.
        response = self.edit_unit(
            'Hello, world!\n',
            '',
            downvote=suggestion_id,
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        suggestion = Suggestion.objects.get(pk=suggestion_id)
        self.assertEqual(
            suggestion.get_num_votes(), -1
        )

    def test_vote_autoaccept(self):
        self.add_suggestion_1()

        translate_url = self.get_translation().get_translate_url()
        # With autoaccept threshold 1, a single upvote accepts the suggestion.
        self.subproject.suggestion_voting = True
        self.subproject.suggestion_autoaccept = 1
        self.subproject.save()

        suggestion_id = self.get_unit().suggestions()[0].pk

        response = self.edit_unit(
            'Hello, world!\n',
            '',
            upvote=suggestion_id,
        )
        self.assertRedirectsOffset(response, translate_url, 0)

        # Reload from database
        unit = self.get_unit()
        translation = self.subproject.translation_set.get(
            language_code='cs'
        )

        # Check number of suggestions
        self.assertEqual(translation.have_suggestion, 0)

        # Unit should be translated
        self.assertEqual(len(unit.checks()), 0)
        self.assertTrue(unit.translated)
        self.assertFalse(unit.fuzzy)
        self.assertEqual(unit.target, 'Nazdar svete!\n')
        self.assertBackend(1)
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

__author__ = "Sergi Blanch-Torne"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"

import os
import sys  # bugfix: main() uses sys.argv/sys.exit but sys was never imported
import traceback

from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.util.ui import UILoadable


@UILoadable(with_ui="_ui")
class ActionForm(TaurusWidget):
    """Taurus widget whose layout is loaded from ``ui/actionWidget.ui``.

    The optional ``name`` argument is presumably a class or function whose
    ``__name__`` is used as a log prefix; anything without a ``__name__``
    falls back to "ActionForm".
    """

    def __init__(self, parent=None, name=None, designMode=False):
        try:
            self.__name = name.__name__
        except AttributeError:
            # was a bare `except:`; only the __name__ lookup can fail here,
            # and catching everything hid real errors (incl. KeyboardInterrupt)
            self.__name = "ActionForm"
        super(ActionForm, self).__init__(parent, designMode=designMode)
        try:
            self.debug("[%s]__init__()" % (self.__name))
            basePath = os.path.dirname(__file__)
            if len(basePath) == 0:
                basePath = '.'
            self.loadUi(filename="actionWidget.ui", path=basePath + "/ui")
        except Exception as e:
            # Keep the widget alive even when the .ui file cannot be loaded;
            # report the failure through every available channel.
            self.warning("[%s]__init__(): Widget exception! %s"
                         % (self.__name, e))
            traceback.print_exc()
            self.traceback()

    @classmethod
    def getQtDesignerPluginInfo(cls):
        """Describe this widget for the Qt Designer plugin catalogue."""
        ret = TaurusWidget.getQtDesignerPluginInfo()
        ret['module'] = 'actionform'
        ret['group'] = 'Taurus Linac Widgets'
        # Bugfix: the icon path was assigned to 'container' and then
        # immediately overwritten by the boolean below, so the plugin
        # never got an icon. The path belongs under the 'icon' key.
        ret['icon'] = ':/designer/dialogbuttonbox.png'
        ret['container'] = False
        return ret


def main():
    """Show a standalone ActionForm (requires a Qt display)."""
    app = Qt.QApplication(sys.argv)
    w = ActionForm()
    w.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
# Copyright © 2018 Red Hat, Inc. # # This file is part of Bodhi. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTA
BILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floo
r, Boston, MA 02110-1301, USA. """Test the bodhi.server.views package."""
import numpy as np
import theano
import theano.tensor as T
from theano_utils import sharedX, floatX, intX


def uniform(shape, scale=0.05):
    """Shared variable with entries drawn i.i.d. from U(-scale, scale)."""
    samples = np.random.uniform(low=-scale, high=scale, size=shape)
    return sharedX(samples)


def normal(shape, scale=0.05):
    """Shared variable with entries drawn i.i.d. from N(0, scale**2)."""
    samples = np.random.randn(*shape) * scale
    return sharedX(samples)


def orthogonal(shape, scale=1.1):
    """Orthogonal init via SVD (benanne/lasagne trick; faster than QR)."""
    two_d = (shape[0], np.prod(shape[1:]))
    gaussian = np.random.normal(0.0, 1.0, two_d)
    left, _, right = np.linalg.svd(gaussian, full_matrices=False)
    # svd returns two orthonormal factors; exactly one has the flattened
    # shape we asked for -- keep that one.
    if left.shape == two_d:
        ortho = left
    else:
        ortho = right
    ortho = ortho.reshape(shape)
    return sharedX(scale * ortho[:shape[0], :shape[1]])
try:
    from DevelopmentConfig import NasConf
    print("Loaded DevelopementConf file")
except ImportError:
    from Config import NasConf
    print("Loaded Conf file")
from ConfigParser import config_parser_class_tests, ConfigParser
from Partition import partition_class_tests
from Disk import disk_class_tests

__author__ = 'm'

# todo .gitignore
# todo learn a proper unit tests


def py_nas_tests():
    """Smoke-test the NAS helper classes; raises AssertionError on failure."""
    try:
        ConfigParser(NasConf)
    except Exception as err:
        assert False, 'Failed to parse NasConfig\n' + str(err)
    assert partition_class_tests(), 'Partition class tests have failed.'
    assert disk_class_tests(), 'Disk class tests have failed.'
    assert config_parser_class_tests(), 'Config parser tests have failed'
    # todo parted tests
    # todo hdparm tests


py_nas_tests()

# todo blkid wrapper
# URL routes for the email-alternative view.
# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10 -- this module pins the project to an old Django; confirm the target
# version before modernizing to a plain ``urlpatterns`` list.
from django.conf.urls import patterns, url

from .views import EmailAlternativeView


# Matches /email_alternative/<pk>/ where <pk> is a numeric primary key.
urlpatterns = patterns(
    '',
    url(r'^email_alternative/(?P<pk>\d+)/$',
        EmailAlternativeView.as_view(),
        name='email_alternative'),
)
import re
import unicodedata

from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist

# List of words you're not allowed to use as a slug
RESERVED_KEYWORDS = [
    "account",
    "add_to_network",
    "cache",
    "configuration",
    "content",
    "comment",
    "create",
    "delete",
    "download",
    "id",
    "invitations",
    "join",
    "media",
    "media_resource",
    "menu_builder",
    "new",
    "resource",
    "remove_from_network",
    "search",
    "static",
    "twistranet",
    "twistable",
]

# Builds "(?!kw1$|kw2$|...|kwN$)": the "$|" join plus the trailing "$" in
# the format string anchors every alternative, so a slug can never be
# exactly equal to a reserved keyword.
rsvd_kw = "$|".join(RESERVED_KEYWORDS)
SLUG_REGEX = r"(?!%s$)[a-zA-Z_][a-zA-Z0-9_\-\.]*" % rsvd_kw
# XXX TODO: The . must not be last character in the slug
FULL_SLUG_REGEX = "^%s$" % SLUG_REGEX


def slugify(value):
    """
    Transform a string value into a 50 characters slug

    Python 2 only: byte strings are decoded (ignoring undecodable bytes),
    accents are folded to ASCII, whitespace and [.@] become underscores,
    and a "0" is appended until the result is not a reserved keyword.
    """
    if not isinstance(value, unicode):
        # Double-check against invalid encodings
        value = unicode(value, errors='ignore')
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # Raw strings for the patterns: '\s' / '\w' are regex escapes, not
    # string escapes (the original non-raw forms trigger invalid-escape
    # deprecation warnings; the compiled patterns are identical).
    value = unicode(re.sub(r'\s+', '_', value))
    value = unicode(re.sub(r'[.@]', '_', value))
    value = unicode(re.sub(r'[^\w\s_-]', '', value).strip().lower())
    # If value starts with numbers, prefix it
    if re.match(r"[0-9]", value):
        value = u"_%s" % value
    # Double-check if we've slugified this correctly; if the result is
    # reserved or otherwise invalid, append "0" and retry recursively.
    if not re.search(FULL_SLUG_REGEX, value):
        return slugify(u"%s0" % value)
    return value[:50]
F ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Main script to launch AugMix training on ImageNet. Currently only supports ResNet-50 training. Example usage: `python imagenet.py <path/to/ImageNet> <path/to/ImageNet-C>` """ from __future__ import print_function import argparse import os import shutil import time import augmentations import numpy as np import torch import torch.backends.cudnn as cudnn import torch.nn.functional as F from torchvision import datasets from torchvision import models from torchvision import transforms augmentations.IMAGE_SIZE = 224 model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) parser = argparse.ArgumentParser(description='Trains an ImageNet Classifier') parser.add_argument( 'clean_data', metavar='DIR', help='path to clean ImageNet dataset') parser.add_argument( 'corrupted_data', metavar='DIR_C', help='path to ImageNet-C dataset') parser.add_argument( '--model', '-m', default='resnet50', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') # Optimization options parser.add_argument( '--epochs', '-e', type=int, default=90, help='Number of epochs to train.') parser.add_argument( '--learning-rate', '-lr', type=float, default=0.1, help='Initial learning rate.') parser.add_argument( '--batch-size', '-b', type=int, default=256, help='Batch size.') parser.add_argument('--eval-batch-size', type=int, default=1000) parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.') parser.add_argument( '--decay', '-wd', type=float, default=0.0001, help='Weight decay (L2 penalty).') # AugMix options parser.add_argument( '--mixture-width', default=3, type=int, help='Number of augmentation chains to mix per augmented example') 
parser.add_argument( '--mixture-depth', default=-1, type=int, help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]') parser.add_argument( '--aug-severity', default=1, type=int, help='Severity of base augmentation operators') parser.add_argument( '--aug-prob-coeff', default=1., type=float, help='Probability distribution coefficients') pars
er.add_argument( '--no-jsd', '-nj', action='store_true', help='Turn off JSD consistency loss.') parser.add_argument( '--all-ops', '-
all', action='store_true', help='Turn on all operations (+brightness,contrast,color,sharpness).') # Checkpointing options parser.add_argument( '--save', '-s', type=str, default='./snapshots', help='Folder to save checkpoints.') parser.add_argument( '--resume', '-r', type=str, default='', help='Checkpoint path for resume / test.') parser.add_argument('--evaluate', action='store_true', help='Eval only.') parser.add_argument( '--print-freq', type=int, default=10, help='Training loss print frequency (batches).') parser.add_argument( '--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') # Acceleration parser.add_argument( '--num-workers', type=int, default=4, help='Number of pre-fetching threads.') args = parser.parse_args() CORRUPTIONS = [ 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression' ] # Raw AlexNet errors taken from https://github.com/hendrycks/robustness ALEXNET_ERR = [ 0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360, 0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840, 0.606500 ] def adjust_learning_rate(optimizer, epoch): """Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs.""" b = args.batch_size / 256. 
k = args.epochs // 3 if epoch < k: m = 1 elif epoch < 2 * k: m = 0.1 else: m = 0.01 lr = args.learning_rate * m * b for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k.""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def compute_mce(corruption_accs): """Compute mCE (mean Corruption Error) normalized by AlexNet performance.""" mce = 0. for i in range(len(CORRUPTIONS)): avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]]) ce = 100 * avg_err / ALEXNET_ERR[i] mce += ce / 15 return mce def aug(image, preprocess): """Perform AugMix augmentations and compute mixture. Args: image: PIL.Image input image preprocess: Preprocessing function which should return a torch tensor. Returns: mixed: Augmented and mixed image. 
""" aug_list = augmentations.augmentations if args.all_ops: aug_list = augmentations.augmentations_all ws = np.float32( np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width)) m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff)) mix = torch.zeros_like(preprocess(image)) for i in range(args.mixture_width): image_aug = image.copy() depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint( 1, 4) for _ in range(depth): op = np.random.choice(aug_list) image_aug = op(image_aug, args.aug_severity) # Preprocessing commutes since all coefficients are convex mix += ws[i] * preprocess(image_aug) mixed = (1 - m) * preprocess(image) + m * mix return mixed class AugMixDataset(torch.utils.data.Dataset): """Dataset wrapper to perform AugMix augmentation.""" def __init__(self, dataset, preprocess, no_jsd=False): self.dataset = dataset self.preprocess = preprocess self.no_jsd = no_jsd def __getitem__(self, i): x, y = self.dataset[i] if self.no_jsd: return aug(x, self.preprocess), y else: im_tuple = (self.preprocess(x), aug(x, self.preprocess), aug(x, self.preprocess)) return im_tuple, y def __len__(self): return len(self.dataset) def train(net, train_loader, optimizer): """Train for one epoch.""" net.train() data_ema = 0. batch_ema = 0. loss_ema = 0. acc1_ema = 0. acc5_ema = 0. 
end = time.time() for i, (images, targets) in enumerate(train_loader): # Compute data loading time data_time = time.time() - end optimizer.zero_grad() if args.no_jsd: images = images.cuda() targets = targets.cuda() logits = net(images) loss = F.cross_entropy(logits, targets) acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking else: images_all = torch.cat(images, 0).cuda() targets = targets.cuda() logits_all = net(images_all) logits_clean, logits_aug1, logits_aug2 = torch.split( logits_all, images[0].size(0)) # Cross-entropy is only computed on clean images loss = F.cross_entropy(logits_clean, targets) p_clean, p_aug1, p_aug2 = F.softmax( logits_clean, dim=1), F.softmax( logits_aug1, dim=1), F.softmax( logits_aug2, dim=1) # Clamp mixture distribution to avoid exploding KL divergence p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log() loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') + F.kl
import asyncio
import ctypes
import os
import time
import unittest
import sys

clib = ctypes.CDLL('libc.so.6', use_errno=True)


class timespec(ctypes.Structure):
    # Mirrors C struct timespec: seconds + NANOseconds.
    _fields_ = [('tv_sec', ctypes.c_long),
                ('tv_nsec', ctypes.c_long)]


class itimerspec(ctypes.Structure):
    # Mirrors C struct itimerspec: it_value arms the first expiration,
    # it_interval (zero here) would make it periodic.
    _fields_ = [('it_interval', timespec),
                ('it_value', timespec)]


timerfd_create = clib.timerfd_create
timerfd_create.argtypes = [ctypes.c_int, ctypes.c_int]

timerfd_settime = clib.timerfd_settime
timerfd_settime.argtypes = [ctypes.c_int, ctypes.c_int,
                            ctypes.POINTER(itimerspec),
                            ctypes.POINTER(itimerspec)]

# On Linux TFD_NONBLOCK has the same value as O_NONBLOCK.
TFD_NONBLOCK = os.O_NONBLOCK

CLOCK_MONOTONIC = time.CLOCK_MONOTONIC


class Timer:
    """One-shot timer backed by a Linux timerfd, integrated with asyncio."""

    def __init__(self, *, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._fileno = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK)
        self._loop = loop
        loop.add_reader(self._fileno, self._reader)
        self._waiter = None

    def close(self):
        self._loop.remove_reader(self._fileno)
        os.close(self._fileno)

    def start(self, timeout):
        """Arm the timer to expire once, ``timeout`` seconds from now."""
        assert self._waiter is None, self._waiter
        secs = int(timeout)
        # Bug fix: tv_nsec is in NANOseconds. The original multiplied by
        # 1000000 (microseconds), so e.g. start(0.5) armed the fractional
        # part as 0.5 ms and the timer fired ~1000x too early.
        nsecs = int((timeout - secs) * 1000000000)
        param = itimerspec()
        param.it_value.tv_sec = secs
        param.it_value.tv_nsec = nsecs
        param.it_interval.tv_sec = 0
        param.it_interval.tv_nsec = 0
        timerfd_settime(self._fileno, 0, ctypes.byref(param), None)
        self._waiter = asyncio.Future(loop=self._loop)

    def _reader(self):
        # Invoked by the event loop when the timerfd becomes readable;
        # the 8-byte payload is the native-endian expiration count.
        try:
            data = os.read(self._fileno, 8)
        except BlockingIOError:
            return
        else:
            if self._waiter.done():
                return
            else:
                self._waiter.set_result(int.from_bytes(data, sys.byteorder))

    @asyncio.coroutine
    def wait(self):
        """Wait for the armed timer; returns the expiration count."""
        assert self._waiter is not None
        try:
            ret = yield from self._waiter
            return ret
        finally:
            self._waiter = None


class TestTimer(unittest.TestCase):

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

    def tearDown(self):
        self.loop.close()

    def test_ctor(self):
        timer = Timer(loop=self.loop)
        self.assertIs(self.loop, timer._loop)
        timer.close()

    def test_wait(self):
        timer = Timer(loop=self.loop)

        @asyncio.coroutine
        def go():
            timer.start(0.5)
            t0 = self.loop.time()
            ret = yield from timer.wait()
            t1 = self.loop.time()
            # The timer must not fire before its timeout elapses (small
            # slack for clock granularity). The original assertion
            # (0.5 > elapsed) only passed because of the nanosecond bug.
            self.assertGreaterEqual(t1 - t0, 0.45)
            self.assertEqual(1, ret)

        self.loop.run_until_complete(go())
        timer.close()


if __name__ == '__main__':
    unittest.main()
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Functions to generate various toy datasets. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import os import numpy as np import io PARSER = argparse.ArgumentParser(description="Generates toy datasets.") PARSER.add_argument( "--vocab_size", type=int, default=100, help="size of the vocabulary") PARSER.add_argument( "--num_examples", type=int, default=10000, help="number of examples") PARSER.add_argument( "--min_len", type=int, default=5, help="minimum sequence length") PARSER.add_argument( "--max_len", type=int, default=40, help="maximum sequence length") PARSER.add_argument( "--type", type=str, default="copy", choices=["copy", "reverse"], help="Type of dataet to generate. 
One of \"copy\" or \"reverse\"") PARSER.add_argument( "--output_dir", type=str, help="path to the output directory", required=True) ARGS = PARSER.parse_args() VOCABULARY = list([str(x) for x in range(ARGS.vocab_size - 1)]) # VOCABULARY += ["笑"] def get_target_token(source_tokens): num_odd = 0 num_even = 0 for token in source_tokens: if int(token) % 2 == 0: num_even += 1 else: num_odd += 1 if num_even == num_odd: return "EQUAL" elif num_even > num_odd: return "EVEN" else: return "ODD" def make_copy(num_examples, min_len, max_len): """ Generates a dataset where the target is equal to the source. Sequence lengths are chosen randomly from [min_len, max_len]. Args: num_examples: Number of examples to generate min_len: Minimum sequence length max_len: Maximum sequence length Returns: An iterator of (source, target) string tuples. """ ### Backup for old copy data generation # for _ in range(num_examples): # turn_length = np.random.choice(np.arange(min_len, max_len + 1)) # source_tokens = np.random.choice( # list(VOCABULARY), size=turn_length, replace=True) # target_tokens = source_tokens # yield " ".join(source_tokens), " ".join(target_tokens) # for _ in range(num_examples): turn_length = np.random.choice(np.arange(min_len, max_len + 1)) source_tokens = np.random.choice( list(VOCABULARY), size=turn_length, replace=True) target_token = get_target_token(source_tokens) yield " ".join(source_tokens), target_token def make_reverse(num_examples, min_len, max_len): """ Generates a dataset where the target is equal to the source reversed. Sequence lengths are chosen randomly from [min_len, max_len]. Args: num_examples: Number of examples to generate min_len: Minimum sequence length max_len: Maximum sequence length Returns: An iterator of (source, target) string tuples. 
""" for _ in range(num_examples): turn_length = np.random.choice(np.arange(min_len, max_len + 1)) source_tokens = np.random.choice( list(VOCABULARY), size=turn_length, replace=True) target_tokens = source_tokens[::-1] yield " ".join(source_tokens), " ".join(target_tokens) def write_parallel_text(sources, targets, output_prefix): """ Writes two files where each line corresponds to one example - [output_p
refix].sources.txt - [output_prefix].targets.txt Args: sources: Iterator of source strings targets: Iterator of target strings output_prefix: Prefix for the output file """ source_filename = os.path.abspath(os.path.join(output_prefix, "sources.txt")) target_filename = os.path.abspath(os.path.join(output_prefix, "targets.txt")) with io.open(source_filename, "w", encoding='utf8') as source_file: for record in sources: source_fi
le.write(record + "\n") print("Wrote {}".format(source_filename)) with io.open(target_filename, "w", encoding='utf8') as target_file: for record in targets: target_file.write(record + "\n") print("Wrote {}".format(target_filename)) def main(): """Main function""" if ARGS.type == "copy": generate_fn = make_copy elif ARGS.type == "reverse": generate_fn = make_reverse # Generate dataset examples = list(generate_fn(ARGS.num_examples, ARGS.min_len, ARGS.max_len)) try: os.makedirs(ARGS.output_dir) except OSError: if not os.path.isdir(ARGS.output_dir): raise # Write train data train_sources, train_targets = zip(*examples) write_parallel_text(train_sources, train_targets, ARGS.output_dir) if __name__ == "__main__": main()
""" Unit tests for ``wheezy.templates.engine.Engine``. """ import unittest class EngineTe
stCase(unittest.TestCase): """ Test the ``Engine``. """ def setUp(self): from wheezy.template.engine import Engine from wheezy.template.loader import DictLoader self.engine = Engine( loader=DictLoader(templates={}), extensions=[]) def test_template_not_found(self): """ Raises IOError. """ self.assertRaises(IOError, lambda: self.engine.get_template('x')) def test_import_not_found(self): """ Raises IOError. """ self.assertRaises(IOError, lambda: self.engine.import_name('x')) def test_remove_unknown_name(self): """ Invalidate name that is not known to engine. """ self.engine.remove('x') def test_remove_name(self): """ Invalidate name that is known to engine. """ self.engine.templates['x'] = 'x' self.engine.renders['x'] = 'x' self.engine.modules['x'] = 'x' self.engine.remove('x')
self.add_input(input) self.goto() # Not sure why this is still needed here. self.refresh() def __str__(self): return type(self).__name__ + ' ' + (self.name or str(self.ordinal)) def set_name(self, name): if self.name is None: del Tab.registry[self.ordinal] else: del Tab.registry[self.name] del self.name if name is None: Tab.registry[self.ordinal] = self else: if name in Tab.registry: match = re.match('(.*)([0-9]+)$', name) if match: name_base = match.group(1) counter = int(match.group(2)) else: name_base = name counter = 1 counter += 1 name = name_base + str(counter) while name in Tab.registry: counter += 1 name = name_base + str(counter) self.name = name Tab.registry[name] = self self.name = name self.update_tab_label() def close(self): for input in self.inputs: input.outputs.discard(self) self.inputs = [] for output in list(self.outputs): self.discard_output(output) self.strips = set() def goto(self): page = Common.gui.notebook_widget.page_num(self.widget) if page >= 0: Common.gui.notebook_widget.set_current_page(page) def select(self, complement=False): if complement: wanted = 2 else: wanted = True if self.selected != wanted: self.selected = wanted if self.hidden: self.unhide() else: self.update_tab_label() def unselect(self): if self.selected: self.selected = False self.update_tab_label() def freeze(self): if not self.frozen: self.frozen = True self.update_tab_label() def unfreeze(self): if self.frozen: self.frozen = False self.refresh() self.update_tab_label() def hide(self): if not self.hidden: page = Common.gui.notebook_widget.page_num(self.widget) assert page >= 0, self Common
.gui.notebook_widget.remove_page(page) self.undisplay_s
trips(self.strips) self.hidden = True def unhide(self): if self.hidden: Common.gui.notebook_widget.append_page(self.widget, gtk.Label()) Common.gui.notebook_widget.set_tab_reorderable(self.widget, True) self.display_strips(self.strips) self.hidden = False def add_input(self, tab): if self.strip_type is None: self.strip_type = tab.strip_type elif not issubclass(tab.strip_type, self.strip_type): raise Error("%s is not made of %s strips" % (tab, self.strip_type.__name__)) tab.add_output(self) def discard_input(self, tab): tab.discard_output(self) def add_output(self, tab): self.outputs.add(tab) if self not in tab.inputs: tab.inputs.append(self) if not tab.frozen: tab.refresh() def discard_output(self, tab): self.outputs.discard(tab) if self in tab.inputs: tab.inputs.remove(self) if not tab.frozen: tab.refresh() def refresh(self): strips = (self.recomputed_strips() | self.added) - self.deleted self.discard_strips(self.strips - strips) self.add_strips(strips) def recomputed_strips(self): # Shall be defined in derived classes. raise NotImplementedError def allowable_strips(self, strips): # Shall be defined in derived classes. 
raise NotImplementedError def add_strips(self, strips): strips = self.allowable_strips(strips) - self.strips self.strips |= strips for output in self.outputs: if not output.frozen: output.add_strips(strips) if not self.hidden: self.display_strips(strips) return strips def discard_strips(self, strips): strips = strips & self.strips self.strips -= strips for output in self.outputs: if not output.frozen: output.discard_strips(strips) if not self.hidden: self.undisplay_strips(strips) return strips def display_strips(self, strips): Scheduler.Thread(self.display_strips_thread(strips), self) def display_strips_thread(self, strips): for counter, strip in enumerate(sorted(strips)): if counter % 10 == 0 and counter: self.update_tab_label() yield 0 visible_strip = strip.visible_maker(self, strip) self.visible_strip[strip] = visible_strip self.tab_vbox.pack_start(visible_strip.widget, False, False) self.update_tab_label() def undisplay_strips(self, strips): Scheduler.Thread(self.undisplay_strips_thread(strips), self) def undisplay_strips_thread(self, strips): for counter, strip in enumerate(reversed(sorted(strips))): if counter % 10 == 0 and counter: self.update_tab_label() yield 0 self.tab_vbox.remove(self.visible_strip[strip].widget) del self.visible_strip[strip] self.update_tab_label() def create_widget(self): window = gtk.ScrolledWindow() window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) vbox = self.tab_vbox = gtk.VBox(False, Common.gui.spacing) window.add_with_viewport(vbox) window.show_all() Common.gui.notebook_widget.append_page(window, gtk.Label()) Common.gui.notebook_widget.set_tab_reorderable(window, True) self.widget = window def update_tab_label(self): text = '<span' if self.selected: if self.selected == 2: text += ' foreground="' + Common.gui.select2_color + '"' else: text += ' foreground="' + Common.gui.select_color + '"' if self.name is None: name = '%d' % self.ordinal text += ' style="italic"' else: name = self.name if not self.frozen: text += ' 
weight="bold"' text += ('>' + Common.escape(name) + '</span>' ' <span size="small" foreground="gray50">' + str(len(self.tab_vbox.get_children())) + '</span>') label = gtk.Label() label.set_markup(text) Common.gui.notebook_widget.set_tab_label(self.widget, label) class Preset(Tab): def __init__(self): self.preset_strips = set() Tab.__init__(self) def add_input(self): raise NotImplementedError def discard_input(self): raise NotImplementedError def recomputed_strips(self): return self.preset_strips def allowable_strips(self, strips): return strips & self.preset_strips class Periodic(Preset): period = None capacity = 200 def __init__(self): Preset.__init__(self) Scheduler.Thread(self.periodic_reload_thread()) def periodic_reload_thread(self): yield 0 while True: try: self.reload() except Common.Error: yield 10 else: yield self.period yield True def reload(self): # Shall be defined in derived classes. raise NotImplementedError def refresh(self): if self.capacity is not None: if len(self.preset_strips) > self.capacity: self.preset_strips = set( sorted(self.preset_strips)[-self.capacity:]) Preset.refresh(self) class Union(Tab): name_base = 'Union' def recomputed_strips(self): strips = se
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: messagepath/v1/visibility_rules.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2 from kik_unofficial.protobuf.common.v1 import model_pb2 as common_dot_v1_dot_model__pb2 import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='messagepath/v1/visibility_rules.proto', package='common.messagepath.v1', syntax='proto3', serialized_pb=_b('\n%messagepath/v1/visibility_rules.proto\x12\x15\x63ommon.messagepath.v1\x1a\x12\x63ommon_model.proto\x1a\x15\x63ommon/v1/model.proto\x1a\x19protobuf_validation.proto\"\xbd\x02\n\x19VisibilityRulesAttachment\x12\x32\n\tinitiator\x18\x01 \x01(\x0b\x32\x15.common.XiBareUserJidB\x08\x18\x01\xca\x9d%\x02\x08\x00\x12\x38\n\x0cinitiator_v2\x18\x04 \x01(\x0b\x32\".common.v1.XiBareUserJidOrAliasJid\x12$\n\x1c\x64rop_if_initiator_not_friend\x18\x02 \x01(\x08\x12\x43\n\x04rule\x18\x03 \x01(\x0e\x32\x35.common.messagepath.v1.VisibilityRulesAttachment.Rule\"G\n\x04Rule\x12\x1d\n\x19USE_SENDER_FOR_VISIBILITY\x10\x00\x12 \n\x1cUSE_INITIATOR_FOR_VISIBILITY\x10\x01\x42z\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x04MPTHb\x06proto3') , dependencies=[common__model__pb2.DESCRIPTOR,common_dot_v1_dot_model__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _VISIBILITYRULESATTACHMENT_RULE = _descriptor.EnumDescriptor( name='Rule', 
full_name='common.messagepath.v1.VisibilityRulesAttachment.Rule', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='USE_SENDER_FOR_VISIBILITY', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='USE_INITIATOR_FOR_VISIBILITY', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, s
erialized_start=381, serialized_end=45
2, ) _sym_db.RegisterEnumDescriptor(_VISIBILITYRULESATTACHMENT_RULE) _VISIBILITYRULESATTACHMENT = _descriptor.Descriptor( name='VisibilityRulesAttachment', full_name='common.messagepath.v1.VisibilityRulesAttachment', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='initiator', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))), _descriptor.FieldDescriptor( name='initiator_v2', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator_v2', index=1, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='drop_if_initiator_not_friend', full_name='common.messagepath.v1.VisibilityRulesAttachment.drop_if_initiator_not_friend', index=2, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rule', full_name='common.messagepath.v1.VisibilityRulesAttachment.rule', index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _VISIBILITYRULESATTACHMENT_RULE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=135, serialized_end=452, ) _VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].message_type = 
common__model__pb2._XIBAREUSERJID _VISIBILITYRULESATTACHMENT.fields_by_name['initiator_v2'].message_type = common_dot_v1_dot_model__pb2._XIBAREUSERJIDORALIASJID _VISIBILITYRULESATTACHMENT.fields_by_name['rule'].enum_type = _VISIBILITYRULESATTACHMENT_RULE _VISIBILITYRULESATTACHMENT_RULE.containing_type = _VISIBILITYRULESATTACHMENT DESCRIPTOR.message_types_by_name['VisibilityRulesAttachment'] = _VISIBILITYRULESATTACHMENT VisibilityRulesAttachment = _reflection.GeneratedProtocolMessageType('VisibilityRulesAttachment', (_message.Message,), dict( DESCRIPTOR = _VISIBILITYRULESATTACHMENT, __module__ = 'messagepath.v1.visibility_rules_pb2' # @@protoc_insertion_point(class_scope:common.messagepath.v1.VisibilityRulesAttachment) )) _sym_db.RegisterMessage(VisibilityRulesAttachment) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\004MPTH')) _VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].has_options = True _VISIBILITYRULESATTACHMENT.fields_by_name['initiator']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000')) # @@protoc_insertion_point(module_scope)
aseInsensitiveEnumMeta, str, Enum)): """Shared/dedicated workers. """ SHARED = "Shared" DEDICATED = "Dedicated" DYNAMIC = "Dynamic" class ConnectionStringType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Type of database. """ MY_SQL = "MySql" SQL_SERVER = "SQLServer" SQL_AZURE = "SQLAzure" CUSTOM = "Custom" NOTIFICATION_HUB = "NotificationHub" SERVICE_BUS = "ServiceBus" EVENT_HUB = "EventHub" API_HUB = "ApiHub" DOC_DB = "DocDb" REDIS_CACHE = "RedisCache" POSTGRE_SQL = "PostgreSQL" class ContinuousWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Job status. """ INITIALIZING = "Initializing" STARTING = "Starting" RUNNING = "Running" PENDING_RESTART = "PendingRestart" STOPPED = "Stopped" class CookieExpirationConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The convention used when determining the session cookie's expiration. """ FIXED_TIME = "FixedTime" IDENTITY_PROVIDER_DERIVED = "IdentityProviderDerived" class CustomDomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The status of the custom domain """ RETRIEVING_VALIDATION_TOKEN = "RetrievingValidationToken" VALIDATING = "Validating" ADDING = "Adding" READY = "Ready" FAILED = "Failed" DELETING = "Deleting" class CustomHostNameDnsRecordType(with_metaclass(CaseIns
ensitiveEnumMeta, str, Enum)): """Type of the DNS record. """ C_NAME = "CName" A = "A" class DatabaseType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Database type (e.g. SqlAzure / MySql). """ SQL_AZURE = "SqlAzure" MY_SQL = "MySql" LOCAL_MY_SQL = "LocalMySql" POSTGRE_SQL = "PostgreSql" class DetectorType(with_m
etaclass(CaseInsensitiveEnumMeta, str, Enum)): """Whether this detector is an Analysis Detector or not. """ DETECTOR = "Detector" ANALYSIS = "Analysis" CATEGORY_OVERVIEW = "CategoryOverview" class DnsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Current DNS type """ AZURE_DNS = "AzureDns" DEFAULT_DOMAIN_REGISTRAR_DNS = "DefaultDomainRegistrarDns" class DnsVerificationTestResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """DNS verification test result. """ PASSED = "Passed" FAILED = "Failed" SKIPPED = "Skipped" class DomainPatchResourcePropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal" EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange" SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive" class DomainPropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal" EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange" SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive" class DomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Domain registration status. 
""" ACTIVE = "Active" AWAITING = "Awaiting" CANCELLED = "Cancelled" CONFISCATED = "Confiscated" DISABLED = "Disabled" EXCLUDED = "Excluded" EXPIRED = "Expired" FAILED = "Failed" HELD = "Held" LOCKED = "Locked" PARKED = "Parked" PENDING = "Pending" RESERVED = "Reserved" REVERTED = "Reverted" SUSPENDED = "Suspended" TRANSFERRED = "Transferred" UNKNOWN = "Unknown" UNLOCKED = "Unlocked" UNPARKED = "Unparked" UPDATED = "Updated" JSON_CONVERTER_FAILED = "JsonConverterFailed" class DomainType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Valid values are Regular domain: Azure will charge the full price of domain registration, SoftDeleted: Purchasing this domain will simply restore it and this operation will not cost anything. """ REGULAR = "Regular" SOFT_DELETED = "SoftDeleted" class Enum10(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" WINDOWS_FUNCTIONS = "WindowsFunctions" LINUX_FUNCTIONS = "LinuxFunctions" ALL = "All" class Enum11(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" ALL = "All" class Enum12(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" ALL = "All" class Enum13(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" ALL = "All" class Enum14(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" ALL = "All" class Enum15(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): WINDOWS = "Windows" LINUX = "Linux" WINDOWS_FUNCTIONS = "WindowsFunctions" LINUX_FUNCTIONS = "LinuxFunctions" ALL = "All" class ForwardProxyConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The convention used to determine the url of the request made. """ NO_PROXY = "NoProxy" STANDARD = "Standard" CUSTOM = "Custom" class FrequencyUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The unit of time for how often the backup should be executed (e.g. 
for weekly backup, this should be set to Day and FrequencyInterval should be set to 7) """ DAY = "Day" HOUR = "Hour" class FrontEndServiceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): NODE_PORT = "NodePort" LOAD_BALANCER = "LoadBalancer" class FtpsState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """State of FTP / FTPS service """ ALL_ALLOWED = "AllAllowed" FTPS_ONLY = "FtpsOnly" DISABLED = "Disabled" class HostingEnvironmentStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Current status of the App Service Environment. """ PREPARING = "Preparing" READY = "Ready" SCALING = "Scaling" DELETING = "Deleting" class HostNameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Type of the hostname. """ VERIFIED = "Verified" MANAGED = "Managed" class HostType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Indicates whether the hostname is a standard or repository hostname. """ STANDARD = "Standard" REPOSITORY = "Repository" class InAvailabilityReasonType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """:code:`<code>Invalid</code>` indicates the name provided does not match Azure App Service naming requirements. :code:`<code>AlreadyExists</code>` indicates that the name is already in use and is therefore unavailable. """ INVALID = "Invalid" ALREADY_EXISTS = "AlreadyExists" class InsightStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Level of the most severe insight generated by the detector. """ CRITICAL = "Critical" WARNING = "Warning" INFO = "Info" SUCCESS = "Success" NONE = "None" class IpFilterTag(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Defines what this IP filter will be used for. This is to support IP filtering on proxies. 
""" DEFAULT = "Default" XFF_PROXY = "XffProxy" SERVICE_TAG = "ServiceTag" class IssueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Represents the type of the Detector """ SERVICE_INCIDENT = "ServiceIncident" APP_DEPLOYMENT = "AppDeployment" APP_CRASH = "AppCrash" RUNTIME_ISSUE_DETECTED = "RuntimeIssueDetected" ASE_DEPLOYMENT = "AseDeployment" USER_ISSUE = "UserIssue" PLATFORM_ISSUE = "PlatformIssue" OTHER = "Other" class KeyVaultSecretStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Status of the Key Vault secret. """ INITIALIZED = "Initialized" WAITING_ON_CERTIFICATE_ORDER = "WaitingOnCertificateOrder" SUCCEEDED = "Succeeded" CERTIFICATE_ORDER_FAILED = "CertificateOrderFailed" OPERATION_NOT_PERMITTED_ON_KEY
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet.  If not, see <http://www.gnu.org/licenses/>.

'''
Created on Jul 28, 2016

author: jakeret

Trains a tf_unet network to segment nerves in the Ultrasound Kaggle Dataset.
Requires the Kaggle dataset.
'''
from __future__ import print_function, division, absolute_import, unicode_literals

import os
import click
import numpy as np
from PIL import Image

from tf_unet import unet
from tf_unet import util
from tf_unet.image_util import ImageDataProvider

# Target (width, height) every image is resized to before training.
IMG_SIZE = (290, 210)


@click.command()
@click.option('--data_root', default="../../ultrasound/train")
@click.option('--output_path', default="./unet_trained_ultrasound")
@click.option('--training_iters', default=20)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=3)
@click.option('--features_root', default=32)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    """Train a tf_unet network on the Kaggle Ultrasound dataset and report the test error.

    :param data_root: directory holding the *.tif training images
    :param output_path: where the trained model / run directory is written
    :param training_iters: iterations per epoch
    :param epochs: number of training epochs
    :param restore: resume training from an existing output_path
    :param layers: depth of the U-Net
    :param features_root: feature maps in the first layer
    """
    print("Using data from: %s" % data_root)
    if not os.path.exists(data_root):
        raise IOError("Kaggle Ultrasound Dataset not found")

    data_provider = DataProvider(search_path=data_root + "/*.tif",
                                 mean=100,
                                 std=56)

    net = unet.Unet(channels=data_provider.channels,
                    n_class=data_provider.n_class,
                    layers=layers,
                    features_root=features_root,
                    #cost="dice_coefficient",
                    )

    # Reuse the given path when restoring; otherwise create a fresh run directory.
    path = output_path if restore else util.create_training_path(output_path)

    trainer = unet.Trainer(net, batch_size=1, norm_grads=False, optimizer="adam")
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction,
                                                               util.crop_to_shape(y_test, prediction.shape))))


class DataProvider(ImageDataProvider):
    """
    Extends the default ImageDataProvider to randomly select the next
    image and ensures that only data sets are used where the mask is not empty.

    The data then gets mean and std adjusted.
    """

    def __init__(self, mean, std, *args, **kwargs):
        super(DataProvider, self).__init__(*args, **kwargs)
        # Dataset-wide normalization constants applied in _process_data.
        self.mean = mean
        self.std = std

    def _next_data(self):
        # Skip samples whose segmentation mask is empty: cycle to a random
        # file until a non-empty mask comes up.
        data, mask = super(DataProvider, self)._next_data()
        while mask.sum() == 0:
            self._cycle_file()
            data, mask = super(DataProvider, self)._next_data()

        return data, mask

    def _process_data(self, data):
        # Normalize in place with the mean/std supplied at construction.
        data -= self.mean
        data /= self.std
        return data

    def _load_file(self, path, dtype=np.float32):
        # FIX: Image.open is lazy and keeps the file handle open; use the
        # context manager so the descriptor is released after loading.
        with Image.open(path) as image:
            return np.array(image.resize(IMG_SIZE), dtype)

    def _cycle_file(self):
        # FIX: renamed from the typo `_cylce_file` (private, only called above).
        # Jump to a random file index so _next_data draws a different sample.
        self.file_idx = np.random.choice(len(self.data_files))


if __name__ == '__main__':
    launch()
), ) # CRUD Strings s3.crud_strings[tablename] = Storage( label_create = T("Add Modem Channel"), title_display = T("Modem Channel Details"), title_list = T("Modem Channels"), title_update = T("Edit Modem Channel"), label_list_button = T("View Modem Channels"), msg_record_created = T("Modem Channel added"), msg_record_modified = T("Modem Channel updated"), msg_record_deleted = T("Modem Channel deleted"), msg_list_empty = T("No Modem Channels currently defined"), ) return s3_rest_controller() #------------------------------------------------------------------------------ @auth.s3_requires_membership(1) def sms_smtp_channel(): """ RESTful CRUD controller for SMTP to SMS Outbound channels - appears in the administration menu """ tablename = "%s_%s" % (c, f) table = s3db[tablename] table.address.label = T("Address") table.subject.label = T("Subject") table.enabled.label = T("Enabled") table.address.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("Address"), T("Email Address to which to send SMS messages. 
Assumes sending to phonenumber@address"), ), ) table.subject.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("Subject"), T("Optional Subject to put into Email - can be used as a Security Password by the service provider"), ), ) table.enabled.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("Enabled"), T("Unselect to disable this SMTP service"), ), ) # CRUD Strings s3.crud_strings["msg_sms_outbound_gateway"] = Storage( label_create=T("Create SMTP to SMS Channel"), title_display=T("SMTP to SMS Channel Details"), title_list=T("SMTP to SMS Channels"), title_update=T("Edit SMTP to SMS Channel"), label_list_button=T("List SMTP to SMS Channels"), label_delete_button=T("Delete SMTP to SMS Channel"), msg_record_created=T("SMTP to SMS Channel added"), msg_record_modified=T("SMTP to SMS Channel updated"), msg_record_deleted=T("SMTP to SMS Channel deleted"), msg_list_empty=T("No SMTP to SMS Channels currently registered"), ) s3db.configure(tablename, update_next = URL(args = [1, "update"]), ) return s3_rest_controller() #------------------------------------------------------------------------------ @auth.s3_requires_membership(1) def sms_webapi_channel(): """ RESTful CRUD controller for Web API channels - appears in the administration menu """ tablename = "%s_%s" % (c, f) table = s3db[tablename] table.url.label = T("URL") table.message_variable.label = T("Message variable") table.to_variable.label = T("To variable") table.username.label = T("Username") table.password.label = T("Password") table.enabled.label = T("Enabled") table.url.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("URL"), T("The URL of your web gateway without the POST parameters"), ), ) table.parameters.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("Parameters"), T("The POST variables other than the ones containing the message and the phone number"), ), ) table.message_variable.comment = DIV(_class = "tooltip", _title="%s|%s" % (T("Message Variable"), T("The POST variable on 
the URL used for sending messages"), ), ) table.to_variable.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("To variable"), T("The POST variable containing the phone number"), ), ) table.username.comment = DIV(_class = "tooltip", _title = "%s|%s" % (T("Username"), T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"), ), ) table.password.comment = DIV(_class = "tooltip", _title="%s|%s" % (T("Password"), T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"), ), ) table.enabled.comment = DIV(_class = "tooltip", _title="%s|%s" % (T("Enabled"), T("Unselect to disable this API service"), ), ) # CRUD Strings s3.crud_strings[tablename] = Storage( label_create = T("Create Web API Channel"), title_display = T("Web API Channel Details"), title_list = T("Web API Channels"), title_update = T("Edit Web API Channel"), label_list_button = T("List Web API Channels"), label_delete_button = T("Delete Web API Channel"), msg_record_created = T("Web API Channel added"), msg_record_modified = T("Web API Channel updated"), msg_record_deleted = T("Web API Channel deleted"), msg_list_empty = T("No Web API Channels currently registered"), ) return s3_rest_controller() # ----------------------------------------------------------------------------- @auth.s3_requires_membership(1) def tropo_channel(): """ RESTful CRUD controller for Tropo channels - appears in the administration menu """ tablename = "msg_tropo_channel" table = s3db[tablename] table.token_messaging.label = T("Tropo Messaging Token") table.token_messaging.comment = DIV(DIV(_class = "stickytip", _title = "%s|%s" % (T("Tropo Messaging Token"), T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"), ), ) #table.token_voice.label = T("Tropo Voice Token") #table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a 
href='https://www.tropo.com/docs/scripting/troposessionapi.ht
m' target=_blank>Tropo.com</a>")) # CRUD Strings s3.crud_strings[tablename] = Storage( label_create = T("Create Tropo Channel"), title_display = T("Tropo Channel Details"), title_list = T("Tropo Channels"), title_update = T("Edit Tropo Channel"), lab
el_list_button = T("List Tropo Channels"), label_delete_button = T("Delete Tropo Channel"), msg_record_created = T("Tropo Channel added"), msg_record_modified = T("Tropo Channel updated"), msg_record_deleted = T("Tropo Channel deleted"),
m2 = int(init[val]['mean']) + 6 * S y = yl[m1:m2].copy() xt = xl[m1:m2] for peakid in range(max(0, val - S), min(N, val + S + 1)): if peakid == val: continue y -= gauss_box_model(xt, **init[peakid]) model = GaussBox(**init[val]) model.mean.min = model.mean.value - 0.5 model.mean.max = model.mean.value + 0.5 # model.mean.fixed = True model.stddev.min = 1.0 model.stddev.max = 2.0 model.hpix.fixed = True fitter = fitting.LevMarLSQFitter() model_fitted = fitter(model, xt, y) na = model_fitted.amplitude.value nm = model_fitted.mean.value ns = model_fitted.stddev.value changes_a[val, il] = na - init[val]['amplitude'] changes_m[val, il] = nm - init[val]['mean'] changes_s[val, il] = ns - init[val]['stddev'] init[val]['amplitude'] = na init[val]['mean'] = nm init[val]['stddev'] = ns return init, (changes_a, changes_m, changes_s) def calc_sparse_matrix(self, final, nrows, cut=1.0e-6, extra=10): from scipy.sparse import lil_matrix idxs = range(len(final)) # g_ampl = np.array([final[i]['amplitude'] for i in idxs]) g_mean = np.array([final[i]['mean'] for i in idxs]) g_std = np.array([final[i]['stddev'] for i in idxs]) # calc w begpix = np.ceil(g_mean - 0.5).astype('int') steps = np.arange(-extra, extra) ref = begpix + steps[:, np.newaxis] rr = gauss_box_model(ref, mean=g_mean, stddev=g_std) rrb = begpix - extra # Filter values below 'cut' rr[rr < cut] = 0.0 # Calc Ws matrix block, nfib = rr.shape w_init = lil_matrix((nrows, nfib)) for i in range(nfib): w_init[rrb[i]:rrb[i] + block, i] = rr[:, i, np.newaxis] # Convert to CSR matrix wcol = w_init.tocsr() return wcol def calc_profile(self, data1, pols, col, sigma, start=0, doplots=False): # print 'calc_profile: fitting column', col peaks = np.array([pol(col) for pol in pols]) boxd = data1[:, col] centers = peaks[:] - start sigs = sigma * np.ones_like(centers) scale_sig = 0.25 # For sigma ~= 1.5, the peak is typically 0.25 ecenters = np.ceil(centers - 0.5).astype('int') N = len(centers) cmax = boxd.max() yl = boxd / cmax # 
Normalize to peak xl = np.arange(len(yl)) init_vals = {} for i in range(N): init_vals[i] = {} init_vals[i]['amplitude'] = yl[ecenters[i]] / scale_sig # init_vals[i]['mean'] = ecenters[i] init_vals[i]['mean'] = centers[i] init_vals[i]['stddev'] = sigma final, changes = self.fit1d_profile(xl, yl, init_vals, N, nloop=10) for i in range(N): final[i]['amplitude'] = final[i]['amplitude'] * cmax return final def run(self, rinput): temporary_path = mkdtemp() parameters = self.get_parameters(rinput) data2 = self.bias_process_common(rinput.obresult, parameters) pols2 = [np.poly1d(t['fitparms']) for t in rinput.tracemap] nrows = data2[0].shape[0] # 4112 total_number = data2[0].shape[1] cols = range(total_number) # 4096 # ORIGINAL self._check_directory(os.path.join(
temporary_path,'chunks')) self._check_directory(os.path.join(temporary_path,'json')) pool = mp.Pool(processes=self.procesos) results = [pool.apply_async(calc_all, args=(ite, data2[0].data, pols2, nrows, temporary_path)) for ite in cols] results = [p.get() for p in results] self.compress(os.path.join(temporary_path,'chunks'),os.path.jo
in(temporary_path,'master_weights')) result = self.create_result(master_weights=os.path.join(temporary_path,'master_weights.tar')) # shutil.rmtree(temporary_path) return result def norm_pdf_t(x): return np.exp(-0.5 * x * x) / M_SQRT_2_PI def gauss_box_model_deriv(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5): '''Integrate a gaussian profile.''' z = (x - mean) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev da = norm.cdf(z2) - norm.cdf(z1) fp2 = norm_pdf_t(z2) fp1 = norm_pdf_t(z1) dl = -amplitude / stddev * (fp2 - fp1) ds = -amplitude / stddev * (fp2 * z2 - fp1 * z1) dd = amplitude / stddev * (fp2 + fp1) return (da, dl, ds, dd) def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5): '''Integrate a gaussian profile.''' z = (x - mean) / stddev m2 = z + hpix / stddev m1 = z - hpix / stddev return amplitude * (norm.cdf(m2) - norm.cdf(m1)) def pixcont(i, x0, sig, hpix=0.5): '''Integrate a gaussian profile.''' z = (i - x0) / sig hpixs = hpix / sig z2 = z + hpixs z1 = z - hpixs return norm.cdf(z2) - norm.cdf(z1) def g_profile(xl, l, s): '''A gaussian profile.''' z = (xl - l) / s return np.exp(-0.5 * z ** 2) def fit1d_profile(xl, yl, init0, N, nloop=10, S=3): """Iterative fitting""" init = copy.deepcopy(init0) changes_a = np.zeros((N, nloop)) changes_m = np.zeros((N, nloop)) changes_s = np.zeros((N, nloop)) for il in range(nloop): values = np.random.permutation(N) for val in values: m1 = max(0, int(init[val]['mean']) - 6 * S) m2 = int(init[val]['mean']) + 6 * S y = yl[m1:m2].copy() xt = xl[m1:m2] for peakid in range(max(0, val - S), min(N, val + S + 1)): if peakid == val: continue y -= gauss_box_model(xt, **init[peakid]) model = GaussBox(**init[val]) model.mean.min = model.mean.value - 0.5 model.mean.max = model.mean.value + 0.5 # model.mean.fixed = True model.stddev.min = 1.0 model.stddev.max = 2.0 model.hpix.fixed = True fitter = fitting.LevMarLSQFitter() model_fitted = fitter(model, xt, y) na = model_fitted.amplitude.value nm = 
model_fitted.mean.value ns = model_fitted.stddev.value changes_a[val, il] = na - init[val]['amplitude'] changes_m[val, il] = nm - init[val]['mean'] changes_s[val, il] = ns - init[val]['stddev'] init[val]['amplitude'] = na init[val]['mean'] = nm init[val]['stddev'] = ns return init, (changes_a, changes_m, changes_s) def calc_sparse_matrix(final, nrows, cut=1.0e-6, extra=10): from scipy.sparse import lil_matrix idxs = range(len(final)) # g_ampl = np.array([final[i]['amplitude'] for i in idxs]) g_mean = np.array([final[i]['mean'] for i in idxs]) g_std = np.array([final[i]['stddev'] for i in idxs]) # calc w begpix = np.ceil(g_mean - 0.5).astype('int') steps = np.arange(-extra, extra) ref = begpix + steps[:, np.newaxis] rr = gauss_box_model(ref, mean=g_mean, stddev=g_std) rrb = begpix - extra # Filter values below 'cut' rr[rr < cut] = 0.0 # Calc Ws matrix block, nfib = rr.shape w_init = lil_matrix((nrows, nfib)) for i in range(nfib): w_init[rrb[i]:rrb[i] + block, i] = rr[:, i, np.newaxis] # Convert to CSR matrix wcol = w_init.tocsr() return wcol def calc_profile(data1, pols, col, sigma, start=0, doplots=False): # print 'calc_profile: fitting column', col peaks = np.array([pol(col) for pol in pols]) boxd = data1[:, col] centers = peaks[:] - start sigs = sigma * np.ones_like(centers) scale_sig = 0.25 # For sigma ~= 1.5, the peak is typically 0.25 ecenters = np.ceil(centers - 0.5).astype('int') N = len(centers)
atabases:index') class CreateDatabaseView(horizon_forms.ModalFormView): form_class = forms.CreateDatabaseForm form_id = "create_database_form" modal_header = _("Create Database") modal_id = "create_database_modal" template_name = 'project/databases/create_database.html' submit_label = _("Create Database") submit_url = 'horizon:project:databases:create_database' success_url = 'horizon:project:databases:detail' def get_success_url(self): return reverse(self.success_url, args=(self.kwargs['instance_id'],)) def get_context_data(self, **kwargs): context = super(CreateDatabaseView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] args = (self.kwargs['instance_id'],) context['submit_url'] = reverse(self.submit_url, args=args) return context def get_initial(self): instance_id = self.kwargs['instance_id'] return {'instance_id': instance_id} class ResizeVolumeView(horizon_forms.ModalFormView): form_class = forms.ResizeVolumeForm form_id = "resize_volume_form" modal_header = _("Resize Database Volume") modal_id = "resize_volume_modal" template_name = 'project/databases/resize_volume.html' submit_label = "Resize Database Volume" submit_url = 'horizon:project:databases:resize_volume' success_url = reverse_lazy('horizon:project:databases:index') page_title = _("Resize Database Volume") @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: return api.trove.instance_get(self.request, instance_id) except Exception: msg = _('Unable to retrieve instance details.') redirect = reverse('horizon:project:databases:index') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(ResizeVolumeView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] args = (self.kwargs['instance_id'],) context['submit_url'] = reverse(self.submit_url, args=args) return context def get_initial(self): instance = self.get_object() 
return {'instance_id': self.kwargs['instance_id'], 'orig_size': instance.volume.get('size', 0)} class ResizeInstanceView(horizon_forms.ModalFormView): form_class = forms.ResizeInstanceForm form_id = "resize_instance_form" modal_header = _("Resize Database Instance") modal_id = "resize_instance_modal" template_name = 'project/databases/resize_instance.html' submit_label = "Resize Database Instance" submit_url = 'horizon:project:databases:resize_instance' success_url = reverse_lazy('horizon:project:databases:index') page_title = _("Resize Database Instance") @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: instance = api.trove.instance_get(self.request, instance_id) flavor_id = instance.flavor['id'] flavors = {} for i, j in self.get_flavors(): flavors[str(i)] = j if flavor_id in flavors: instance.flavor_name = flavors[flavor_id] else: flavor = api.trove.flavor_get(self.request, flavor_id) instance.flavor_name = flavor.name return instance except Exception: redirect = reverse('horizon:project:databases:index') msg = _('Unable to retrieve instance details.') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(ResizeInstanceView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] args = (self.kwargs['instance_id'],) context['submit_url'] = reverse(self.submit_url, args=args) return context @memoized.memoized_method def get_flavors(self, *args, **kwargs): try: flavors = api.trove.flavor_list(self.request) return instance_utils.sort_flavor_list(self.request, flavors) except Exception: redirect = reverse("horizon:project:databases:index") exceptions.handle(self.request, _('Unable to retrieve flavors.'), redirect=redirect) def get_initial(self): initial = super(ResizeInstanceView, self).get_initial() obj = self.get_object() if obj: initial.update({'instance_id': self.kwargs['instance_id'], 'old_flavor_id': 
obj.flavor['id'], 'old_flavor_name': getattr(obj, 'flavor_name', ''), 'flavors': self.get_flavors()}) return initial class PromoteToReplicaSourceView(horizon_forms.ModalFormView): form_class = forms.PromoteToReplicaSourceForm form_id = "promote_to_replica_source_form" modal_header = _("Promote to Replica Source") modal_id = "promote_to_replica_source_modal" template_name = 'project/databases/promote_to_replica_source.html' submit_lable = _("Promote") submit_url = 'horizon:project:databases:promote_to_replica_source' success_url = reverse_lazy('horizon:project:databases:index') @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: replica = api.trove.instance_get(self.request, instance_id) replica_source = api.trove.instance_get(self.request, replica.replica_of['id']) instances = {'replica': replica, 'replica_source': rep
lica_source} return insta
nces except Exception: msg = _('Unable to retrieve instance details.') redirect = reverse('horizon:project:databases:index') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = \ super(PromoteToReplicaSourceView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] context['replica'] = self.get_initial().get('replica') context['replica'].ip = \ self.get_initial().get('replica').ip[0] context['replica_source'] = self.get_initial().get('replica_source') context['replica_source'].ip = \ self.get_initial().get('replica_source').ip[0] args = (self.kwargs['instance_id'],) context['submit_url'] = reverse(self.submit_url, args=args) return context def get_initial(self): instances = self.get_object() return {'instance_id': self.kwargs['instance_id'], 'replica': instances['replica'], 'replica_source': instances['replica_source']} class EnableRootInfo(object): def __init__(self, instance_id, instance_name, enabled, password=None): self.id = instance_id self.name = instance_name self.enabled = enabled self.password = password class ManageRootView(horizon_tables.DataTableView): table_class = tables.ManageRootTable template_name = 'project/databases/manage_root.html' page_title = _("Manage Root Access") @memoized.memoized_method def get_data(self): instance_id = self.kwargs['instance_id'] try: instance = api.trove.instance_get(self.request, instance_id) except Exception: redirect = reverse('horizon:project:databases:detail', args=[instance_id]) exceptions.handle(self.request, _('Unable to retrieve instance details.'), redirect=redirect) try: enabled = api.trove.root_show(self.request, instance_id)
# Copyright (C) 2013-2015 MetaMorph Software, Inc

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:

# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.

# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.

# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:

# Copyright (C) 2011-2014 Vanderbilt University

# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:

# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.

# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.

#!/usr/bin/python

###
# This module provides the 'make CF card' function to OM
# The two functions "update_fs" and "prepare_fs" should
# be provided to the user of the OM so that the user
# can perform either one at any time
#
# At the bottom there is an example of running the function.
# In the OM you'll need to have an option for the user to select
# which device enumeration to use (or input it through text)
###

import copy, os, shutil, subprocess, string, glob, fnmatch, shlex
import threading
import time
import sys


def scan_for_CAD_files(mypath):
    """Walk *mypath*, collect Creo part/assembly files and convert each to XML.

    Conversions run on worker threads, throttled so that at most
    ``max_threads`` extractor processes are alive at once.
    """
    print("Starting test script for ExtractACM-XMLfromCASModules.exe")
    matches = []
    for root, dirs, files in os.walk(mypath):
        # *.prt* / *.asm* are Creo part and assembly files; files already
        # converted to XML are skipped.
        for filename in fnmatch.filter(files, '*.prt*') + fnmatch.filter(files, '*.asm*'):
            if not filename.endswith('.xml'):
                matches.append(os.path.join(root, filename))

    max_threads = 1  # effectively serializes conversions; raise to parallelize
    threads = []
    for fn in matches:
        # Busy-wait until a worker slot frees up.
        while count_alive_threads(threads) >= max_threads:
            time.sleep(1)
        newThread = threading.Thread(target=run_the_extractor, kwargs={"filename": fn})
        newThread.start()
        threads.append(newThread)


def count_alive_threads(thread_array):
    """Return how many threads in *thread_array* are still running."""
    # FIX: is_alive() replaces the deprecated isAlive() alias (removed in
    # Python 3.9; is_alive() exists since Python 2.6, so this stays
    # compatible with the Python 2 interpreter this script targets).
    count = 0
    for t in thread_array:
        if t.is_alive():
            count += 1
    return count


def run_the_extractor(filename):
    """Run ExtractACM-XMLfromCreoModels.exe on *filename*, writing <filename>.xml."""
    print("converting " + filename)
    outfilename = filename + '.xml'
    # PROE_ISIS_EXTENSIONS is assumed to end with a path separator — TODO confirm.
    exe_path = os.getenv("PROE_ISIS_EXTENSIONS") + 'bin\ExtractACM-XMLfromCreoModels.exe'
    arguments = ' -c "' + filename + '" -x "' + outfilename + '"'
    command = exe_path + arguments
    # String command + default shell=False works on Windows (the intended
    # platform); on POSIX this form would fail.
    return_code = subprocess.call(command)
    if return_code:
        print(" Error on converting file " + filename + " (return code " + str(return_code) + ")")


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Syntax: testExtractACM <PathtoScan>")
        exit()
    mypath = sys.argv[1]
    scan_for_CAD_files(mypath)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import urlparse

from scrapy import log
from scrapy.http import Request

from base.base_wolf import Base_Wolf


class Wolf(Base_Wolf):
    """Crawler for henbt.com torrent listings."""

    def __init__(self, *args, **kwargs):
        super(Wolf, self).__init__(*args, **kwargs)
        self.name = 'henbt'
        self.seed_urls = [
            'http://henbt.com/',
        ]
        self.base_url = 'http://henbt.com/'
        # Follow only detail pages (URLs containing "show-").
        self.rule['follow'] = re.compile(r'show-')
        # XPath of the description block on a detail page.
        self.anchor['desc'] = "//*[@class='intro']"

    def get_resource(self, item, response, tree):
        """Extract torrent download links from a parsed detail page.

        :param item: item dict populated by the base class
        :param response: the scrapy response being processed
        :param tree: parsed lxml tree of the page
        :return: result of ``download_bt`` when at least one "down.php"
                 link is found, otherwise ``None`` (item dropped).
        """
        item = super(Wolf, self).get_resource(item, response, tree)
        resource = tree.xpath("//*[@class='original download']//a/@href")
        downloads = [urlparse.urljoin(self.base_url, r)
                     for r in resource if re.match(r'down.php', r)]
        # FIX: idiomatic truthiness test instead of `if len(downloads):`.
        if downloads:
            return self.download_bt(
                item,
                [Request(d, cookies=self.cookiejar._cookies,) for d in downloads])
        else:
            self.log("No Resource DropItem %s" % item['source'], level=log.WARNING)
            return None
#!/usr/bin/env python
#!-*- coding:utf-8 -*-


def read(filename):
    """Return a list of all lines (newlines included) from *filename*."""
    dic = []
    with open(filename, 'r') as fp:
        # Iterating the file object streams lines lazily — replaces the
        # old readlines(10000) size-hint loop with identical results.
        for line in fp:
            dic.append(line)
    return dic


def Write(file, dic):
    """Write every string in *dic* to *file*, overwriting existing content.

    NOTE(review): the parameter name shadows the Python 2 builtin ``file``;
    kept unchanged for interface compatibility.
    """
    with open(file, 'w') as fp:
        for i in dic:
            fp.write(i)


if __name__ == '__main__':
    test = read('output.txt')
    test += read("dire.txt")
    # Single-argument print(...) behaves identically under Python 2 and 3.
    print(test)
    # The set de-duplicates lines but loses their original order.
    Write('output.txt', set(test))
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

import superdesk
from .service import PackagesService
from .resource import PackagesResource


def init_app(app) -> None:
    """Initialize the `packages` API endpoint.

    :param app: the API application object
    :type app: `Eve`
    """
    endpoint_name = "packages"
    # Wire the service to the default backend, then register the resource
    # on the application under the same endpoint name.
    backend = superdesk.get_backend()
    packages_service = PackagesService(endpoint_name, backend=backend)
    PackagesResource(endpoint_name, app=app, service=packages_service)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bson
from mock import patch, Mock, MagicMock
import unittest2

# XXX: There is an import dependency. Config needs to setup
# before importing remote_script_runner classes.
import st2tests.config as tests_config
tests_config.parse_args()

from st2common.util import jsonify
from remote_script_runner import ParamikoRemoteScriptRunner
from st2common.runners.parallel_ssh import ParallelSSHClient
from st2common.exceptions.ssh import InvalidCredentialsException
from st2common.exceptions.ssh import NoHostsConnectedToException
from st2common.models.system.paramiko_script_action import ParamikoRemoteScriptAction
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2tests.fixturesloader import FixturesLoader

__all__ = [
    'ParamikoScriptRunnerTestCase'
]

# Fixture pack and models used by the tests below.
FIXTURES_PACK = 'generic'
TEST_MODELS = {
    'actions': ['a1.yaml']
}

MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']


class ParamikoScriptRunnerTestCase(unittest2.TestCase):
    """Unit tests for ParamikoRemoteScriptRunner with all SSH I/O mocked out."""

    @patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
    @patch.object(jsonify, 'json_loads', MagicMock(return_value={}))
    @patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
    @patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
    def test_cwd_used_correctly(self):
        # The remote command must `cd` into the action's cwd before running
        # the uploaded script, and quote the positional arg with a space.
        remote_action = ParamikoRemoteScriptAction(
            'foo-script', bson.ObjectId(),
            script_local_path_abs='/home/stanley/shiz_storm.py',
            script_local_libs_path_abs=None,
            named_args={}, positional_args=['blank space'], env_vars={},
            on_behalf_user='svetlana', user='stanley',
            private_key='---SOME RSA KEY---', remote_dir='/tmp',
            hosts=['127.0.0.1'], cwd='/test/cwd/'
        )
        paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
        paramiko_runner._parallel_ssh_client = ParallelSSHClient(['127.0.0.1'], 'stanley')
        paramiko_runner._run_script_on_remote_host(remote_action)
        exp_cmd = "cd /test/cwd/ && /tmp/shiz_storm.py 'blank space'"
        ParallelSSHClient.run.assert_called_with(exp_cmd, timeout=None)

    @patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
    @patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
    @patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
    def test_username_only_ssh(self):
        # A username without any key/password must be rejected during pre_run.
        paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
        paramiko_runner.runner_parameters = {'username': 'test_user', 'hosts': '127.0.0.1'}
        self.assertRaises(InvalidCredentialsException, paramiko_runner.pre_run)

    def test_username_invalid_private_key(self):
        # An unparsable private key means no host can be connected to.
        # (No mocks here: the real connect path is exercised and must fail.)
        paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
        paramiko_runner.runner_parameters = {
            'username': 'test_user',
            'hosts': '127.0.0.1',
            'private_key': 'invalid private key',
        }
        paramiko_runner.context = {}
        self.assertRaises(NoHostsConnectedToException, paramiko_runner.pre_run)

    @patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
    @patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
    @patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
    def test_top_level_error_is_correctly_reported(self):
        # Verify that a top-level error doesn't cause an exception to be thrown.
        # In a top-level error case, result dict doesn't contain entry per host
        paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
        paramiko_runner.runner_parameters = {
            'username': 'test_user',
            'hosts': '127.0.0.1'
        }
        paramiko_runner.action = ACTION_1
        paramiko_runner.liveaction_id = 'foo'
        paramiko_runner.entry_point = 'foo'
        paramiko_runner.context = {}
        paramiko_runner._cwd = '/tmp'
        # Force the copy step to blow up so run() must swallow it and report
        # a failed status instead of propagating the exception.
        paramiko_runner._copy_artifacts = Mock(side_effect=Exception('fail!'))
        status, result, _ = paramiko_runner.run(action_parameters={})

        self.assertEqual(status, LIVEACTION_STATUS_FAILED)
        self.assertEqual(result['failed'], True)
        self.assertEqual(result['succeeded'], False)
        self.assertTrue('Failed copying content to remote boxes' in result['error'])
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

""" Module with the always function. """
import inspect
from types import FunctionType

from myhdl import InstanceError
from myhdl._util import _isGenFunc, _makeAST
from myhdl._Waiter import _inferWaiter
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor


# Namespace holding the error messages raised by this module.
class _error:
    pass
_error.NrOfArgs = "decorated generator function should not have arguments"
_error.ArgType = "decorated object should be a generator function"


class _CallInfo(object):
    """Immutable record of who called an Instantiator.

    name    -- name of the calling (block) function
    modctxt -- True when the caller's caller is a myhdl block instance
    symdict -- merged globals + locals of the calling frame
    """

    def __init__(self, name, modctxt, symdict):
        self.name = name
        self.modctxt = modctxt
        self.symdict = symdict


def _getCallInfo():
    """Get info on the caller of an Instantiator.

    An Instantiator should be used in a block context.
    This function gets the required info about the caller.
    It uses the frame stack:
    0: this function
    1: the instantiator decorator
    2: the block function that defines instances
    3: the caller of the block function, e.g. the BlockInstance.
    """
    from myhdl import _block
    # Frame 2: the block function that is defining instances.
    funcrec = inspect.stack()[2]
    name = funcrec[3]
    frame = funcrec[0]
    # Locals shadow globals, mirroring normal name resolution.
    symdict = dict(frame.f_globals)
    symdict.update(frame.f_locals)
    modctxt = False
    # Frame 3: whoever invoked the block function; if that is a bound
    # _Block method, we are in a module (block) context.
    callerrec = inspect.stack()[3]
    f_locals = callerrec[0].f_locals
    if 'self' in f_locals:
        modctxt = isinstance(f_locals['self'], _block._Block)
    return _CallInfo(name, modctxt, symdict)


def instance(genfunc):
    """Decorator turning a zero-argument generator function into an instance.

    Raises InstanceError if *genfunc* is not a generator function or
    takes arguments.
    """
    callinfo = _getCallInfo()
    if not isinstance(genfunc, FunctionType):
        raise InstanceError(_error.ArgType)
    if not _isGenFunc(genfunc):
        raise InstanceError(_error.ArgType)
    if genfunc.__code__.co_argcount > 0:
        raise InstanceError(_error.NrOfArgs)
    return _Instantiator(genfunc, callinfo=callinfo)


class _Instantiator(object):
    """Wraps a generator function and analyzes its AST for signal usage."""

    def __init__(self, genfunc, callinfo):
        self.callinfo = callinfo
        self.callername = callinfo.name
        self.modctxt = callinfo.modctxt
        self.genfunc = genfunc
        self.gen = genfunc()
        # infer symdict: keep only caller symbols not shadowed by the
        # generator function's own local variable names.
        f = self.funcobj
        varnames = f.__code__.co_varnames
        symdict = {}
        for n, v in callinfo.symdict.items():
            if n not in varnames:
                symdict[n] = v
        self.symdict = symdict

        # print modname, genfunc.__name__
        tree = self.ast
        # print ast.dump(tree)
        # Resolve attribute references, then classify signal names into
        # inputs/outputs/inouts for downstream conversion/simulation.
        v = _AttrRefTransformer(self)
        v.visit(tree)
        v = _SigNameVisitor(self.symdict)
        v.visit(tree)
        self.inputs = v.inputs
        self.outputs = v.outputs
        self.inouts = v.inouts
        self.embedded_func = v.embedded_func
        self.sigdict = v.sigdict
        self.losdict = v.losdict

    @property
    def name(self):
        # Name of the wrapped generator function.
        return self.funcobj.__name__

    @property
    def funcobj(self):
        return self.genfunc

    @property
    def waiter(self):
        # A Waiter instance driving self.gen, of the inferred waiter class.
        return self._waiter()(self.gen)

    def _waiter(self):
        return _inferWaiter

    @property
    def ast(self):
        # Freshly parsed AST of the generator function.
        return _makeAST(self.funcobj)
#!/usr/bin/env python
#__author__ = 'Andrew'
# Connect to a Micromodem over a serial port and log its traffic until
# interrupted; the modem is always disconnected on exit.
from acomms import micromodem, unifiedlog
import logging
from time import sleep
import argparse

if __name__ == '__main__':
    ap = argparse.ArgumentParser(description ='Connect to a MM for testing purposes')
    # NOTE(review): a default on a required positional argument is ignored
    # by argparse unless nargs='?' is given — TODO confirm intent.
    ap.add_argument("logpath", help="Location of Log File", default="/home/acomms/")
    ap.add_argument("-C","--COM", help='COM Port to connect', default="/dev/ttyO1")
    ap.add_argument("-BR","--Baudrate", help="COM Port Baud Rate", default=19200)
    args = ap.parse_args()

    unified_log = unifiedlog.UnifiedLog(log_path=args.logpath, console_log_level=logging.INFO)
    um1 = micromodem.Micromodem(name='Micromodem2',unified_log=unified_log)
    um1.connect_serial(args.COM, args.Baudrate)

    # Idle forever (logging happens inside the Micromodem object); ensure
    # the serial connection is released on Ctrl-C or any other exit.
    try:
        while True:
            sleep(1)
    finally:
        um1.disconnect()
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Make the package under documentation importable from the docs directory.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import json_patch

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-json-patch'
copyright = u'2015, Ashley Wilson'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = json_patch.__version__
# The full version, including alpha/beta/rc tags.
release = json_patch.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-json-patchdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-json-patch.tex', u'django-json-patch Documentation',
     u'Ashley Wilson', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-json-patch', u'django-json-patch Documentation',
     [u'Ashley Wilson'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-json-patch', u'django-json-patch Documentation',
     u'Ashley Wilson', 'django-json-patch', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
#!/usr/bin/env python
'''
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
'''
# Python 2 script (bare `print` statements below).
import threading
from datetime import datetime

from netmiko import ConnectHandler
from my_devices import device_list as devices


def show_version(a_device):
    '''Execute show version command using Netmiko.'''
    remote_conn = ConnectHandler(**a_device)
    print
    print '#' * 80
    print remote_conn.send_command_expect("show version")
    print '#' * 80
    print


def main():
    '''
    Use threads and Netmiko to connect to each of the devices. Execute
    'show version' on each device. Record the amount of time required to do this.
    '''
    start_time = datetime.now()

    # Fan out: one thread per device, all started immediately.
    for a_device in devices:
        my_thread = threading.Thread(target=show_version, args=(a_device,))
        my_thread.start()

    # Join every thread except this (main) one before reporting elapsed time.
    main_thread = threading.currentThread()
    for some_thread in threading.enumerate():
        if some_thread != main_thread:
            print some_thread
            some_thread.join()

    print "\nElapsed time: " + str(datetime.now() - start_time)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 Name                 : Omero RT
 Description          : Omero plugin
 Date                 : August 15, 2010
 copyright            : (C) 2010 by Giuseppe Sucameli (Faunalia)
 email                : sucameli@faunalia.it
 ***************************************************************************/

This code has been extracted and adapted from rt_omero plugin to be resused
in rt_geosisma_offline plugin

Works done from Faunalia (http://www.faunalia.it) with funding from Regione
Toscana - Servizio Sismico (http://www.rete.toscana.it/sett/pta/sismica/)

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
"""
# Python 2 / PyQt4 module: uses old-style SIGNAL()/emit() connections and
# dict.has_key, which do not exist in Python 3 / PyQt5.

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis.gui


class MapTool(QObject):
    """Wrapper that manages the lifecycle of a QGIS map tool on a canvas.

    The first canvas ever passed in is cached on the class (MapTool.canvas)
    and reused by later instances constructed without a canvas.
    """

    canvas = None
    registeredToolStatusMsg = {}

    def __init__(self, mapToolClass, canvas=None):
        QObject.__init__(self)
        if canvas == None:
            if MapTool.canvas == None:
                raise Exception( "MapTool.canvas is None" )
            else:
                self.canvas = MapTool.canvas
        else:
            self.canvas = canvas
            # Remember the first canvas at class level for later instances.
            if MapTool.canvas == None:
                MapTool.canvas = canvas
        self.tool = mapToolClass( self.canvas )
        QObject.connect(self.tool, SIGNAL( "geometryDrawingEnded" ), self.onEnd)

    def deleteLater(self):
        # Tear down the wrapped tool before destroying this QObject.
        self.unregisterStatusMsg()
        self.stopCapture()
        self.tool.deleteLater()
        del self.tool
        return QObject.deleteLater(self)

    def registerStatusMsg(self, statusMessage):
        # Associate a status-bar message with this tool instance.
        MapTool.registeredToolStatusMsg[self] = statusMessage

    def unregisterStatusMsg(self):
        if not MapTool.registeredToolStatusMsg.has_key( self ):
            return
        del MapTool.registeredToolStatusMsg[self]

    def onEnd(self, geometry):
        # Drawing finished: release the tool and re-emit the geometry
        # (None means the drawing was aborted, so nothing is emitted).
        self.stopCapture()
        if geometry == None:
            return
        self.emit( SIGNAL( "geometryEmitted" ), geometry )

    def isActive(self):
        return self.canvas != None and self.canvas.mapTool() == self.tool

    def startCapture(self):
        self.canvas.setMapTool( self.tool )

    def stopCapture(self):
        self.canvas.unsetMapTool( self.tool )


class Drawer(qgis.gui.QgsMapToolEmitPoint):
    """Rubber-band drawing tool: left-click adds vertices (with snapping),
    right-click ends the drawing and emits "geometryDrawingEnded"."""

    def __init__(self, canvas, isPolygon=False):
        self.canvas = canvas
        self.isPolygon = isPolygon
        qgis.gui.QgsMapToolEmitPoint.__init__(self, self.canvas)

        self.rubberBand = qgis.gui.QgsRubberBand( self.canvas, self.isPolygon )
        self.rubberBand.setColor( Qt.red )
        self.rubberBand.setBrushStyle(Qt.DiagCrossPattern)
        self.rubberBand.setWidth( 1 )

        # Set snapping to "snap to vertex" with the tolerance below, in map
        # units.  (Original Italian comment said 0.9; the value used is 0.3.)
        customSnapOptions = { 'mode' : "to vertex", 'tolerance' : 0.3, 'unit' : 0 }
        self.oldSnapOptions = self.customizeSnapping( customSnapOptions )
        self.snapper = qgis.gui.QgsMapCanvasSnapper( self.canvas )
        self.isEmittingPoints = False

    def __del__(self):
        # Restore the user's previous snapping settings on destruction.
        if self.oldSnapOptions:
            self.customizeSnapping( self.oldSnapOptions )
        del self.rubberBand
        del self.snapper
        self.deleteLater()

    def reset(self):
        self.isEmittingPoints = False
        self.rubberBand.reset( self.isPolygon )

    def customizeSnapping(self, option):
        """Apply snapping *option* globally via QSettings; return the
        previous settings so they can be restored later."""
        oldSnap = {}
        settings = QSettings()
        oldSnap['mode'] = settings.value( "/Qgis/digitizing/default_snap_mode", "to vertex", type=str)
        oldSnap['tolerance'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance", 0, type=float)
        oldSnap['unit'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance_unit", 1, type=int )

        settings.setValue( "/Qgis/digitizing/default_snap_mode", option['mode'] )
        settings.setValue( "/Qgis/digitizing/default_snapping_tolerance", option['tolerance'] )
        settings.setValue( "/Qgis/digitizing/default_snapping_tolerance_unit", option['unit'] )
        return oldSnap

    def canvasPressEvent(self, e):
        # Right button: finish drawing and emit the collected geometry.
        if e.button() == Qt.RightButton:
            self.isEmittingPoints = False
            self.emit( SIGNAL("geometryDrawingEnded"), self.geometry() )
            return

        # Left button: add a vertex; any other button is ignored.
        if e.button() == Qt.LeftButton:
            self.isEmittingPoints = True
        else:
            return
        point = self.toMapCoordinates( e.pos() )
        self.rubberBand.addPoint( point, True ) # true to update canvas
        self.rubberBand.show()

    def canvasMoveEvent(self, e):
        if not self.isEmittingPoints:
            return

        # Track the cursor, preferring a snapped background-layer vertex.
        retval, snapResults = self.snapper.snapToBackgroundLayers( e.pos() )
        if retval == 0 and len(snapResults) > 0:
            point = snapResults[0].snappedVertex
        else:
            point = self.toMapCoordinates( e.pos() )
        self.rubberBand.movePoint( point )

    def isValid(self):
        return self.rubberBand.numberOfVertices() > 0

    def geometry(self):
        # Return a copy of the rubber band geometry, or None when empty.
        if not self.isValid():
            return None
        geom = self.rubberBand.asGeometry()
        if geom == None:
            return
        return QgsGeometry.fromWkt( geom.exportToWkt() )

    def deactivate(self):
        qgis.gui.QgsMapTool.deactivate(self)
        self.reset()
        self.emit(SIGNAL("deactivated()"))


class FeatureFinder(MapTool):
    """Point-click tool that emits "pointEmitted" and offers a feature
    lookup helper (findAtPoint)."""

    def __init__(self, canvas=None):
        MapTool.__init__(self, qgis.gui.QgsMapToolEmitPoint, canvas=canvas)
        QObject.connect(self.tool, SIGNAL( "canvasClicked(const QgsPoint &, Qt::MouseButton)" ), self.onEnd)

    def onEnd(self, point, button):
        self.stopCapture()
        self.emit( SIGNAL("pointEmitted"), point, button )

    @classmethod
    def findAtPoint(self, layer, point, onlyTheClosestOne=True, onlyIds=False):
        """Find features of *layer* near *point* (canvas coordinates).

        Returns, depending on the flags: the closest feature (or its id),
        or all matching features (or their ids); None when nothing matches.
        """
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        # mapSettings() exists on newer canvases; fall back to mapRenderer().
        try:
            point = MapTool.canvas.mapSettings().mapToLayerCoordinates(layer, point)
        except:
            point = MapTool.canvas.mapRenderer().mapToLayerCoordinates(layer, point)

        # Retrieve the identify search-radius setting.
        settings = QSettings()
        radius = settings.value( "/Map/identifyRadius", QGis.DEFAULT_IDENTIFY_RADIUS, float )
        if radius <= 0:
            # XXX: in QGis 1.8 QGis.DEFAULT_IDENTIFY_RADIUS is 0,
            # this cause the rectangle is empty and the select
            # returns all the features...
            radius = 0.5    # it means 0.50% of the canvas extent
        radius = MapTool.canvas.extent().width() * radius/100.0

        # Build the search rectangle around the clicked point.
        rect = QgsRectangle()
        rect.setXMinimum(point.x() - radius)
        rect.setXMaximum(point.x() + radius)
        rect.setYMinimum(point.y() - radius)
        rect.setYMaximum(point.y() + radius)

        # Retrieve the features intersecting the rectangle.
        #layer.select([], rect, True, True)
        layer.select( rect, True )

        ret = None

        if onlyTheClosestOne:
            minDist = -1
            featureId = None
            rect2 = QgsGeometry.fromRect(rect)

            for f in layer.getFeatures(QgsFeatureRequest(rect)):
                if onlyTheClosestOne:
                    geom = f.geometry()
                    distance = geom.distance(rect2)
                    if minDist < 0 or distance < minDist:
                        minDist = distance
                        featureId = f.id()

            if onlyIds:
                ret = featureId
            elif featureId != None:
                f = layer.getFeatures(QgsFeatureRequest().setFilterFid( featureId ))
                ret = f.next()
        else:
            IDs = [f.id() for f in layer.getFeatures(QgsFeatureRequest(rect))]
            if onlyIds:
                ret = IDs
            else:
                # NOTE(review): this appends the iterator returned by
                # getFeatures, not a feature — callers probably expect
                # f.next() here as in the closest-one branch; confirm.
                ret = []
                for featureId in IDs:
                    f = layer.getFeatures(QgsFeatureRequest().setFilterFid( featureId ))
                    ret.append( f )

        QApplication.restoreOverrideCursor()
        return ret


class PolygonDrawer(MapTool):
    # NOTE(review): this references MapTool.Drawer, but in this module
    # Drawer is defined at module level, not as an attribute of MapTool —
    # evaluating the nested class body would raise AttributeError.  In the
    # upstream rt_omero plugin Drawer was nested inside MapTool; confirm
    # which layout is intended before relying on these classes.
    class PolygonDrawer(MapTool.Drawer):
        def __init__(self, canvas):
            MapTool.Drawer.__init__(self, canvas, QGis.Polygon)

    def __init__(self, canvas=None):
        MapTool.__init__(self, self.PolygonDrawer, canvas)


class LineDrawer(MapTool):
    # NOTE(review): same MapTool.Drawer concern as PolygonDrawer above.
    class LineDrawer(MapTool.Drawer):
        def __init__(self, canvas):
            MapTool.Drawer.__init__(self, canvas, QGis.Line)

    def __init__(self, canvas=None):
        MapTool.__init__(self, self.LineDrawer, canvas)
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_iso8601,
)


class HuajiaoIE(InfoExtractor):
    """Extractor for Huajiao (花椒直播) live-stream replay pages."""
    IE_DESC = '花椒直播'
    _VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.huajiao.com/l/38941232',
        'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
        'info_dict': {
            'id': '38941232',
            'ext': 'mp4',
            'title': '#新人求关注#',
            'description': 're:.*',
            'duration': 2424.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1475866459,
            'upload_date': '20161007',
            'uploader': 'Penny_余姿昀',
            'uploader_id': '75206005',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds its metadata as a JS object: `var feed = {...}`.
        feed_json = self._search_regex(
            r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
        feed = self._parse_json(feed_json, video_id)

        description = self._html_search_meta(
            'description', webpage, 'description', fatal=False)

        def get(section, field):
            # Tolerant two-level lookup into the feed object.
            return feed.get(section, {}).get(field)

        return {
            'id': video_id,
            'title': feed['feed']['formated_title'],
            'description': description,
            'duration': parse_duration(get('feed', 'duration')),
            'thumbnail': get('feed', 'image'),
            # 'creatime' uses a space as date/time delimiter.
            'timestamp': parse_iso8601(feed.get('creatime'), ' '),
            'uploader': get('author', 'nickname'),
            'uploader_id': get('author', 'uid'),
            'formats': self._extract_m3u8_formats(
                feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
        }
import _plotly_utils.basevalidators


class A0Validator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the `a0` attribute of `carpet` traces."""

    def __init__(self, plotly_name="a0", parent_name="carpet", **kwargs):
        # Default the edit type to "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(A0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
from ... import BaseProvider


class Provider(BaseProvider):
    """
    Provider for Philippine IDs that are related to social security

    There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
    social programs and IDs that, when put together, serves as an analogue of other countries' social security program.
    The government agencies responsible for these programs have relatively poor/outdated information and documentation
    on their respective websites, so the sources section include third party "unofficial" information.

    - Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
    - Government Service Insurance System (GSIS) - Social insurance program for government employees
    - Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
    - Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
    - Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to the
                                        four previous programs and was planned to supersede the previous IDs, but
                                        its future is now uncertain because of the upcoming national ID system

    Sources:
    - https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
    - https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
    - https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
    - https://filipiknow.net/is-umid-and-sss-id-the-same/
    - https://filipiknow.net/philhealth-number/
    - https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
    """

    sss_formats = ('##-#######-#',)
    gsis_formats = ('###########',)
    philhealth_formats = ('##-#########-#',)
    pagibig_formats = ('####-####-####',)
    umid_formats = ('####-#######-#',)

    def _create_id(self, formats):
        # Pick one of the templates at random and fill each '#' with a digit.
        return self.numerify(self.random_element(formats))

    def sss(self):
        """Generate an SSS number."""
        return self._create_id(self.sss_formats)

    def gsis(self):
        """Generate a GSIS number."""
        return self._create_id(self.gsis_formats)

    def pagibig(self):
        """Generate a Pag-IBIG membership number."""
        return self._create_id(self.pagibig_formats)

    def philhealth(self):
        """Generate a PhilHealth number."""
        return self._create_id(self.philhealth_formats)

    def umid(self):
        """Generate a UMID common reference number."""
        return self._create_id(self.umid_formats)

    def ssn(self):
        # Use UMID as SSN in the interim till its deprecation
        return self.umid()
# -*- coding: utf-8 -*-
# This file is meant to test that we can also load rules from __init__.py files, this was an issue with pypy before.
from gitlint.rules import CommitRule


class InitFileRule(CommitRule):
    """Dummy user-defined commit rule used as a loading fixture.

    The non-ASCII character in ``name`` is deliberate: it exercises
    unicode handling when rules are discovered from an ``__init__.py``.
    """

    name = "my-init-cömmit-rule"
    id = "UC1"
    options_spec = []

    def validate(self, _commit):
        # Always return an empty violation list: the fixture only needs to
        # load and run, not to flag anything.
        return []
<resOverlayFolders /> <includeSystemProguardFile>false</includeSystemProguardFile> <includeAssetsFromLibraries>true</includeAssetsFromLibraries> <additionalNativeLibs /> </configuration> </facet> </component>""" ALL_MODULES_XML_START = """<?xml version="1.0" encoding="UTF-8"?> <project version="4"> <component name="ProjectModuleManager"> <modules>""" ALL_MODULES_XML_END = """ </modules> </component> </project> """ LIBRARY_XML_START = """<component name="libraryTable"> <library name="%(name)s"> <CLASSES> <root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" /> </CLASSES>""" LIBRARY_XML_WITH_JAVADOC = """ <JAVADOC> <root url="%(javadoc_url)s" /> </JAVADOC>""" LIBRARY_XML_NO_JAVADOC = """ <JAVADOC />""" LIBRARY_XML_WITH_SOURCES = """ <SOURCES> <root url="jar://$PROJECT_DIR$/%(source_jar)s!/" /> </SOURCES>""" LIBRARY_XML_NO_SOURCES = """ <SOURCES />""" LIBRARY_XML_END = """ </library> </component> """ RUN_CONFIG_XML_START = """<component name="ProjectRunConfigurationManager">""" RUN_CONFIG_XML_END = "</component>" REMOTE_RUN_CONFIG_XML = """ <configuration default="false" name="%(name)s" type="Remote" factoryName="Remote"> <option name="USE_SOCKET_TRANSPORT" value="true" /> <option name="SERVER_MODE" value="false" /> <option name="SHMEM_ADDRESS" value="javadebug" /> <option name="HOST" value="localhost" /> <option name="PORT" value="5005" /> <RunnerSettings RunnerId="Debug"> <option name="DEBUG_PORT" value="5005" /> <option name="TRANSPORT" value="0" /> <option name="LOCAL" value="false" /> </RunnerSettings> <ConfigurationWrapper RunnerId="Debug" /> <method /> </configuration> """ # Files that were written by this script. # If `buck project` is working properly, most of the time it will be a no-op # and no files will need to be written. MODIFIED_FILES = [] # Files that are part of the project being run. We will delete all .iml files # that are not checked in and not in this set. 
PROJECT_FILES = set() def write_modules(modules): """Writes one XML file for each module.""" for module in modules: # Build up the XML. module_type = 'JAVA_MODULE' if 'isIntelliJPlugin' in module and module['isIntelliJPlugin']: module_type = 'PLUGIN_MODULE' xml = MODULE_XML_START % { 'type': module_type, } # Android facet, if appropriate. if module.get('hasAndroidFacet') == True: if 'keystorePath' in module: keystore = 'file://$MODULE_DIR$/%s' % module['keystorePath'] else: keystore = '' if 'androidManifest' in module: android_manifest = module['androidManifest'] else: android_manifest = '/AndroidManifest.xml' is_library_project = module['isAndroidLibraryProject'] android_params = { 'android_manifest': android_manifest, 'res': '/res', 'is_android_library_project': str(is_library_project).lower(), 'run_proguard': 'false', 'module_gen_path': module['moduleGenPath'], 'proguard_config': '/proguard.cfg', 'keystore': keystore, 'libs_path' : '/%s' % module.get('nativeLibs', 'libs'), } xml += ANDROID_FACET % android_params # Source code and libraries component. xml += '\n <component name="NewModuleRootManager" inherit-compiler-output="true">' # Empirically, if there are multiple source folders, then the <content> element for the # buck-out/android/gen folder should be listed before the other source folders. num_source_folders = len(module['sourceFolders']) if num_source_folders > 1: xml = add_buck_android_source_folder(xml, module) # Source folders. xml += '\n <content url="file://$MODULE_DIR$">' for source_folder in module['sourceFolders']: if 'packagePrefix' in source_folder: package_prefix = 'packagePrefix="%s" ' % source_folder['packagePrefix'] else:
package_prefix = '' xml += '\n <sourceFolder url="%(url)s" isTestSource="%(is_test_source)s" %(package_prefix)s/>' % { 'url': source_folder['url'], 'is_test_source': str(source_folder['isTestSource']).lower(), 'package_prefix': package_prefix } for exclude_folder in module['excludeFolders']: xml += '\n <excludeFolder url="%s" />' % exclude_folder['url'] xml += '\n </content>' xml = add_annotation_genera
ted_source_folder(xml, module) # Empirically, if there is one source folder, then the <content> element for the # buck-out/android/gen folder should be listed after the other source folders. if num_source_folders <= 1: xml = add_buck_android_source_folder(xml, module) # Dependencies. dependencies = module['dependencies'] module_name = module['name'] # We need to filter out some of the modules in the dependency list: # (1) The module may list itself as a dependency with scope="TEST", which is bad. # (2) The module may list another module as a dependency with both COMPILE and TEST scopes, in # which case the COMPILE scope should win. # compile_dependencies will be the set of names of dependent modules that do not have scope="TEST" compile_dependencies = filter(lambda dep: dep['type'] == 'module' and ((not ('scope' in dep)) or dep['scope'] != 'TEST'), dependencies) compile_dependencies = map(lambda dep: dep['moduleName'], compile_dependencies) compile_dependencies = set(compile_dependencies) # Filter dependencies to satisfy (1) and (2) defined above. filtered_dependencies = [] for dep in dependencies: if dep['type'] != 'module': # Non-module dependencies should still be included. filtered_dependencies.append(dep) else: # dep must be a module dep_module_name = dep['moduleName'] if dep_module_name == module_name: # Exclude self-references! continue elif 'scope' in dep and dep['scope'] == 'TEST': # If this is a scope="TEST" module and the module is going to be included as # a scope="COMPILE" module, then exclude it. if not (dep_module_name in compile_dependencies): filtered_dependencies.append(dep) else: # Non-test modules should still be included. filtered_dependencies.append(dep) # Now that we have filtered the dependencies, we can convert the remaining ones directly into # XML. 
excluded_deps_names = set() if module_type == 'PLUGIN_MODULE': # all the jars below are parts of IntelliJ SDK and even though they are required # for language plugins to work standalone, they cannot be included as the plugin # module dependency because they would clash with IntelliJ excluded_deps_names = set([ 'annotations', # org/intellij/lang/annotations, org/jetbrains/annotations 'extensions', # com/intellij/openapi/extensions/ 'idea', # org/intellij, com/intellij 'jdom', # org/jdom 'junit', # junit/ 'light_psi_all', # light psi library 'openapi', # com/intellij/openapi 'picocontainer', # org/picocontainer 'trove4j', # gnu/trove 'util', # com/intellij/util ]) for dep in filtered_dependencies: if 'scope' in dep: dep_scope = 'scope="%s" ' % dep['scope'] else: dep_scope = '' dep_type = dep['type'] if dep_type == 'library': if dep['name'] in excluded_deps_names: continue xml += '\n <orderEntry type="library" exported="" %sname="%s" level="project" />' % (dep_scope, dep['name']) elif dep_type == 'module': dep_module_name = dep['moduleName'] # TODO(mbolin): Eliminate this special-case for jackson. It exists because jackson is not # an ordinary module: it is a module that functions as a library. Project.java should add it # as such in project.json to eliminate this special
# Generated by Django 2.0.2 on 2018-03-13 02:52

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: resize ``creditcard.tail_no``."""

    # Must be applied after the previous auto-generated migration of the
    # ``cc`` app.
    dependencies = [
        ('cc', '0003_auto_20180228_1145'),
    ]

    operations = [
        # Redefine tail_no as a CharField capped at 10 characters.
        migrations.AlterField(
            model_name='creditcard',
            name='tail_no',
            field=models.CharField(max_length=10),
        ),
    ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""extends the standard Python gettext classes allows multiple simultaneous domains...
(makes multiple sessions with different languages easier too)"""

# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import gettext
import locale
import os.path
from errno import ENOENT
from jToolkit import languagenames


class ManyTranslations(gettext.NullTranslations):
    """this proxies to many translations: each lookup is tried against every
    wrapped translation in order, and the first catalog that knows the
    message wins"""

    def __init__(self, translations=None):
        """Takes an optional sequence of translations."""
        gettext.NullTranslations.__init__(self)
        if translations is None:
            self.translations = []
        else:
            self.translations = translations

    def gettext(self, message):
        """gets the translation of the message by searching through all the domains"""
        for translation in self.translations:
            tmsg = translation._catalog.get(message, None)
            if tmsg is not None:
                return tmsg
        # no catalog knows the message: return it untranslated
        return message

    def ngettext(self, singular, plural, n):
        """gets the plural translation of the message by searching through all the domains"""
        for translation in self.translations:
            # NullTranslations instances have no plural-form function; skip them
            if not hasattr(translation, "plural"):
                continue
            # Look the entry up with .get() so a catalog that lacks this
            # message falls through to the next one instead of raising
            # KeyError (the previous code indexed the catalog directly).
            # Also call translation.plural(n) without rebinding the
            # ``plural`` parameter, which the old code clobbered — that made
            # the fallback below return a function instead of the caller's
            # plural string.
            tmsg = translation._catalog.get((singular, translation.plural(n)), None)
            if tmsg is not None:
                return tmsg
        # untranslated fallback, mirroring gettext.NullTranslations.ngettext
        if n == 1:
            return singular
        else:
            return plural

    def ugettext(self, message):
        """gets the translation of the message by searching through all the domains (unicode version)"""
        for translation in self.translations:
            tmsg = translation._catalog.get(message, None)
            # TODO: we shouldn't set _charset like this. make sure it is set properly
            if translation._charset is None:
                translation._charset = 'UTF-8'
            if tmsg is not None:
                if isinstance(tmsg, unicode):
                    return tmsg
                else:
                    return unicode(tmsg, translation._charset)
        return unicode(message)

    def ungettext(self, singular, plural, n):
        """gets the plural translation of the message by searching through all the domains (unicode version)"""
        for translation in self.translations:
            # NullTranslations instances have no plural-form function; skip them
            if not hasattr(translation, "plural"):
                continue
            # As in ngettext: do not rebind ``plural``, so the fallback below
            # returns the caller's plural string, not the plural() function.
            tmsg = translation._catalog.get((singular, translation.plural(n)), None)
            # TODO: we shouldn't set _charset like this. make sure it is set properly
            if translation._charset is None:
                translation._charset = 'UTF-8'
            if tmsg is not None:
                if isinstance(tmsg, unicode):
                    return tmsg
                else:
                    return unicode(tmsg, translation._charset)
        if n == 1:
            return unicode(singular)
        else:
            return unicode(plural)


def getinstalledlanguages(localedir):
    """looks in localedir and returns a list of languages installed there"""
    languages = []
    # each language lives in <localedir>/<lang>/LC_MESSAGES/...
    def visit(arg, dirname, names):
        if 'LC_MESSAGES' in names:
            languages.append(os.path.basename(dirname))
    os.path.walk(localedir, visit, None)
    return languages


def getlanguagenames(languagecodes):
    """return a dictionary mapping the language code to the language name..."""
    # unknown codes map to themselves
    return dict([(code, languagenames.languagenames.get(code, code)) for code in languagecodes])


def findmany(domains, localedir=None, languages=None):
    """same as gettext.find, but handles many domains, returns many mofiles (not just one)"""
    mofiles = []
    if languages is None:
        languages = getinstalledlanguages(localedir)
    for domain in domains:
        # gettext.find returns None when no .mo file exists for the domain
        mofile = gettext.find(domain, localedir, languages)
        mofiles.append(mofile)
    return mofiles


def translation(domains, localedir=None, languages=None, class_=None):
    """same as gettext.translation, but handles many domains, returns a ManyTranslations object"""
    if class_ is None:
        class_ = gettext.GNUTranslations
    mofiles = findmany(domains, localedir, languages)
    # we'll just use null translations where domains are missing; the
    # stricter (disabled) code below would refuse to continue instead:
    # if None in mofiles:
    #     missingindex = mofiles.index(None)
    #     raise IOError(ENOENT, 'No translation file found for domain', domains[missingindex])
    translations = []
    for mofile in mofiles:
        if mofile is None:
            # missing domain: substitute an empty catalog
            t = gettext.NullTranslations()
            t._catalog = {}
        else:
            # reuse gettext's module-level cache of parsed .mo files
            key = os.path.abspath(mofile)
            t = gettext._translations.get(key)
            if t is None:
                t = gettext._translations.setdefault(key, class_(open(mofile, 'rb')))
        translations.append(t)
    return ManyTranslations(translations)


def getdefaultlanguage(languagelist):
    """tries to work out the default language from a list"""
    # strip a country suffix: "pt_BR" -> "pt"
    # (parameter renamed from ``locale`` so it no longer shadows the module)
    def reducelocale(localecode):
        pos = localecode.find('_')
        if pos == -1:
            return localecode
        else:
            return localecode[:pos]
    currentlocale, currentencoding = locale.getlocale()
    try:
        defaultlocale, defaultencoding = locale.getdefaultlocale()
    except ValueError:
        defaultlocale, defaultencoding = None, None
    if len(languagelist) > 0:
        # prefer exact matches, then language-only matches, in the order
        # current locale -> default locale -> first installed language
        if currentlocale is not None:
            if currentlocale in languagelist:
                return currentlocale
            elif reducelocale(currentlocale) in languagelist:
                return reducelocale(currentlocale)
        if defaultlocale is not None:
            if defaultlocale in languagelist:
                return defaultlocale
            elif reducelocale(defaultlocale) in languagelist:
                return reducelocale(defaultlocale)
        return languagelist[0]
    else:
        # if our language list is empty, we'll just ignore it
        if currentlocale is not None:
            return currentlocale
        elif defaultlocale is not None:
            return defaultlocale
        return None
import sys imp
ort cv2 import helper as hp class MSP(): name = "MSP" def __init__(self): self.__patterns_num = [] self.__patterns_sym = [] self.__labels_num = [] self.__labels_sym = [] msp_num, msp_sym = "msp/num", "msp/sym" self.__load_num_patterns(msp_num) self.__load_sym_patterns(msp_sym) print 'loading MSP...' def __load_num_patterns(self, input_dir): p
aths = hp.get_paths(input_dir) self.__patterns_num = [hp.get_gray_image(input_dir, path) for path in paths] self.__labels_num = [hp.get_test(path, "num")[0] for path in paths] def __load_sym_patterns(self, input_dir): paths = hp.get_paths(input_dir) self.__patterns_sym = [hp.get_gray_image(input_dir, path) for path in paths] self.__labels_sym = [hp.get_test(path, "sym")[0] for path in paths] def __get_mode(self, mode): if mode == "num": return self.__labels_num, self.__patterns_num elif mode == "sym": return self.__labels_sym, self.__patterns_sym def rec(self, img, mode): tmp_max, tmp, rec = sys.maxint, 0, 0 labels, patterns = self.__get_mode(mode) for pattern, label in zip(patterns, labels): tmp = cv2.countNonZero(pattern - img) if tmp < tmp_max: tmp_max, rec = tmp, label return rec