from hpp.corbaserver.rbprm import Client as RbprmClient
from hpp.corbaserver import Client as BasicClient
import hpp.gepetto.blender.exportmotion as em

## Corba clients to the various servers
#
class CorbaClient:
    """
    Container for corba clients to various interfaces.
    """
    def __init__ (self):
        self.basic = BasicClient ()
        self.rbprm = RbprmClient ()

## Load and handle a RbprmDevice robot for rbprm planning
#
# A RbprmDevice robot is a dual representation of a robot. One robot describes the
# trunk of the robot, and a set of robots describe the range of motion of each limb
# of the robot.
class Builder (object):
    ## Constructor
    def __init__ (self, load = True):
        self.tf_root = "base_link"
        self.rootJointType = dict()
        self.client = CorbaClient ()
        self.load = load

    ## Virtual function to load the robot model.
    #
    # \param urdfName urdf description of the robot trunk,
    # \param urdfNameroms either a string, or an array of strings, indicating the
    #        urdf of the different roms to add,
    # \param rootJointType type of root joint among ("freeflyer", "planar",
    #        "anchor"),
    # \param meshPackageName name of the mesh package from where the robot mesh
    #        will be loaded,
    # \param packageName name of the package from where the robot will be loaded,
    # \param urdfSuffix optional suffix for the urdf of the robot package,
    # \param srdfSuffix optional suffix for the srdf of the robot package.
    def loadModel (self, urdfName, urdfNameroms, rootJointType, meshPackageName,
                   packageName, urdfSuffix, srdfSuffix):
        if isinstance(urdfNameroms, list):
            for urdfNamerom in urdfNameroms:
                self.client.rbprm.rbprm.loadRobotRomModel(urdfNamerom, rootJointType,
                    packageName, urdfNamerom, urdfSuffix, srdfSuffix)
        else:
            self.client.rbprm.rbprm.loadRobotRomModel(urdfNameroms, rootJointType,
                packageName, urdfNameroms, urdfSuffix, srdfSuffix)
        self.client.rbprm.rbprm.loadRobotCompleteModel(urdfName, rootJointType,
            packageName, urdfName, urdfSuffix, srdfSuffix)
        self.name = urdfName
        self.displayName = urdfName
        self.tf_root = "base_link"
        self.rootJointType = rootJointType
        self.jointNames = self.client.basic.robot.getJointNames ()
        self.allJointNames = self.client.basic.robot.getAllJointNames ()
        self.client.basic.robot.meshPackageName = meshPackageName
        self.meshPackageName = meshPackageName
        self.rankInConfiguration = dict ()
        self.rankInVelocity = dict ()
        self.packageName = packageName
        self.urdfName = urdfName
        self.urdfSuffix = urdfSuffix
        self.srdfSuffix = srdfSuffix
        rankInConfiguration = rankInVelocity = 0
        for j in self.jointNames:
            self.rankInConfiguration [j] = rankInConfiguration
            rankInConfiguration += self.client.basic.robot.getJointConfigSize (j)
            self.rankInVelocity [j] = rankInVelocity
            rankInVelocity += self.client.basic.robot.getJointNumberDof (j)

    ## Init RbprmShooter
    #
    def initshooter (self):
        return self.client.rbprm.rbprm.initshooter ()

    ## Set limits on robot orientation, described according to Euler's ZYX rotation order.
    #
    # \param bounds 6D vector with the lower and upper bound for each rotation axis in sequence
    def boundSO3 (self, bounds):
        return self.client.rbprm.rbprm.boundSO3 (bounds)

    ## Specify a preferred affordance for a given rom.
    # This constrains the planner to accept a rom configuration only if
    # it collides with a surface the normal of which has these properties.
    #
    # \param rom name of the rom,
    # \param affordances list of affordance names
    def setAffordanceFilter (self, rom, affordances):
        return self.client.rbprm.rbprm.setAffordanceFilter (rom, affordances)

    ## Specify a rom constraint for the planner.
    # A configuration will be valid if and only if the considered rom collides
    # with the environment.
    #
    # \param romFilter array of roms indicated by name, which determine the constraint.
    def setFilter (self, romFilter):
        return self.client.rbprm.rbprm.setFilter (romFilter)

    ## Export a computed path for blender.
    #
    # \param viewer the viewer used for the export,
    # \param problem the problem associated with the path computed for the robot,
    # \param pathId id of the considered path,
    # \param stepsize increment along the path,
    # \param filename name of the output file where to save the output.
    def exportPath (self, viewer, problem, pathId, stepsize, filename):
        em.exportPath(viewer, self.client.basic.robot, problem, pathId, stepsize, filename)

    ## \name Degrees of freedom
    # \{

    ## Get size of configuration
    # \return size of configuration
    def getConfigSize (self):
        return self.client.basic.robot.getConfigSize ()

    ## Get size of velocity
    # \return size of velocity
    def getNumberDof (self):
        return self.client.basic.robot.getNumberDof ()
    ## \}

    ## \name Joints
    # \{

    ## Get joint names in the same order as in the configuration.
    def getJointNames (self):
        return self.client.basic.robot.getJointNames ()

    ## Get the list of all joint names.
    def getAllJointNames (self):
        return self.client.basic.robot.getAllJointNames ()

    ## Get joint position.
    def getJointPosition (self, jointName):
        return self.client.basic.robot.getJointPosition (jointName)

    ## Set static position of joint in its parent frame.
    def setJointPosition (self, jointName, position):
        return self.client.basic.robot.setJointPosition (jointName, position)

    ## Get joint number of degrees of freedom.
    def getJointNumberDof (self, jointName):
        return self.client.basic.robot.getJointNumberDof (jointName)

    ## Get joint config size.
    def getJointConfigSize (self, jointName):
        return self.client.basic.robot.getJointConfigSize (jointName)

    ## Set bounds for the joint.
    def setJointBounds (self, jointName, inJointBound):
        return self.client.basic.robot.setJointBounds (jointName, inJointBound)

    ## Set bounds on the translation part of the freeflyer joint.
    #
    # Valid only if the robot has a freeflyer joint.
    def setTranslationBounds (self, xmin, xmax, ymin, ymax, zmin, zmax):
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_x", [xmin, xmax])
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_y", [ymin, ymax])
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_z", [zmin, zmax])

    ## Get link position in joint frame.
    #
    # Joints are oriented in a different way as in urdf standard since
    # rotation and uni-dimensional translation joints act around or along
    # their x-axis. This method returns the position of the urdf link in
    # world frame.
    #
    # \param jointName name of the joint
    # \return position of the link in world frame.
    def getLinkPosition (self, jointName):
        return self.client.basic.robot.getLinkPosition (jointName)

    ## Get link name.
    #
    # \param jointName name of the joint,
    # \return name of the link.
    def getLinkName (self, jointName):
        return self.client.basic.robot.getLinkName (jointName)
    ## \}

    ## \name Access to current configuration
    # \{

    ## Set current configuration of composite robot
    #
    # \param q configuration of the composite robot
    def setCurrentConfig (self, q):
        self.client.basic.robot.setCurrentConfig (q)

    ## Get current configuration of composite robot
    #
    # \return configuration of the composite robot
    def getCurrentConfig (self):
        return self.client.basic.robot.getCurrentConfig ()

    ## Shoot random configuration
    # \return dofArray Array of degrees of freedom.
    def shootRandomConfig(self):
        return self.client.basic.robot.shootRandomConfig ()
    ## \}

    ## \name Bodies
    # \{

    ## Get the list of objects attached to a joint.
    # \param jointName name of the joint.
    # \return list of names of CollisionObject attached to the body.
    def getJointInnerObjects (self, jointName):
        return self.client.basic.robot.getJointInnerObjects (jointName)
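# Usage sketch (illustrative, not part of the original file): drive the Builder
# above against a running hpp-rbprm corba server. Every robot, package, and rom
# name below is a hypothetical placeholder.
if __name__ == '__main__':
    builder = Builder ()
    builder.loadModel ('robot_trunk',                          # urdfName (hypothetical)
                       ['robot_lleg_rom', 'robot_rleg_rom'],   # rom urdfs (hypothetical)
                       'freeflyer',                            # rootJointType
                       'robot_meshes',                         # meshPackageName (hypothetical)
                       'robot_description',                    # packageName (hypothetical)
                       '', '')                                 # urdfSuffix, srdfSuffix
    # Bound the root orientation and require both leg roms to touch the
    # environment, as documented on boundSO3 and setFilter above.
    builder.boundSO3 ([-0.4, 0.4, -3.0, 3.0, -3.0, 3.0])
    builder.setFilter (['robot_lleg_rom', 'robot_rleg_rom'])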
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportModel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-aiplatform


# [START aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
from google.cloud import aiplatform_v1


def sample_export_model():
    # Create a client
    client = aiplatform_v1.ModelServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1.ExportModelRequest(
        name="name_value",
    )

    # Make the request
    operation = client.export_model(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()

    # Handle the response
    print(response)

# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
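# Usage sketch (illustrative, not generated code): the snippet's "name_value"
# placeholder must be replaced with a real model resource name of the form
# "projects/<project>/locations/<location>/models/<model>", and the call needs
# working Google Cloud credentials; operation.result() then blocks until the
# long-running export finishes.
if __name__ == "__main__":
    sample_export_model()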
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm


def write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEED):
    r1 = random.Random(SEED)
    dsf = open(csvPathname, "w+")
    for i in range(rowCount):
        rowData = []
        for j in range(colCount):
            ri = r1.randint(0, 1)
            if ri != 0:  # don't include 0's
                colNumber = j + 1
                rowData.append(str(colNumber) + ":" + str(ri))
        ri = r1.randint(0, 1)
        # output class goes first
        rowData.insert(0, str(ri))
        rowDataCsv = " ".join(rowData)  # already all strings
        dsf.write(rowDataCsv + "\n")
    dsf.close()


class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init(1, java_heap_GB=10)

    @classmethod
    def tearDownClass(cls):
        ### time.sleep(3600)
        h2o.tear_down_cloud()

    def test_GLM2_many_cols_libsvm(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        tryList = [
            (100, 3000, 'cA', 300),
            (100, 5000, 'cB', 500),
            # too slow!
            # (100, 10000, 'cC', 800),
        ]

        ### h2b.browseTheCloud()
        lenNodes = len(h2o.nodes)
        for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
            SEEDPERFILE = random.randint(0, sys.maxint)
            csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.svm'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            print "Creating random libsvm:", csvPathname
            write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)

            parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=timeoutSecs)
            print "Parse result['destination_key']:", parseResult['destination_key']

            # We should be able to see the parse result?
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            print "\n" + csvFilename

            y = colCount
            kwargs = {'response': y, 'max_iter': 2, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
            start = time.time()
            glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
            h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)


if __name__ == '__main__':
    h2o.unit_main()
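# Format note (illustrative): each row emitted by write_syn_libsvm_dataset is
# standard libsvm text, output class first, then sparse 1-based index:value
# pairs with zeros omitted. A 4-column row could look like:
#
#     1 1:1 3:1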
        ('change_organizations_mailstatus', 'Can change_organizations mail status'),
        ('delete_mailstatus', 'Can delete mail status'),
        ('delete_mine_mailstatus', 'Can delete_mine mail status'),
        ('delete_organizations_mailstatus', 'Can delete_organizations mail status'),
        ('view_mailstatus', 'Can view mail status'),
        ('view_mine_mailstatus', 'Can view_mine mail status'),
        ('view_organizations_mailstatus', 'Can view_organizations mail status'),
    ],
    'mail': [
        ('add_mail', 'Can add mail'),
        ('change_mail', 'Can change mail'),
        ('change_mine_mail', 'Can change_mine mail'),
        ('change_organizations_mail', 'Can change_organizations mail'),
        ('delete_mail', 'Can delete mail'),
        ('delete_mine_mail', 'Can delete_mine mail'),
        ('delete_organizations_mail', 'Can delete_organizations mail'),
        ('view_mail', 'Can view mail'),
        ('view_mine_mail', 'Can view_mine mail'),
        ('view_organizations_mail', 'Can view_organizations mail'),
    ],
    'message': [
        ('add_message', 'Can add Message'),
        ('change_message', 'Can change Message'),
        ('change_mine_message', 'Can change_mine Message'),
        ('change_organizations_message', 'Can change_organizations Message'),
        ('delete_message', 'Can delete Message'),
        ('delete_mine_message', 'Can delete_mine Message'),
        ('delete_organizations_message', 'Can delete_organizations Message'),
        ('previewsend_message', 'Can previewsend Message'),
        ('previewsend_mine_message', 'Can previewsend_mine Message'),
        ('previewsend_organizations_message', 'Can previewsend_organizations Message'),
        ('view_message', 'Can view Message'),
        ('view_mine_message', 'Can view_mine Message'),
        ('view_organizations_message', 'Can view_organizations Message'),
    ],
    'messageattachment': [
        ('add_messageattachment', 'Can add message attachment'),
        ('change_messageattachment', 'Can change message attachment'),
        ('change_mine_messageattachment', 'Can change_mine message attachment'),
        ('change_organizations_messageattachment', 'Can change_organizations message attachment'),
        ('delete_messageattachment', 'Can delete message attachment'),
        ('delete_mine_messageattachment', 'Can delete_mine message attachment'),
        ('delete_organizations_messageattachment', 'Can delete_organizations message attachment'),
        ('view_messageattachment', 'Can view message attachment'),
        ('view_mine_messageattachment', 'Can view_mine message attachment'),
        ('view_organizations_messageattachment', 'Can view_organizations message attachment'),
    ],
    'previewmail': [
        ('add_previewmail', 'Can add preview mail'),
        ('change_mine_previewmail', 'Can change_mine preview mail'),
        ('change_organizations_previewmail', 'Can change_organizations preview mail'),
        ('change_previewmail', 'Can change preview mail'),
        ('delete_mine_previewmail', 'Can delete_mine preview mail'),
        ('delete_organizations_previewmail', 'Can delete_organizations preview mail'),
        ('delete_previewmail', 'Can delete preview mail'),
        ('view_mine_previewmail', 'Can view_mine preview mail'),
        ('view_organizations_previewmail', 'Can view_organizations preview mail'),
        ('view_previewmail', 'Can view preview mail'),
    ],
}

GROUP_PERMISSIONS = {
    'administrators': {
        'mailstatus': [
            'view_organizations_mailstatus',
        ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail',
        ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message',
        ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment',
        ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail',
        ],
    },
    'managers': {
        'mailstatus': [
            'view_organizations_mailstatus',
        ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail',
        ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message',
        ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment',
        ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail',
        ],
    },
    'users': {
        'mailstatus': [
            'view_mine_mailstatus',
        ],
        'mail': [
            'add_mail',
            'change_mine_mail',
            'delete_mine_mail',
            'view_mine_mail',
        ],
        'message': [
            'add_message',
            'change_mine_message',
            'delete_mine_message',
            'previewsend_mine_message',
            'view_mine_message',
        ],
        'messageattachment': [
            'add_messageattachment',
            'change_mine_messageattachment',
            'delete_mine_messageattachment',
            'view_mine_messageattachment',
        ],
        'previewmail': [
            'view_mine_previewmail',
        ],
    },
    'collaborators': {
        'mailstatus': [
            'view_organizations_mailstatus',
        ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail',
        ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message',
        ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment',
        ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail',
        ],
    },
}


def update_content_types(apps, schema_editor):
    db_alias = schema_editor.connection.alias
    emit_post_migrate_signal(False, 'default', db_alias)


def load_permissions(apps, schema_editor):
    Group = apps.get_model('auth', 'group')
    Permission = apps.get_model('auth', 'permission')
    ContentType = apps.get_model('contenttypes', 'contenttype')

    # Delete previous permissions
    for model in PERMISSIONS:
        content_type = ContentType.objects.get(
            app_label='campaigns', model=model)
        Permission.objects.filter(content_type=content_type).delete()

    # Load permissions
    for model_name, permissions in PERMISSIONS.items():
        for permission_codename, permission_name in permissions:
            content_type = ContentType.objects.get(
                app_label='campaigns', model=model_name)
            if not Permission.objects.filter(
                    codename=permission_codename,
                    content_type=content_type).exists():
                Permission.objects.create(
                    name=permission_name,
                    codename=permission_codename,
                    content_type=content_type)

    # Group permissions
    for group_name, models in GROUP_PERMISSIONS.items():
        group, _ = Group.objects.get_or_create(name=group_name)
        for model_name, permissions in models.items():
            c
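# Sketch (illustrative): the excerpt cuts off before the Migration class, but a
# migration of this shape typically registers the two functions above via
# RunPython; the dependency name below is a hypothetical placeholder.
#
# from django.db import migrations
#
# class Migration(migrations.Migration):
#     dependencies = [
#         ('campaigns', '0001_initial'),  # hypothetical
#     ]
#     operations = [
#         migrations.RunPython(update_content_types, migrations.RunPython.noop),
#         migrations.RunPython(load_permissions, migrations.RunPython.noop),
#     ]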
"""Testing chapter C creation""" self.chapter_c.update(self.plist) assert(type(self.chapter_c.content)==str), self.fail("Wrong type returned") #length calc header == 1 + 2 * length length_wait = 1 + 2 * len(self.plist) assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned") def test_update_1(self): self.plist.append([[176, 42, 100],6]) self.chapter_c.update(self.plist) length_wait = 1 + 2 * 127 assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned") def test_parse(self): """Test chapter C parsing""" self.chapter_c.update(self.plist) size, parsed_res, marker_s = self.chapter_c.parse(self.chapter_c.content) assert(len(parsed_res)==len(self.plist)), \ self.fail("Wrong number of command returned") for i in range(len(self.plist)): assert(parsed_res[i][0]==self.plist[i][0][0]), \ self.fail("Wrong value returned for cmd") assert(parsed_res[i][1]==self.plist[i][0][1]), \ self.fail("Wrong value returned for pitch") assert(parsed_res[i][2]==self.plist[i][0][2]), \ self.fail("Wrong value returned for velocity") def test_trim(self): plist = [] plist.append([[176, 42, 100],6]) plist.append([[176, 43, 100],7]) plist.append([[176, 44, 100],8]) self.chapter_c.update(plist) self.chapter_c.trim(7) assert(len(self.chapter_c.controllers)==1), self.fail("Problem erasing controllers on trim") def test_update_highest(self): plist = [] plist.append([[176, 42, 100],6]) plist.append([[176, 43, 100],7]) plist.append([[176, 44, 100],8]) self.chapter_c.update(plist) assert(self.chapter_c.highest==8), \ self.fail("Problem with highest on update") self.chapter_c.trim(7) assert(self.chapter_c.highest==8), \ self.fail("Problem with highest on trim(1)") self.chapter_c.trim(8) assert(self.chapter_c.highest==0), \ self.fail("Problem with highest on trim(2)") class TestChapterW(unittest.TestCase): def setUp(self): self.chapter_w = ChapterW() self.plist = [[[224, 0, 120], 6], [[224, 1, 110], 6]] def test_update(self): """Test create chapter W""" self.chapter_w.update(self.plist) assert(type(self.chapter_w.content)==str), self.fail("Wrong type returned") assert(len(self.chapter_w.content)==2), \ self.fail("Wrong size for chapter W part in recovery journal") def test_parse(self): self.chapter_w.update(self.plist) size, res_2, mark_s = self.chapter_w.parse(self.chapter_w.content) assert(mark_s == 1), \ self.fail("Wrong value for S bit in Chapter W") assert(res_2[0][2]==120), \ self.fail("Wrong value for wheel_1 in Chapter W") assert(res_2[1][2]==110), \ self.fail("Wrong value for wheel_2 in Chapter W") def test_trim(self): self.chapter_w.update(self.plist) self.chapter_w.trim(6) for data in self.chapter_w.data_list: assert(data[0]==0), self.fail("Problem trimming chapter") assert(self.chapter_w.highest==0), self.fail("Wrong update for highest") class TestChapterN(unittest.TestCase): def setUp(self): self.chapter_n = ChapterN() self.plist_on = [] self.plist_off = [] #List of notes to test #Note on for i in range(127): self.plist_on.append([[144, i, 100],6]) #Note off for i in range(127): self.plist_off.append([[128, i, 100],7]) def test_header(self): """Test Create header of chapterN """ #Creating chapter self.chapter_n.update(self.plist_on) res = self.chapter_n.header() #length type test assert(len(res)==2), self.fail("length of header is not good") assert(type(res)==str), self.fail("Wrong type return") def test_parse_header(self): """Test parse header of ChapterN""" #Creating chapter self.chapter_n.update(self.plist_off) res = self.chapter_n.header() #Parsing res_parsed = 
self.chapter_n.parse_header(res) #Testing type assert(type(res_parsed)==tuple), self.fail("Wrong type return") #Testing content assert(res_parsed[1]==0), \ self.fail("Problem getting good value of LEN") assert(res_parsed[2]==0), \ self.fail("Problem getting good value of LOW") assert(res_parsed[3]==15), \ self.fail("Problem getting good value of HIGH") def test_update(self): """Update with 127 note_off""" self.chapter_n.update(self.plist_off) #Test len content length_wait = 128 / 8 + 2 assert(len(self.chapter_n.content)==length_wait), \ self.fail("Wrong size for chapter encoded returned") #Test note_on assert(len(self.chapter_n.note_on)==0), \ self.fail("Wrong nb of note on recorded") #Test note_off assert(len(self.chapter_n.note_off)==127), \ self.fail("Wrong nb of note off recorded") #Test low assert(self.chapter_n.low==0), self.fail("Wrong low calculation") #Test high assert(self.chapter_n.high==15), self.fail("Wrong high calculation") #TEst highest assert(self.chapter_n.highest==7), self.fail("Wrong highest saved") def test_update_1(self): """Update with 127 note_on""" self.chapter_n.update(self.plist_on) #Test len content length_wait = 127 * 2 + 2 assert(len(self.chapter_n.content)==length_wait), \ self.fail("Wrong size for chapter encoded returned") #Test note_on assert(len(self.chapter_n.note_on)==127), \ self.fail("Wrong nb of note on recorded") #Test note_off assert(len(self.chapter_n.note_off)==0), \ self.fail("Wrong nb of note off recorded") #Test low assert(self.chapter_n.low==0), self.fail("Wrong low calculation") #Test high assert(self.chapter_n.high==0), self.fail("Wrong high calculation") #TEst highest assert(self.chapter_n.highest==6), self.fail("Wrong highest saved") def test_update_2(self): """Update with note_on / off and ...""" self.plist_on.append([[144, 42, 100],6]) self.chapter_n.update(self.plist_on) #Test len content length_wait = 127 * 2 + 2 assert(len(self.chapter_n.content)==length_wait), \ self.fail("Wrong size for chapter encoded returned") assert(len(self.chapter_n.note_on)==127), \ self.fail("Wrong nb of note on recorded") self.chapter_n.update(self.plist_off) #Test len content length_wait = 128 / 8 + 2 assert(len(self.chapter_n.content)==length_wait), \ self.fail("Wrong size for chapter encoded returned") #Test note_on assert(len(self.chapter_n.note_on)==0), \ self.fail("Wrong nb of note on recorded") #Test note_off assert(len(self.chapter_n.note_
off)==127), \ self.fail("Wrong nb of note off recorded") def test_parse(self): """ Test parse chapter N with several notes""" #creating chapter self.chapter_n.update(self.plist_off) size, notes_parsed = self.chapter_n.parse(self.chapter_n.content) assert(len(notes_parsed)==127), self.fail("Wrong number of notes returned") assert(size==18), self.fail("Wrong size of encoded chapter") def test_parse_2(self): off_mont = [[[128, 62, 100],
1000]] self.chapter_n.update(off_mont) size, notes_parsed = self.chapter_n.parse(self.chapter_n.content) def test_trim(self): self.chapter_n.update(self.plist_off) self.chapter_n.trim(6) #Test highest a
#!/usr/bin/env python

from nose.tools import *
from utilities import execution_path, run_all
from utilities import side_by_side_image
import os, mapnik
import re

def setup():
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))

def replace_style(m, name, style):
    m.remove_style(name)
    m.append_style(name, style)

def test_append():
    s = mapnik.Style()
    eq_(s.image_filters,'')
    s.image_filters = 'gray'
    eq_(s.image_filters,'gray')
    s.image_filters = 'sharpen'
    eq_(s.image_filters,'sharpen')

if 'shape' in mapnik.DatasourceCache.plugin_names():
    def test_style_level_image_filter():
        m = mapnik.Map(256, 256)
        mapnik.load_map(m, '../data/good_maps/style_level_image_filter.xml')
        m.zoom_all()
        successes = []
        fails = []
        for name in ("",
                     "agg-stack-blur(2,2)",
                     "blur",
                     "edge-detect",
                     "emboss",
                     "gray",
                     "invert",
                     "sharpen",
                     "sobel",
                     "x-gradient",
                     "y-gradient"):
            if name == "":
                filename = "none"
            else:
                filename = re.sub(r"[^-_a-z.0-9]", "", name)
            # find_style returns a copy of the style object
            style_markers = m.find_style("markers")
            style_markers.image_filters = name
            style_labels = m.find_style("labels")
            style_labels.image_filters = name
            # replace the original style with the modified one
            replace_style(m, "markers", style_markers)
            replace_style(m, "labels", style_labels)
            im = mapnik.Image(m.width, m.height)
            mapnik.render(m, im)
            actual = '/tmp/mapnik-style-image-filter-' + filename + '.png'
            expected = 'images/style-image-filter/' + filename + '.png'
            im.save(actual,"png32")
            if not os.path.exists(expected):
                print 'generating expected test image: %s' % expected
                im.save(expected,'png32')
            expected_im = mapnik.Image.open(expected)
            # compare them
            if im.tostring('png32') == expected_im.tostring('png32'):
                successes.append(name)
            else:
                fails.append('failed comparing actual (%s) and expected(%s)' % (actual,'tests/python_tests/'+ expected))
                fail_im = side_by_side_image(expected_im, im)
                fail_im.save('/tmp/mapnik-style-image-filter-' + filename + '.fail.png','png32')
        eq_(len(fails), 0, '\n'+'\n'.join(fails))

if __name__ == "__main__":
    setup()
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
      _ops.equal(a_rank, 1), [a_rank])
    new_shape = [1] * n
    new_shape[i] = -1
    dtype = a.dtype
    if dtype == dtypes.bool:
      output.append(array_ops.reshape(nonzero(a)[0], new_shape))
    elif dtype.is_integer:
      output.append(array_ops.reshape(a, new_shape))
    else:
      raise ValueError(
          'Only integer and bool dtypes are supported, got {}'.format(dtype))
  return output


@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs):  # pylint: disable=missing-docstring
  subok = kwargs.pop('subok', False)
  if subok:
    raise ValueError('subok=True is not supported.')
  if kwargs:
    raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))

  args = [asarray(arg) for arg in args]
  return np_utils.tf_broadcast(*args)


@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs):  # pylint: disable=missing-docstring,redefined-outer-name
  if out:
    raise ValueError('tf.numpy doesnt support setting out.')
  if where:
    raise ValueError('tf.numpy doesnt support setting where.')
  if kwargs:
    raise ValueError('tf.numpy doesnt support setting {}'.format(kwargs.keys()))

  x = asarray(x)
  dtype = x.dtype.as_numpy_dtype
  if np.issubdtype(dtype, np.complexfloating):
    result = math_ops.cast(math_ops.sign(math_ops.real(x)), dtype)
  else:
    result = math_ops.sign(x)

  return result


# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis):  # pylint: disable=missing-docstring
  arr = asarray(arr)
  indices = asarray(indices)

  if axis is None:
    return take_along_axis(arr.ravel(), indices, 0)

  rank = array_ops.rank(arr)
  axis = axis + rank if axis < 0 else axis

  # Broadcast shapes to match, ensure that the axis of interest is not
  # broadcast.
  arr_shape_original = array_ops.shape(arr)
  indices_shape_original = array_ops.shape(indices)
  arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
  indices_shape = array_ops.tensor_scatter_update(indices_shape_original,
                                                  [[axis]], [1])
  broadcasted_shape = array_ops.broadcast_dynamic_shape(arr_shape,
                                                        indices_shape)
  arr_shape = array_ops.tensor_scatter_update(broadcasted_shape, [[axis]],
                                              [arr_shape_original[axis]])
  indices_shape = array_ops.tensor_scatter_update(
      broadcasted_shape, [[axis]], [indices_shape_original[axis]])
  arr = array_ops.broadcast_to(arr, arr_shape)
  indices = array_ops.broadcast_to(indices, indices_shape)

  # Save indices shape so we can restore it later.
  possible_result_shape = indices.shape

  # Correct indices since gather doesn't correctly handle negative indices.
  indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)

  swapaxes_ = lambda t: swapaxes(t, axis, -1)

  dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
  arr = np_utils.cond(dont_move_axis_to_end, lambda: arr,
                      lambda: swapaxes_(arr))
  indices = np_utils.cond(dont_move_axis_to_end, lambda: indices,
                          lambda: swapaxes_(indices))

  arr_shape = array_ops.shape(arr)
  arr = array_ops.reshape(arr, [-1, arr_shape[-1]])

  indices_shape = array_ops.shape(indices)
  indices = array_ops.reshape(indices, [-1, indices_shape[-1]])

  result = array_ops.gather(arr, indices, batch_dims=1)
  result = array_ops.reshape(result, indices_shape)
  result = np_utils.cond(dont_move_axis_to_end, lambda: result,
                         lambda: swapaxes_(result))
  result.set_shape(possible_result_shape)

  return result


_SLICE_ERORR = (
    'only integers, slices (`:`), ellipsis (`...`), '
    'numpy.newaxis (`None`) and integer or boolean arrays are valid indices')


def _as_index(idx, need_scalar=True):
  """Helper function to parse idx as an index.

  Args:
    idx: index
    need_scalar: If idx needs to be a scalar value.

  Returns:
    A pair, (indx, bool). First one is the parsed index and can be a tensor,
    or scalar integer / Dimension. Second one is True if rank is known to be 0.

  Raises:
    IndexError: For incorrect indices.
  """
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return idx, True
  data = asarray(idx)
  if data.dtype == dtypes.bool:
    if data.shape.ndims != 1:
      # TODO(agarwal): handle higher rank boolean masks.
      raise NotImplementedError('Need rank 1 for bool index %s' % idx)
    data = array_ops.where_v2(data)
    data = array_ops.reshape(data, [-1])
  if need_scalar and data.shape.rank not in (None, 0):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  np_dtype = data.dtype.as_numpy_dtype
  if not np.issubdtype(np_dtype, np.integer):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  if data.dtype not in (dtypes.int64, dtypes.int32):
    # TF slicing can only handle int32/int64. So we need to cast.
    promoted_dtype = np.promote_types(np.int32, np_dtype)
    if promoted_dtype == np.int32:
      data = math_ops.cast(data, dtypes.int32)
    elif promoted_dtype == np.int64:
      data = math_ops.cast(data, dtypes.int64)
    else:
      raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  return data, data.shape.rank == 0


class _UpdateMethod(enum.Enum):
  UPDATE = 0
  ADD = 1
  MIN = 2
  MAX = 3


def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
  """Helper function for __getitem__ and _with_index_update_helper.

  This function collects the indices in `slice_spec` into two buckets, which we
  can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
  `gather`. They also correspond to "basic indices" and "advanced indices" in
  numpy.

  This function supports both reading and writing at the indices. The reading
  path can be summarized as `gather(stride_slice(tensor, idx1), idx2)`. The
  writing path can be summarized as `strided_slice_update(tensor, idx1,
  scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here means
  `tf.gather` or `tf.gather_nd`; `scatter` here means
  `tf.tensor_scatter_update`.) The writing path is inefficient because it needs
  to first read out a portion (probably much larger than `updates`) of `tensor`
  using `strided_slice`, update it, and then write the portion back. An
  alternative approach is to only use `scatter`, which amounts to using the
  indexing mechanism of gather/scatter to implement
  strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
  because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
  not TF gather/scatter because they don't support spans (except those that
  cover entire dimensions, i.e. `:`). If we materialize spans into individual
  indices, the size of the index tensor would explode. (Note that XLA
  Gather/Scatter have a similar problem for stride > 1 because they don't
  support strides. Indices such as `1:2:8` will need to be materialized into
  individual indices such as [1, 3, 5, 7].)

  Args:
    tensor: the tensor to be read from or write into.
    slice_spec: the indices.
    update_method: (optional) a member of `_UpdateMethod`, indicating how to
      update the values (replacement, add, etc.). `None` indicates just
      reading.
    updates: (optional) the new values to write into `tensor`. It must have the
      same dtype as `tensor`.

  Returns:
    The result of reading (if `update_method` is `None`) or the updated
    `tensor` after writing.
  """
  begin, end, strides = [], [], []
  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  advanced_indices = []
  shrink_indices = []
  for index, s in enumerate(slice_spec):
    if isinstance(s, slice):
      if s.start is not None:
        begin.append(_as_index(s.start)[0])
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None:
        end.append(_as_index(s.stop)[0])
      else:
        end.append(0)
        end_mask |= (1 << index)
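# Semantics sketch for take_along_axis above, in plain NumPy (illustrative,
# independent of the TF-internal helpers): pick one element per row with an
# index array that broadcasts everywhere except the indexed axis.
#
#     import numpy as np
#     arr = np.array([[10, 30, 20],
#                     [60, 40, 50]])
#     idx = np.expand_dims(np.argmax(arr, axis=1), axis=1)  # shape (2, 1)
#     np.take_along_axis(arr, idx, axis=1)                  # [[30], [60]]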
                self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCALONG'
                self.part2mod[self.modBpartIDs[i]]['channel'] = 'LONG'
                self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCBLONG'
            elif i < 4:
                self.part2mod[self.modApartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCA' + str(i+1)
                self.part2mod[self.modBpartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCB' + str(i+1)
            elif i > 4 and i < 9:
                self.part2mod[self.modApartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCA' + str(i+1-5)
                self.part2mod[self.modBpartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCB' + str(i+1-5)
            elif i > 9 and i < 14:
                self.part2mod[self.modApartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCA' + str(i+1-10)
                self.part2mod[self.modBpartIDs[i]]['channel'] = 'SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCB' + str(i+1-10)

    def add_options(self, parser=None, usage=None):
        if parser == None:
            parser = optparse.OptionParser(usage=usage, conflict_handler="resolve")
        parser.add_option('-v', '--verbose', action="count", dest="verbose", default=0)
        parser.add_option('-o', '--outfilebasename', default='auto', type="string",
                          help='file basename of output file. If \'auto\', then basename is input filename with fits removed (default=%default)')
        parser.add_option('-d', '--outdir', default=None, type="string",
                          help='if specified output directory (default=%default)')
        parser.add_option('-s', '--outsubdir', default=None, type="string",
                          help='if specified gets added to output directory (default=%default)')
        parser.add_option('--outsuffix', default=None, type="string",
                          help='if specified: output suffix, otherwise _uncal.fits (default=%default)')
        return(parser)

    def copy_comments(self, filename):
        incomments = self.hdr['COMMENT']
        return

    def copy_history(self, filename):
        return

    def mkoutfilebasename(self, filename, outfilebasename='auto', outdir=None, outsuffix=None, outsubdir=None):
        if outfilebasename.lower() == 'auto':
            outfilebasename = re.sub('\.fits$', '', filename)
            if outfilebasename == filename:
                raise RuntimeError('BUG!!! %s=%s' % (outfilebasename, filename))
        # new outdir?
        if outdir != None:
            (d, f) = os.path.split(outfilebasename)
            outfilebasename = os.path.join(outdir, f)
        # append suffix?
        if outsuffix != None:
            outfilebasename += '.' + outsuffix
        # add subdir?
        if outsubdir != None:
            (d, f) = os.path.split(outfilebasename)
            outfilebasename = os.path.join(d, outsubdir, f)
        # make sure output dir exists
        dirname = os.path.dirname(outfilebasename)
        if dirname != '' and not os.path.isdir(dirname):
            os.makedirs(dirname)
            if not os.path.isdir(dirname):
                raise RuntimeError('ERROR: Cannot create directory %s' % dirname)
        return(outfilebasename)

    def cryo_update_meta_detector(self, runID=None, filename=None, reffileflag=True):
        if runID == None:
            runID = self.runID
        if runID == 'TUCSONNEW':
            self.outputmodel.meta.instrument.module = self.hdr['MODULE']
            if self.hdr['DETECTOR'] == 'SW':
                self.outputmodel.meta.instrument.channel = 'SHORT'
            elif self.hdr['DETECTOR'] == 'LW':
                self.outputmodel.meta.instrument.channel = 'LONG'
            else:
                raise RuntimeError('wrong DETECTOR=%s' % self.hdr['DETECTOR'])
            self.outputmodel.meta.instrument.detector = 'NRC%s%d' % (self.outputmodel.meta.instrument.module, self.hdr['SCA'])
            print('TEST!!!', self.outputmodel.meta.instrument.module, self.outputmodel.meta.instrument.channel, self.outputmodel.meta.instrument.detector)
        elif runID == 'TUCSON_PARTNUM':
            idInFilename = filename[0:5]
            self.outputmodel.meta.instrument.detector = self.part2mod[idInFilename]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[idInFilename]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[idInFilename]['module']
        elif runID == 'CRYO2' or runID == 'CRYO3':
            detectorname = self.hdr['DETECTOR']
            self.outputmodel.meta.instrument.filetype = 'UNCALIBRATED'
            if re.search('^NRCA', detectorname):
                self.outputmodel.meta.instrument.module = 'A'
            elif re.search('^NRCB', detectorname):
                self.outputmodel.meta.instrument.module = 'B'
            else:
                raise RuntimeError('wrong DETECTOR=%s' % detectorname)
            if re.search('LONG$', detectorname):
                self.outputmodel.meta.instrument.channel = 'LONG'
            else:
                self.outputmodel.meta.instrument.channel = 'SHORT'
            self.outputmodel.meta.instrument.detector = self.hdr['DETECTOR']
            print(self.outputmodel.meta.instrument.module)
            print(self.outputmodel.meta.instrument.channel)
            print(self.outputmodel.meta.instrument.detector)
        elif runID == 'CV2':
            if 'TLDYNEID' in self.hdr:
                detectorname = self.hdr['TLDYNEID']
            elif 'SCA_ID' in self.hdr:
                detectorname = self.hdr['SCA_ID']
            else:
                print('ERROR! could not get detector!!!')
                sys.exit(0)
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exists')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
            #if reffileflag:
            #    self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
            #    #self.outputmodel.meta.reffile.author = self.hdr['AUTHOR']
        elif runID == 'CV3':
            if 'SCA_ID' in self.hdr:
                detectorname = self.hdr['SCA_ID']
            else:
                print("ERROR! could not get detector!!!")
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exists')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
        elif runID == 'OTIS':
            if 'SCA_ID' in self.hdr:
                detectorname = self.hdr['SCA_ID']
            else:
                print("ERROR! could not get detector!!!")
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exists')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
        else:
            print('ERROR!!! dont know runID=%s' % runID)
            sys.exit(0)

    def getRunID(self, filename=None, hdr=None):
        if hdr != None:
            if 'TERROIR' in hdr:
                if hdr['TERROI
#!/usr/bin/python

import glob, re, sys, math, pyfits
import numpy as np
import utils

if len(sys.argv) < 2:
    print '\nconvert basti SSP models to ez_gal fits format'
    print 'Run in directory with SED models for one metallicity'
    print 'Usage: convert_basti.py ez_gal.ascii\n'
    sys.exit(2)

fileout = sys.argv[1]

# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join(fileout.split('.')[:-1]).split('_')
# look for sfh
for (check, val) in zip(['ssp', 'exp'], ['SSP', 'Exponential']):
    if parts.count(check):
        sfh = val
        sfh_index = parts.index(check)
        break
# tau?
if sfh:
    tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count('z'):
    met = parts[parts.index('z') + 1]
# imf
for (check, val) in zip(['krou', 'salp', 'chab'], ['Kroupa', 'Salpeter', 'Chabrier']):
    if parts.count(check):
        imf = val
        break
if parts.count('n'):
    n = parts[parts.index('n') + 1]
ae = False
if parts.count('ae'):
    ae = True

# does the file with masses exist?
has_masses = False
mass_file = glob.glob('MLR*.txt')
if len(mass_file):
    # read it in!
    print 'Loading masses from %s' % mass_file[0]
    data = utils.rascii(mass_file[0], silent=True)
    masses = data[:,10:14].sum(axis=1)
    has_masses = True

files = glob.glob('SPEC*agb*')
nages = len(files)
ages = []

for (i, file) in enumerate(files):
    ls = []
    this = []

    # extract the age from the filename and convert to years
    m = re.search('t60*(\d+)$', file)
    ages.append(int(m.group(1))*1e6)

    # read in this file
    fp = open(file, 'r')
    for line in fp:
        parts = line.strip().split()
        ls.append(float(parts[0].strip()))
        this.append(float(parts[1].strip()))

    if i == 0:
        # if this is the first file, generate the data table
        nls = len(ls)
        seds = np.empty((nls, nages))

    # convert to ergs/s/angstrom
    seds[:,i] = np.array(this)/4.3607e-33/1e10

# convert to numpy
ages = np.array(ages)
ls = np.array(ls)*10.0

# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]

# speed of light
c = utils.convert_length(utils.c, incoming='m', outgoing='a')

# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape((ls.size, 1))**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length(10, incoming='pc', outgoing='cm')**2.0)

# sort in frequency space
sinds = vs.argsort()

# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update('units', 'ergs/s/cm^2/Hz')
primary_hdu.header.update('has_seds', True)
primary_hdu.header.update('nfilters', 0)
primary_hdu.header.update('nzfs', 0)

# store meta data
if sfh and met and imf:
    primary_hdu.header.update('has_meta', True)
    primary_hdu.header.update('model', 'BaSTI', comment='meta data')
    primary_hdu.header.update('met', met, comment='meta data')
    primary_hdu.header.update('imf', imf, comment='meta data')
    primary_hdu.header.update('sfh', sfh, comment='meta data')
    if sfh == 'Exponential':
        primary_hdu.header.update('tau', tau, comment='meta data')
    primary_hdu.header.update('n', n, comment='meta data')
    primary_hdu.header.update('ae', ae, comment='meta data')

# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update('units', 'hertz')
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses:
    cols.append(pyfits.Column(name='masses', array=masses, format='D', unit='m_sun'))
ages_hdu = pyfits.new_table(pyfits.ColDefs(cols))
if has_masses:
    ages_hdu.header.update('has_mass', True)

# make the fits file in memory
hdulist = pyfits.HDUList([primary_hdu, vs_hdu, ages_hdu])
# and write it out
hdulist.writeto(fileout, clobber=True)
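# Unit-conversion note (illustrative): the `seds *= ls**2/c` step above is the
# standard change of variables f_nu = f_lambda * lambda**2 / c, which follows
# from f_nu dnu = f_lambda dlambda with nu = c/lambda and |dnu/dlambda| =
# c/lambda**2. The later division by 4*pi*(10 pc in cm)**2 normalizes the SED
# to the flux observed at the standard distance of 10 parsecs.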
from __future__ import print_function, absolute_import

import numpy as np

from numba import cuda, int32, float32
from numba.cuda.testing import unittest
from numba.config import ENABLE_CUDASIM


def useless_sync(ary):
    i = cuda.grid(1)
    cuda.syncthreads()
    ary[i] = i


def simple_smem(ary):
    N = 100
    sm = cuda.shared.array(N, int32)
    i = cuda.grid(1)
    if i == 0:
        for j in range(N):
            sm[j] = j
    cuda.syncthreads()
    ary[i] = sm[i]


def coop_smem2d(ary):
    i, j = cuda.grid(2)
    sm = cuda.shared.array((10, 20), float32)
    sm[i, j] = (i + 1) / (j + 1)
    cuda.syncthreads()
    ary[i, j] = sm[i, j]


def dyn_shared_memory(ary):
    i = cuda.grid(1)
    sm = cuda.shared.array(0, float32)
    sm[i] = i * 2
    cuda.syncthreads()
    ary[i] = sm[i]


def use_threadfence(ary):
    ary[0] += 123
    cuda.threadfence()
    ary[0] += 321


def use_threadfence_block(ary):
    ary[0] += 123
    cuda.threadfence_block()
    ary[0] += 321


def use_threadfence_system(ary):
    ary[0] += 123
    cuda.threadfence_system()
    ary[0] += 321


class TestCudaSync(unittest.TestCase):
    def test_useless_sync(self):
        compiled = cuda.jit("void(int32[::1])")(useless_sync)
        nelem = 10
        ary = np.empty(nelem, dtype=np.int32)
        exp = np.arange(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == exp))

    def test_simple_smem(self):
        compiled = cuda.jit("void(int32[::1])")(simple_smem)
        nelem = 100
        ary = np.empty(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))

    def test_coop_smem2d(self):
        compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
        shape = 10, 20
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape](ary)
        exp = np.empty_like(ary)
        for i in range(ary.shape[0]):
            for j in range(ary.shape[1]):
                exp[i, j] = (i + 1) / (j + 1)
        self.assertTrue(np.allclose(ary, exp))

    def test_dyn_shared_memory(self):
        compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
        shape = 50
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape, 0, ary.size * 4](ary)
        self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))

    def test_threadfence_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.gl;", compiled.ptx)

    def test_threadfence_block_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_block)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.cta;", compiled.ptx)

    def test_threadfence_system_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_system)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            self.assertIn("membar.sys;", compiled.ptx)


if __name__ == '__main__':
    unittest.main()
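# Launch-configuration note (illustrative): Numba CUDA kernels take up to four
# launch parameters, kernel[griddim, blockdim, stream, dynamic_smem_bytes].
# test_dyn_shared_memory above uses the fourth slot:
#
#     compiled[1, shape, 0, ary.size * 4](ary)
#
# i.e. one block of `shape` threads on the default stream 0, with shape * 4
# bytes of dynamic shared memory backing the cuda.shared.array(0, float32) view.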
        _, node_to_add = np.where(graph[connected_components_matrix] != 0)
        connected_components_matrix[node_to_add] = True
        if last_num_component >= connected_components_matrix.sum():
            break
    return connected_components_matrix


def _graph_is_connected(graph):
    """ Return whether the graph is connected (True) or Not (False)

    Parameters
    ----------
    graph : array-like or sparse matrix, shape: (n_samples, n_samples)
        adjacency matrix of the graph, non-zero weight means an edge
        between the nodes

    Returns
    -------
    is_connected : bool
        True means the graph is fully connected and False means not
    """
    if sparse.isspmatrix(graph):
        # sparse graph, find all the connected components
        n_connected_components, _ = connected_components(graph)
        return n_connected_components == 1
    else:
        # dense graph, find all connected components start from node 0
        return _graph_connected_component(graph, 0).sum() == graph.shape[0]


def _set_diag(laplacian, value):
    """Set the diagonal of the laplacian matrix and convert it to a
    sparse format well suited for eigenvalue decomposition

    Parameters
    ----------
    laplacian : array or sparse matrix
        The graph laplacian
    value : float
        The value of the diagonal

    Returns
    -------
    laplacian : array or sparse matrix
        An array or matrix in a form that is well suited to fast
        eigenvalue decomposition, depending on the band width of the
        matrix.
    """
    n_nodes = laplacian.shape[0]
    # We need all entries in the diagonal to values
    if not sparse.isspmatrix(laplacian):
        laplacian.flat[::n_nodes + 1] = value
    else:
        laplacian = laplacian.tocoo()
        diag_idx = (laplacian.row == laplacian.col)
        laplacian.data[diag_idx] = value
        # If the matrix has a small number of diagonals (as in the
        # case of structured matrices coming from images), the
        # dia format might be best suited for matvec products:
        n_diags = np.unique(laplacian.row - laplacian.col).size
        if n_diags <= 7:
            # 3 or less outer diagonals on each side
            laplacian = laplacian.todia()
        else:
            # csr has the fastest matvec and is thus best suited to
            # arpack
            laplacian = laplacian.tocsr()
    return laplacian


def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the heat
    kernel of a euclidean distance matrix or a k-NN matrix).

    However care must be taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif not eigen_solver in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    # Check that the matrices given is symmetric
    if ((not sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T) < 1e-10)) or
        (sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T).data < 1e-10))):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup.
    preferences.readPreferences( oozebanePreferences )
    if not oozebanePreferences.activateOozebane.value:
        return gcodeText
    skein = OozebaneSkein()
    skein.parseGcode( gcodeText, oozebanePreferences )
    return skein.output.getvalue()

def writeOutput( fileName = '' ):
    "Oozebane a gcode linear move file. Chain oozebane the gcode if it is not already oozebaned. If no fileName is specified, oozebane the first unmodified gcode file in this folder."
    if fileName == '':
        unmodified = interpret.getGNUTranslatorFilesUnmodified()
        if len( unmodified ) == 0:
            print( "There are no unmodified gcode files in this folder." )
            return
        fileName = unmodified[ 0 ]
    oozebanePreferences = OozebanePreferences()
    preferences.readPreferences( oozebanePreferences )
    startTime = time.time()
    print( 'File ' + gcodec.getSummarizedFilename( fileName ) + ' is being chain oozebaned.' )
    suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_oozebane.gcode'
    oozebaneGcode = getOozebaneChainGcode( fileName, '', oozebanePreferences )
    if oozebaneGcode == '':
        return
    gcodec.writeFileText( suffixFilename, oozebaneGcode )
    print( 'The oozebaned file is saved as ' + gcodec.getSummarizedFilename( suffixFilename ) )
    analyze.writeOutput( suffixFilename, oozebaneGcode )
    print( 'It took ' + str( int( round( time.time() - startTime ) ) ) + ' seconds to oozebane the file.' )


class OozebanePreferences:
    "A class to handle the oozebane preferences."
    def __init__( self ):
        "Set the default preferences, execute title & preferences fileName."
        #Set the default preferences.
        self.archive = []
        self.activateOozebane = preferences.BooleanPreference().getFromValue( 'Activate Oozebane', False )
        self.archive.append( self.activateOozebane )
        self.afterStartupDistance = preferences.FloatPreference().getFromValue( 'After Startup Distance (millimeters):', 1.2 )
        self.archive.append( self.afterStartupDistance )
        self.earlyShutdownDistance = preferences.FloatPreference().getFromValue( 'Early Shutdown Distance (millimeters):', 1.2 )
        self.archive.append( self.earlyShutdownDistance )
        self.earlyStartupDistanceConstant = preferences.FloatPreference().getFromValue( 'Early Startup Distance Constant (millimeters):', 20.0 )
        self.archive.append( self.earlyStartupDistanceConstant )
        self.earlyStartupMaximumDistance = preferences.FloatPreference().getFromValue( 'Early Startup Maximum Distance (millimeters):', 1.2 )
        self.archive.append( self.earlyStartupMaximumDistance )
        self.firstEarlyStartupDistance = preferences.FloatPreference().getFromValue( 'First Early Startup Distance (millimeters):', 25.0 )
        self.archive.append( self.firstEarlyStartupDistance )
        self.fileNameInput = preferences.Filename().getFromFilename( interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File to be Oozebaned', '' )
        self.archive.append( self.fileNameInput )
        self.minimumDistanceForEarlyStartup = preferences.FloatPreference().getFromValue( 'Minimum Distance for Early Startup (millimeters):', 0.0 )
        self.archive.append( self.minimumDistanceForEarlyStartup )
        self.minimumDistanceForEarlyShutdown = preferences.FloatPreference().getFromValue( 'Minimum Distance for Early Shutdown (millimeters):', 0.0 )
        self.archive.append( self.minimumDistanceForEarlyShutdown )
        self.slowdownStartupSteps = preferences.IntPreference().getFromValue( 'Slowdown Startup Steps (positive integer):', 3 )
        self.archive.append( self.slowdownStartupSteps )
        #Create the archive, title of the execute button, title of the dialog & preferences fileName.
        self.executeTitle = 'Oozebane'
        self.saveTitle = 'Save Preferences'
        preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge_tools.oozebane.html' )

    def execute( self ):
        "Oozebane button has been clicked."
        fileNames = polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, interpret.getImportPluginFilenames(), self.fileNameInput.wasCancelled )
        for fileName in fileNames:
            writeOutput( fileName )


class OozebaneSkein:
    "A class to oozebane a skein of extrusions."
    def __init__( self ):
        self.decimalPlacesCarried = 3
        self.distanceFromThreadEndToThreadBeginning = None
        self.earlyStartupDistance = None
        self.extruderInactiveLongEnough = True
        self.feedrateMinute = 961.0
        self.isExtruderActive = False
        self.isFirstExtrusion = True
        self.isShutdownEarly = False
        self.isStartupEarly = False
        self.lineIndex = 0
        self.lines = None
        self.oldLocation = None
        self.operatingFeedrateMinute = 959.0
        self.output = cStringIO.StringIO()
        self.shutdownStepIndex = 999999999
        self.startupStepIndex = 999999999

    def addAfterStartupLine( self, splitLine ):
        "Add the after startup lines."
        distanceAfterThreadBeginning = self.getDistanceAfterThreadBeginning()
        location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
        segment = self.oldLocation - location
        segmentLength = segment.magnitude()
        distanceBack = distanceAfterThreadBeginning - self.afterStartupDistances[ self.startupStepIndex ]
        if segmentLength > 0.0:
            locationBack = location + segment * distanceBack / segmentLength
            feedrate = self.operatingFeedrateMinute * self.afterStartupFlowRates[ self.startupStepIndex ]
            if not self.isCloseToEither( locationBack, location, self.oldLocation ):
                self.addLine( self.getLinearMoveWithFeedrate( feedrate, locationBack ) )
        self.startupStepIndex += 1

    def addLine( self, line ):
        "Add a line of text and a newline to the output."
        if line != '':
            self.output.write( line + "\n" )

    def addLineSetShutdowns( self, line ):
        "Add a line and set the shutdown variables."
        self.addLine( line )
        self.isShutdownEarly = True

    def getActiveFeedrateRatio( self ):
        "Get the feedrate of the first active move over the operating feedrate."
        isSearchExtruderActive = self.isExtruderActive
        for afterIndex in xrange( self.lineIndex, len( self.lines ) ):
            line = self.lines[ afterIndex ]
            splitLine = line.split()
            firstWord = gcodec.getFirstWord( splitLine )
            if firstWord == 'G1':
                if isSearchExtruderActive:
                    return gcodec.getFeedrateMinute( self.feedrateMinute, splitLine ) / self.operatingFeedrateMinute
            elif firstWord == 'M101':
                isSearchExtruderActive = True
        print( 'active feedrate ratio was not found in oozebane.' )
        return 1.0

    def getAddAfterStartupLines( self, line ):
        "Get and / or add after the startup lines."
        splitLine = line.split()
        while self.isDistanceAfterThreadBeginningGreater():
            self.addAfterStartupLine( splitLine )
        if self.startupStepIndex >= len( self.afterStartupDistances ):
            self.startupStepIndex = len( self.afterStartupDistances ) + 999999999999
            return self.getLinearMoveWithFeedrateSplitLine( self.operatingFeedrateMinute, splitLine )
        feedrate = self.operatingFeedrateMinute * self.getStartupFlowRateMultiplier( self.getDistanceAfterThreadBeginning() / self.afterStartupDistance, len( self.afterStartupDistances ) )
        return self.getLinearMoveWithFeedrateSplitLine( feedrate, splitLine )

    def getAddBeforeStartupLines( self, line ):
        "Get and / or add before the startup lines."
        distanceThreadBeginning = self.getDistanceToThreadBeginning()
        if distanceThreadBeginning == None:
            return line
        splitLine = line.split()
        self.extruderInactiveLongEnough = False
        self.isStartupEarly = True
        location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
        segment = self.oldLocation - location
        segmentLength = segment.magnitude()
        distanceBack = self.earlyStartupDistance - distanceThreadBeginning
        if segmentLength <= 0.0:
            print( 'This should never happen, segmentLength is zero in getAddBeforeStartupLines in oozebane.' )
            print( line )
            self.extruderInactiveLongEnough = True
            self.isStartupEarly = False
            return line
        locationBack = location + segment * distanceBack / segmentLength
        self.addLine( self.getLinearMoveWithFeedrate( self.operatingFeedrateMinute, locationBack ) )
        self.addLine( 'M101' )
        if self.isCloseToEither( locationBack, location, self.oldLocation ):
            return ''
        return self.getLinearMoveWithFeedrate( self.operatingFeedrateMinute, location )

    def getAddShutSlowDownLine( self, line ):
        "Add the shutdown and slowdown lines."
        if self.shutd
__source__ = 'https://leetcode.com/problems/binary-tree-tilt/' # Time: O(n) # Space: O(n) # # Description: 563. Binary Tree Tilt # # Given a binary tree, return the tilt of the whole tree. # # The tilt of a tree node is defined as the absolute difference between the sum of all left subtree node values # and the sum of all right subtree node values. Null node has tilt 0. # # The tilt of the whole tree is defined as the sum of all nodes' tilt. # # Example: # Input: # 1 # / \ # 2 3 # Output: 1 # Explanation: # Tilt of node 2 : 0 # Tilt of node 3 : 0 # Tilt of node 1 : |2-3| = 1 # Tilt of binary tree : 0 + 0 + 1 = 1 # Note: # # The sum of node values in any subtree won't exceed the range of 32-bit integer. # All the tilt values won't exceed the range of 32-bit integer. #
Hide Company Tags Indeed # Hide Tags Tree # Explanation # If we had each node's subtree sum, # our answer would look like this pseudocode: # for each node: ans += abs(node.left.subtreesum - node.right.subtreesum). # Let _sum(node) be the node's subtree sum. # We can find it by adding the subtree sum of the left child, # plus the subtree sum of the right child, plus the node's value. # While we are visiting the node (each node is visited exactly once), # we might as
well do the ans += abs(left_sum - right_sum) part. import unittest # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None # 48ms 97.16% class Solution(object): def findTilt(self, root): """ :type root: TreeNode :rtype: int """ self.ans = 0 def _sum(node): if not node: return 0 left, right = _sum(node.left), _sum(node.right) self.ans += abs(left - right) return node.val + left + right _sum(root) return self.ans # your function here class TestMethods(unittest.TestCase): def test_Local(self): self.assertEqual(1, 1) if __name__ == '__main__': unittest.main() Java = ''' # Thought: https://leetcode.com/problems/binary-tree-tilt/solution/ # Time complexity : O(n), where n is the number of nodes. Each node is visited once. Space complexity : O(n). In the worst case, when the tree is skewed, the depth of the tree will be n. In the average case the depth will be log n. post-order traversal /** * Definition for a binary tree node. * public class TreeNode { * int val; * TreeNode left; * TreeNode right; * TreeNode(int x) { val = x; } * } */ # 3ms 100% class Solution { int res = 0; public int findTilt(TreeNode root) { postOrder(root); return res; } private int postOrder(TreeNode root) { if (root == null) return 0; int left = postOrder(root.left); int right = postOrder(root.right); res += Math.abs(left - right); return left + right + root.val; } } '''
f._expr_() self.ast['rvalue'] = self.last_node self._cut() self._statement_() self.ast['body'] = self.last_node self.ast._define( ['rvalue', 'body'], [] ) @graken() def _casestatement_(self): with self._group(): with self._choice(): with self._option(): with self._group(): self._token('case') self._constantexpr_() self.ast['cond'] = self.last_node with self._option(): self._token('default') self._error('expecting one of: default') self._cut() self._token(':') self._statement_() self.ast['then'] = self.last_node self.ast._define( ['cond', 'then'], [] ) @graken() def _breakstatement_(self): self._token('break') self._token(';') @graken() def _autostatement_(self): self._token('auto') self._cut() self._autovar_() self.ast.setlist('@', self.last_node) def block1(): self._token(',') self._autovar_() self.ast.setlist('@', self.last_nod
e) self._closure(block1) self._token(';') @graken() def _autovar_(self):
self._name_() self.ast['name'] = self.last_node with self._optional(): self._token('[') self._constantexpr_() self.ast['maxidx'] = self.last_node self._token(']') self.ast._define( ['name', 'maxidx'], [] ) @graken() def _extrnstatement_(self): self._token('extrn') self._cut() self._namelist_() self.ast['@'] = self.last_node self._token(';') @graken() def _compoundstatement_(self): self._token('{') self._cut() def block1(): self._statement_() self._cut() self._closure(block1) self.ast['@'] = self.last_node self._token('}') @graken() def _ifstatement_(self): self._token('if') self._cut() self._token('(') self._expr_() self.ast['cond'] = self.last_node self._token(')') self._statement_() self.ast['then'] = self.last_node with self._optional(): self._token('else') self._statement_() self.ast['otherwise'] = self.last_node self.ast._define( ['cond', 'then', 'otherwise'], [] ) @graken() def _whilestatement_(self): self._token('while') self._cut() self._token('(') self._expr_() self.ast['cond'] = self.last_node self._token(')') self._statement_() self.ast['body'] = self.last_node self.ast._define( ['cond', 'body'], [] ) @graken() def _returnstatement_(self): self._token('return') self._cut() with self._optional(): self._token('(') self._expr_() self.ast['return_value'] = self.last_node self._token(')') self._token(';') self.ast._define( ['return_value'], [] ) @graken() def _exprstatement_(self): self._expr_() self.ast['@'] = self.last_node self._token(';') @graken() def _nullstatement_(self): self._token(';') @graken() def _expr_(self): self._assignexpr_() @graken() def _assignexpr_(self): self._condexpr_() self.ast['lhs'] = self.last_node with self._optional(): self._assignop_() self.ast['op'] = self.last_node self._assignexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['lhs', 'op', 'rhs'], [] ) @graken() def _assignop_(self): self._pattern(r'=([+\-/\*%&^|]|[=!]=|>[=>]?|<[=<]?)?') @graken() def _condexpr_(self): self._orexpr_() self.ast['cond'] = self.last_node with self._optional(): self._token('?') self._condexpr_() self.ast['then'] = self.last_node self._token(':') self._condexpr_() self.ast['otherwise'] = self.last_node self.ast._define( ['cond', 'then', 'otherwise'], [] ) @graken() def _orexpr_(self): self._xorexpr_() self.ast['lhs'] = self.last_node def block2(): self._ortail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) @graken() def _ortail_(self): self._token('|') self.ast['op'] = self.last_node self._xorexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _xorexpr_(self): self._andexpr_() self.ast['lhs'] = self.last_node def block2(): self._xortail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) @graken() def _xortail_(self): self._token('^') self.ast['op'] = self.last_node self._andexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _andexpr_(self): self._eqexpr_() self.ast['lhs'] = self.last_node def block2(): self._andtail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) @graken() def _andtail_(self): self._token('&') self.ast['op'] = self.last_node self._eqexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _eqexpr_(self): self._relexpr_() self.ast['lhs'] = self.last_node def block2(): self._eqtail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) 
@graken() def _eqtail_(self): self._eqop_() self.ast['op'] = self.last_node self._relexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _eqop_(self): self._pattern(r'[!=]=') @graken() def _relexpr_(self): self._shiftexpr_() self.ast['lhs'] = self.last_node def block2(): self._reltail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) @graken() def _reltail_(self): self._relop_() self.ast['op'] = self.last_node self._shiftexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _relop_(self): self._pattern(r'[<>]={0,1}') @graken() def _shiftexpr_(self): self._addexpr_() self.ast['lhs'] = self.last_node def block2(): self._shifttail_() self._closure(block2) self.ast['tail'] = self.last_node self.ast._define( ['lhs', 'tail'], [] ) @graken() def _shifttail_(self): self._shiftop_() self.ast['op'] = self.last_node self._addexpr_() self.ast['rhs'] = self.last_node self.ast._define( ['op', 'rhs'], [] ) @graken() def _shiftop_(self): self._pattern(r'<<|>>')
# coding=utf-8 # -----------------------------
--------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regene
rated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VaultSecretGroup(Model): """Describes a set of certificates which are all in the same Key Vault. :param source_vault: The relative URL of the Key Vault containing all of the certificates in VaultCertificates. :type source_vault: ~azure.mgmt.compute.v2016_03_30.models.SubResource :param vault_certificates: The list of key vault references in SourceVault which contain certificates. :type vault_certificates: list[~azure.mgmt.compute.v2016_03_30.models.VaultCertificate] """ _attribute_map = { 'source_vault': {'key': 'sourceVault', 'type': 'SubResource'}, 'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'}, } def __init__(self, *, source_vault=None, vault_certificates=None, **kwargs) -> None: super(VaultSecretGroup, self).__init__(**kwargs) self.source_vault = source_vault self.vault_certificates = vault_certificates
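# --- Added usage sketch (not part of the generated file): constructing a
# VaultSecretGroup, assuming the sibling SubResource and VaultCertificate
# models from the same models package; the IDs and URLs are placeholders.
from azure.mgmt.compute.v2016_03_30.models import (
    SubResource, VaultCertificate, VaultSecretGroup)

secret_group = VaultSecretGroup(
    source_vault=SubResource(id="/subscriptions/<sub>/resourceGroups/<rg>/"
                                "providers/Microsoft.KeyVault/vaults/myvault"),
    vault_certificates=[
        VaultCertificate(
            certificate_url="https://myvault.vault.azure.net/secrets/mycert",
            certificate_store="My"),  # certificate store applies to Windows VMs
    ],
)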
""" WSGI config for mjuna project. It exposes the WSGI ca
llable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_M
ODULE", "mjuna.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
ntic, status, message = self.authenticator.is_authentic(request) self.assertFalse(authentic) self.assertEqual(401, status) self.assertEqual("", message) @patch.object(RemoteAuthenticator, "signature_valid") @patch.object(RemoteAuthenticator, "authentication_present") @patch.object(RemoteAuthenticator, "time_valid") @patch.object(RemoteAuthenticator, "token_valid") def test_is_authentic_some_time_invalid(self, token_valid, time_valid, authentication_present, signature_valid): """RemoteAuthenticator: We get a False back if time invalid""" request = mock.Mock(headers={settings.x_mws_time: self.mws_time, settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}, path="/mauth/v2/mauth.json?open=1", method="GET", data="") token_valid.return_value = True time_valid.side_effect = InauthenticError("") authentication_present.return_value = True signature_valid.return_value = True authentic, status, message = self.authenticator.is_authentic(request) self.assertFalse(authentic) self.assertEqual(401, status) self.assertEqual("", message) @patch.object(RemoteAuthenticator, "signature_valid") @patch.object(RemoteAuthenticator, "authentication_present") @patch.object(RemoteAuthenticator, "time_valid") @patch.object(RemoteAuthenticator, "token_valid") def test_is_authentic_some_authentication_missing(self, token_valid, time_valid, authentication_present, signature_valid): """RemoteAuthenticator: We get a False back if mauth missing""" request = mock.Mock(headers={settings.x_mws_time: self.mws_time, settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}, path="/mauth/v2/mauth.json?open=1", method="GET", data="") token_valid.return_value = True time_valid.return_value = True authentication_present.side_effect = InauthenticError("") signature_valid.return_value = True authentic, status, message = self.authenticator.is_authentic(request) self.assertFalse(authentic) self.assertEqual(401, status) self.assertEqual("", message) @patch.object(RemoteAuthenticator, "signature_valid") @patch.object(RemoteAuthenticator, "authentication_present") @patch.object(RemoteAuthenticator, "time_valid") @patch.object(RemoteAuthenticator, "token_valid") def test_is_authentic_some_signature_invalid(self, token_valid, time_valid, authentication_present, signature_valid): """RemoteAuthenticator: We get a False back if signature invalid""" request = mock.Mock(headers={settings.x_mws_time: self.mws_time, settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}, path="/mauth/v2/mauth.json?open=1", method="GET", data="") token_valid.return_value = True time_valid.return_value = True authentication_present.return_value = True signature_valid.side_effect = InauthenticError("") authentic, status, message = self.authenticator.is_authentic(request) self.assertFalse(authentic) self.assertEqual(401, status) self.assertEqual("", message) @patch.object(RemoteAuthenticator, "signature_valid") @patch.object(RemoteAuthenticator, "authentication_present") @patch.object(RemoteAuthenticator, "time_valid") @patch.object(RemoteAuthenticator, "token_valid") def test_authenticate_is_ok(self, token_valid, time_valid, authentication_present, signature_valid): """RemoteAuthenticator: We get a True back if all tests pass""" request = mock.Mock(headers={settings.x_mws_time: self.mws_time, settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}, path="/mauth/v2/mauth.json?open=1", method="GET", data="") token_valid.return_value = True time_valid.return_value = True authentication_present.return_value = 
True signature_valid.return_value = True authentic = self.authenticator.authenticate(request) self.assertTrue(authentic) @patch.object(RemoteAuthenticator, "signature_valid") @patch.object(RemoteAuthenticator, "authentication_present") @patch.object(RemoteAuthenticator, "time_valid") @patch.object(RemoteAuthenticator, "token_valid") def test_authenticate_fails(self, token_valid, time_valid, authentication_present, signature_valid): """RemoteAuthenticator: We get a False back if any tests fail""" request = mock.Mock(headers={settings.x_mws_time: self.mws_time, settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}, path="/mauth/v2/mauth.json?open=1", method="GET", data="") token_valid.return_value = True time_valid.return_value = True authentication_present.return_value = True signature_valid.return_value = False authentic = self.authenticat
or.authenticate(request) self.assertFalse(authentic) def test_authentication_type(self): """We self-describe""" self.assertEqual('REMOTE', self.authenticator.authenticator_type) class TestLocalAuthenticator(_TestAuthenticator, TestCase): def setUp(self): self.logger = mock.Mock() self.authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
logger=self.logger, mauth_api_version='v2', mauth_base_url='https://mauth-sandbox.imedidata.net') self.mws_time = "1479392498" self.app_uuid = 'b0603e5c-c344-488e-83ba-9290ea8dc17d' def generate_headers(self, verb, path, body, mws_time=None, app_uuid=None, keytype='pkcs1'): """ Generates a Signature String :param verb: HTTP verb, eg GET :param path: URL Path (without query strings) :param body: Body of request :param time: :param app_uuid: :return: """ if mws_time is None: mws_time = self.mws_time if app_uuid is None: app_uuid = self.app_uuid key_suffix = "priv" if keytype == 'pkcs15': key_suffix = "priv_pkcs15" signer = requests_mauth.MAuth(app_uuid=app_uuid, private_key_data=load_key(key_suffix)) signature_string, seconds_since_epoch = signer.make_signature_string(verb=verb, url_path=path, body=body, seconds_since_epoch=mws_time) signed_string = signer.signer.sign(signature_string) auth_headers = signer.make_authentication_headers(signed_string, mws_time) return auth_headers def test_authenticates_a_genuine_message(self): """Given an authentic message, we authenticate""" mws_time = int(time.time()) headers = self.generate_headers("GET", "/mauth/v2/mauth.json", "", mws_time) request = mock.Mock(headers=headers, path="/mauth/v2/mauth.json?open=1", method="GET", data="") with mock.patch("flask_mauth.mauth.authenticators.SecurityTokenCacher") as tok: cacher = tok.return_value cacher.get.return_value = dict(app_name="Apple",
# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.urlresolvers import reverse from django.template import defaultfilters from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon_lib import tables from openstack_horizon import api from openstack_horizon.dashboards.identity.groups import constants LOG = logging.getLogger(__name__) LOGOUT_URL = 'logout' STATUS_CHOICES = ( ("true", True), ("false", False) ) class CreateGroupLink(tables.LinkAction): name = "create" verbose_name = _("Create Group") url = constants.GROUPS_CREATE_URL classes = ("ajax-modal",) icon = "plus" policy_rules = (("identity", "identity:create_group"),) def allowed(self, request, group): return api.keystone.keystone_can_edit_group() class EditGroupLink(tables.LinkAction): name = "edit" verbose_name = _("Edit Group") url = constants.GROUPS_UPDATE_URL classes = ("ajax-modal",) icon = "pencil" policy_rules = (("identity", "identity:update_group"),) def allowed(self, request, group): return api.keystone.keystone_can_edit_group() class DeleteGroupsAction(tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u"Delete Group", u"Delete Groups", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Deleted Group", u"Deleted Groups", count ) name = "delete" policy_rules = (("identity", "identity:delete_group"),) def allowed(self, request, datum): return api.keystone.keystone_can_edit_group() def delete(self, request, obj_id): LOG.info('Deleting group "%s".' 
% obj_id) api.keystone.group_delete(request, obj_id) class ManageUsersLink(tables.LinkAction): name = "users" verbose_name = _("Modify Users") url = constants.GROUPS_MANAGE_URL icon = "pencil" policy_rules = (("identity", "identity:get_group"), ("identity", "identity:list_users"),) def allowed(self, request, datum): return api.keystone.keystone_can_edit_group() class GroupFilterAction(tables.FilterAction): def filter(self, table, groups, filter_string): """Naive case-insensitive search.""" q = filter_string.lower() def comp(group): if q in group.name.lower(): return True return False return filter(comp, groups) class GroupsTable(tables.DataTable): name = tables.Column('name', verbose_name=_('Name')) description = tables.Column(lambda obj: getattr(obj, 'description', None), verbose_name=_('Description')) id = tables.Column('id', verbose_name=_('Group ID')) class Meta: name = "groups" verbose_name = _("Groups") row_actions = (ManageUsersLink, EditGroupLink, DeleteGroupsAction) table_actions = (GroupFilterAction, CreateGroupLink, DeleteGroupsAction) class UserFilterAction(tables.FilterAction): def filter(self, table, users, filter_string): """Naive case-insensitive search.""" q = filter_string.lower() return [user for user in users if q in user.name.lower() or q in getattr(user, 'email', '').lower()] class RemoveMembers(tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u"Remove User", u"Remove Users", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Removed User", u"Removed Users", count ) name = "removeGroupMember" policy_rules = (("identity", "identity:remove_user_from_group"),) def allowed(self, request, user=None): return api.keystone.keystone_can_edit_group() def action(self, request, obj_id): user_obj = self.table.get_object_by_id(obj_id) group_id = self.table.kwargs['group_id'] LOG.info('Removing user %s from group %s.' % (user_obj.id, group_id)) api.keystone.remove_group_user(request, group_id=group_id, user_id=user_obj.id) # TODO(lin-hua-cheng): Fix the bug when removing current user # Keystone revokes the token of the user removed from the group. # If the logon user was removed, redirect the user to logout. class AddMembersLink(tables.LinkAction): name = "add_user_link" verbose_name = _("Add...") classes = ("ajax-modal",) icon = "plus" url = constants.GROUPS_ADD_MEMBER_URL policy_rules = (("identity", "identity:list_users"), ("identity", "identity:add_user_to_group"),) def allowed(self, request, user=None): return api.keystone.keystone_can_edit_group() def get_link_url(self, datum=None): return reverse(self.url, kwargs=self.table.kwargs) class UsersTable(tables.DataTable): name = tables.Column('name', verbose_name=_('User Name')) email = tables.Column('email', verbose_name=_('Email'), filters=[defaultfilters.escape, defaultfilters.urlize]) id = tables.Column('id', verbose_name=_('User ID')) enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True, status_choices=STATUS_CHOICES, empty_value="False") class Gr
oupMembersTable(UsersTable): class Meta: name = "group_members" verbose_name = _("Group Members") table_actions = (UserFilterAction, AddMembersLink, RemoveMembers) class AddMembers(tables.BatchAction): @staticmethod def action_present(count): return unge
ttext_lazy( u"Add User", u"Add Users", count ) @staticmethod def action_past(count): return ungettext_lazy( u"Added User", u"Added Users", count ) name = "addMember" icon = "plus" requires_input = True success_url = constants.GROUPS_MANAGE_URL policy_rules = (("identity", "identity:add_user_to_group"),) def allowed(self, request, user=None): return api.keystone.keystone_can_edit_group() def action(self, request, obj_id): user_obj = self.table.get_object_by_id(obj_id) group_id = self.table.kwargs['group_id'] LOG.info('Adding user %s to group %s.' % (user_obj.id, group_id)) api.keystone.add_group_user(request, group_id=group_id, user_id=user_obj.id) # TODO(lin-hua-cheng): Fix the bug when adding current user # Keystone revokes the token of the user added to the group. # If the logon user was added, redirect the user to logout. def get_success_url(self, request=None): group_id = self.table.kwargs.get('group_id', None) return reverse(self.success_url, args=[group_id]) class GroupNonMembersTable(UsersTable): class Meta: name = "group_non_members" verbose_name = _("Non-Members") table_actions = (UserFilterAction, AddMembers)
from hypothesis import given, strategies as st import numpy as np from pysaliency.numba_utils import auc_for_one_positive from pysaliency.roc import general_roc def test_auc_for_one_positive(): asser
t auc_for_one_positive(1, [0, 2]) == 0.5 assert auc_for_one_positive(1, [1]) == 0.5 assert auc_for_one_positive(3, [0]) == 1.0 assert auc_for_one_positive(0, [3]) == 0.0 @given(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=1), st.floats(allow_nan=False, allow_infinity=F
alse)) def test_simple_auc_hypothesis(negatives, positive): old_auc, _, _ = general_roc(np.array([positive]), np.array(negatives)) new_auc = auc_for_one_positive(positive, np.array(negatives)) np.testing.assert_allclose(old_auc, new_auc)
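# --- Added reference (not part of pysaliency): a pure-Python restatement of
# the property these tests encode -- with a single positive, the AUC is the
# fraction of negatives strictly below it, counting ties as one half.
def reference_auc_one_positive(positive, negatives):
    below = sum(1.0 for neg in negatives if neg < positive)
    ties = sum(0.5 for neg in negatives if neg == positive)
    return (below + ties) / len(negatives)

assert reference_auc_one_positive(1, [0, 2]) == 0.5  # one below, one above
assert reference_auc_one_positive(1, [1]) == 0.5     # a tie counts as half
assert reference_auc_one_positive(3, [0]) == 1.0     # positive above all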
"License": "BSD", "License File": "/LICENSE", }, os.path.join('third_party', 'ots'): { "Name": "OTS (OpenType Sanitizer)", "URL": "http://code.google.com/p/ots/", "License": "BSD", }, os.path.join('third_party', 'pdfsqueeze'): { "Name": "pdfsqueeze", "URL": "http://code.google.com/p/pdfsqueeze/", "License": "Apache 2.0", "License File": "COPYING", }, os.path.join('third_party', 'ppapi'): { "Name": "ppapi", "URL": "http://code.google.com/p/ppapi/", }, os.path.join('third_party', 'scons-2.0.1'): { "Name": "scons-2.0.1", "URL": "http://www.scons.org", "License": "MIT", "License File": "NOT_SHIPPED", }, os.path.join('third_party', 'trace-viewer'): { "Name": "trace-viewer", "URL": "http://code.google.com/p/trace-viewer", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('third_party', 'v8-i18n'): { "Name": "Internationalization Library for v8", "URL": "http://code.google.com/p/v8-i18n/", "License": "Apache 2.0", }, os.path.join('third_party', 'WebKit'): { "Name": "WebKit", "URL": "http://webkit.org/", "License": "BSD and GPL v2", # Absolute path here is resolved as relative to the source root. "License File": "/webkit/LICENSE", }, os.path.join('third_party', 'webpagereplay'): { "Name": "webpagereplay", "URL": "http://code.google.com/p/web-page-replay", "License": "Apache 2.0", "License File": "NOT_SHIPPED", }, os.path.join('tools', 'grit'): { "Name": "grit", "URL": "http://code.google.com/p/grit-i18n", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('tools', 'gyp'): { "Name": "gyp", "URL": "http://code.google.com/p/gyp", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('v8'): { "Name": "V8 JavaScript Engine", "URL": "http://code.google.com/p/v8", "License": "BSD", }, os.path.join('v8', 'strongtalk'): { "Name": "Strongtalk", "URL": "http://www.strongtalk.org/", "License": "BSD", # Absolute path here is resolved as relative to the source root. "License File": "/v8/LICENSE.strongtalk", }, } # Special value for 'License File' field used to indicate that the license file # should not be used in about:credits. NOT_SHIPPED = "NOT_SHIPPED" class LicenseError(Exception): """We raise this exception when a directory's licensing info isn't fully filled out.""" pass def AbsolutePath(path, filename, root): """Convert a path in README.chromium to be absolute based on the source root.""" if filename.startswith('/'): # Absolute-looking paths are relative to the source root # (which is the directory we're run from). absolute_path = os.path.join(root, filename[1:]) else: absolute_path = os.path.join(root, path, filename) if os.path.exists(absolute_path): return absolute_path return None def ParseDir(path, root, require_license_file=True): """Examine a third_party/foo component and extract its metadata.""" # Parse metadata fields out of README.chromium. # We examine "LICENSE" for the license file by default. metadata = { "License File": "LICENSE", # Relative path to license text. "Name": None, # Short name (for header on about:credits). "URL": None, # Project home page. "License": None, # Software license. } # Relative path to a file containing some html we're required to place in # about:credits. optional_keys = ["Required Text", "License Android Compatible"] if path in SPECIAL_CASES: metadata.update(SPECIAL_CASES[path]) else: # Try to find README.chromium. 
readme_path = os.path.join(root, path, 'README.chromium') if not os.path.exists(readme_path): raise LicenseError("missing README.chromium or licenses.py " "SPECIAL_CASES entry") for line in open(readme_path): line = line.strip() if not line: break for key in metadata.keys() + optional_keys: field = key + ": " if line.startswith(field): metadata[key] = line[len(field):] # Check that all expected metadata is present. for key, value in metadata.iteritems(): if not value: raise LicenseError("couldn't find '" + key + "' line " "in README.chromium or licenses.py " "SPECIAL_CASES") # Special-case modules that aren't in the shipping product, so don't need # their license in about:credits. if metadata["License File"] != NOT_SHIPPED: # Check that the license file exists. for filename in (metadata["License File"], "COPYING"): license_path = AbsolutePath(path, filename, root) if license_path is not None: break if require_license_file and not license_path: raise LicenseError("License file not found. " "Either add a file named LICENSE, " "import upstream's COPYING if available, " "or add a 'License File:' line to " "README.chromium with the appropriate path.") metadata["License File"] = license_path if "Required Text" in metadata: required_path = AbsolutePath(path, metadata["Required Text"], root) if required_path is not None: metadata["Required Text"] = required_path else: raise LicenseError("Required text file listed but not found.") return metadata def ContainsFiles(path, root): """Determines whether any files exist in a directory or in any of its subdirectories.""" for _, dirs, files in os.wal
k(os.path.join(root, path)): if files:
return True for vcs_metadata in VCS_METADATA_DIRS: if vcs_metadata in dirs: dirs.remove(vcs_metadata) return False def FilterDirsWithFiles(dirs_list, root): # If a directory contains no files, assume it's a DEPS directory for a # project not used by our current configuration and skip it. return [x for x in dirs_list if ContainsFiles(x, root)] def FindThirdPartyDirs(prune_paths, root): """Find all third_party directories underneath the source root.""" third_party_dirs = [] for path, dirs, files in os.walk(root): path = path[len(root)+1:] # Pretty up the path. if path in prune_paths: dirs[:] = [] continue # Prune out directories we want to skip. # (Note that we loop over PRUNE_DIRS so we're not iterating over a # list that we're simultaneously mutating.) for skip in PRUNE_DIRS: if skip in dirs: dirs.remove(skip) if os.path.basename(path) == 'third_party': # Add all subdirectories that are not marked for skipping. for dir in dirs: dirpath = os.path.join(path, dir) if dirpath not in prune_paths: third_party_dirs.append(dirpath) # Don't recurse into any subdirs from here. dirs[:] = [] continue # Don't recurse into paths in ADDITIONAL_PATHS, like we do with regular # third_party/foo paths. if path in ADDITIONAL_PATHS: dirs[:] = [] for dir in ADDITIONAL_PATHS: if dir not in prune_paths: third_party_dirs.append(dir) return third_party_dirs def ScanThirdPartyDirs(root=None): """Scan a list of directories and report on any problems we find.""" if root is None: root = os.getcwd() third_party_dirs = FindThirdPartyDirs(PRUNE_PAT
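# --- Added illustration (not part of this tool): the README.chromium shape
# that ParseDir() consumes -- "Key: value" lines, read until the first blank
# line; the package below is hypothetical.
#
#     Name: libfoo
#     URL: http://example.com/libfoo
#     License: BSD
#     License File: LICENSE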
ete on 31 Dec 2030. # Note that midnight GMT must be specified. # Be sure to quote your date strings - s3_lifecycle: name: mybucket transition_date: "2020-12-30T00:00:00.000Z" expiration_date: "2030-12-30T00:00:00.000Z" prefix: /logs/ status: enabled state: present # Disable the rule created above - s3_lifecycle: name: mybucket prefix: /logs/ status: disabled state: present # Delete the lifecycle rule created above - s3_lifecycle: name: mybucket prefix: /logs/ state: absent # Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class. - s3_lifecycle: name: mybucket prefix: /backups/ storage_class: standard_ia transition_days: 31 state: present status: enabled ''' import xml.etree.ElementTree as ET import copy import datetime try: import dateutil.parser HAS_DATEUTIL = True except ImportError: HAS_DATEUTIL = False try: import boto import boto.ec2 from boto.s3.connection import OrdinaryCallingFormat, Location from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition from boto.exception import BotoServerError, S3ResponseError HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info def create_lifecycle_rule(connection, module): name = module.params.get("name") expiration_date = module.params.get("expiration_date") expiration_days = module.params.get("expiration_days") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") status = module.params.get("status") storage_class = module.params.get("storage_class") transition_date = module.params.get("transition_date") transition_days = module.params.get("transition_days") changed = False try: bucket = connection.get_bucket(name) except S3ResponseError as e: module.fail_json(msg=e.message) # Get the bucket's current lifecycle rules try: current_lifecycle_obj = bucket.get_lifecycle_config() except S3ResponseError as e: if e.error_code == "NoSuchLifecycleConfiguration": current_lifecycle_obj = Lifecycle() else: module.fail_json(msg=e.message) # Create expiration if expiration_days is not None: expiration_obj = Expiration(days=expiration_days) elif expiration_date is not None: expiration_obj = Expiration(date=expiration_date) else: expiration_obj = None # Create transition if transition_days is not None: transition_obj = Transition(days=transition_days, storage_class=storage_class.upper()) elif transition_date is not None: transition_obj = Transition(date=transition_date, storage_class=storage_class.upper()) else: transition_obj = None # Create rule rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj) # Create lifecycle lifecycle_obj = Lifecycle() appended = False # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule if current_lifecycle_obj: # If rule ID exists, use that for comparison otherwise compare based on prefix for existing_rule in current_lifecycle_obj: if rule.id == existing_rule.id: if compare_rule(rule, existing_rule): lifecycle_obj.append(rule) appended = True else: lifecycle_obj.append(rule) changed = True appended = True elif rule.prefix == existing_rule.prefix: existing_rule.id = None if compare_rule(rule, existing_rule): lifecycle_obj.append(rule) appended = True else: lifecycle_obj.append(rule) changed = True appended = True else: lifecycle_obj.append(existing_rule) # If nothing appended then append now as the rule must not 
exist if not appended: lifecycle_obj.append(rule) changed = True else: lifecycle_obj.append(rule) changed = True # Write lifecycle to bucket try: bucket.configure_lifecycle(lifecycle_obj) except S3ResponseError as e: module.fail_js
on(msg=e.message) module.exit_json(changed=changed) def compare_rule(rule_a, rule_b): # Copy objects rule1 = copy.deepcopy(rule_a) rule2 = copy.deepcopy(rule_b) # Delete Rule from Rule try: del rule1.Rule except AttributeError: pass try: del rule2.Rule except AttributeError: pass # Extract Expiration and Transition objects rule1_expiration = rule1.expiration rule1_transition = rule
1.transition rule2_expiration = rule2.expiration rule2_transition = rule2.transition # Delete the Expiration and Transition objects from the Rule objects del rule1.expiration del rule1.transition del rule2.expiration del rule2.transition # Compare if rule1_transition is None: rule1_transition = Transition() if rule2_transition is None: rule2_transition = Transition() if rule1_expiration is None: rule1_expiration = Expiration() if rule2_expiration is None: rule2_expiration = Expiration() if (rule1.__dict__ == rule2.__dict__ and rule1_expiration.__dict__ == rule2_expiration.__dict__ and rule1_transition.__dict__ == rule2_transition.__dict__): return True else: return False def destroy_lifecycle_rule(connection, module): name = module.params.get("name") prefix = module.params.get("prefix") rule_id = module.params.get("rule_id") changed = False if prefix is None: prefix = "" try: bucket = connection.get_bucket(name) except S3ResponseError as e: module.fail_json(msg=e.message) # Get the bucket's current lifecycle rules try: current_lifecycle_obj = bucket.get_lifecycle_config() except S3ResponseError as e: if e.error_code == "NoSuchLifecycleConfiguration": module.exit_json(changed=changed) else: module.fail_json(msg=e.message) # Create lifecycle lifecycle_obj = Lifecycle() # Check if rule exists # If an ID exists, use that otherwise compare based on prefix if rule_id is not None: for existing_rule in current_lifecycle_obj: if rule_id == existing_rule.id: # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: lifecycle_obj.append(existing_rule) else: for existing_rule in current_lifecycle_obj: if prefix == existing_rule.prefix: # We're not keeping the rule (i.e. deleting) so mark as changed changed = True else: lifecycle_obj.append(existing_rule) # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: if lifecycle_obj: bucket.configure_lifecycle(lifecycle_obj) else: bucket.delete_lifecycle_configuration() except BotoServerError as e: module.fail_json(msg=e.message) module.exit_json(changed=changed) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True, type='str'), expiration_days=dict(default=None, required=False, type='int'), expiration_date=dict(default=None, required=False, type='str'), prefix=dict(default=None, required=False), requester_pays=dict(default='no', type='bool'), rule_id=dict(required=False, type='str'), state=dict(default='present', choices=['present', '
from setuptools
import setup setup( name="pystor", version="0.9.1", author="Ethronsoft", author_email='dev@ethronsoft.com', zip_safe=False, packages=["ethronsoft", "ethronsoft.pystor"], license=open("LICENSE.txt").read(), include_package_data=True, keywords="nosql document store serverless embedded", url="https://github.com/ethronsoft/stor", description="Python bindings to esft::stor, a C++ NoSQL ser
verless document store", install_requires=[ 'enum34' ], setup_requires=[ 'pytest-runner' ], tests_require=[ 'pytest' ], entry_points={ 'console_scripts':[ "pystor = ethronsoft.pystor.__main__:main" ] } )
from . import views def register_in(router): router.register(r'openstack', views.OpenStackServiceViewSet, base_name='openstack') router.register(r'openstack-images', views.ImageViewSet, base_name='openstack-image') router.register(r'openstack-flavors', views.FlavorViewSet, base_name='openstack-flavor') router.register(r'openstack-tena
nts', views.TenantViewSet, base_name='openstack-tenant') router.register(r'openstack-service-project-link', views.OpenStackServiceProjectLinkViewSet, base_name='openstack-spl') router.register(r'openstack-security-groups', views.SecurityGroupViewSet, base_name='openstack-sgp') router.register(r'openstack-floating-ips', views.FloatingIPViewSet, b
ase_name='openstack-fip') router.register(r'openstack-networks', views.NetworkViewSet, base_name='openstack-network') router.register(r'openstack-subnets', views.SubNetViewSet, base_name='openstack-subnet')
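# --- Added usage sketch (hypothetical wiring, not part of this module):
# register_in() expects a DRF router; the resulting URLs are included as usual.
from rest_framework import routers

router = routers.DefaultRouter()
register_in(router)

urlpatterns = router.urls  # hand to Django's include() in the project urls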
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-03-07 23:02 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='AdvancedFilter', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255)), ('url', models.CharField(max_length=255)), ('b64_query', models.CharField(max_length=2048)), ('model', models.CharField(blank=True, max_length=64, null=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_advanced_filte
rs', to=settings.AUTH_USER_MODEL)), ('gr
oups', models.ManyToManyField(blank=True, to='auth.Group')), ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Advanced Filters', 'verbose_name': 'Advanced Filter', }, ), ]
or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Handlers that are not directly related to course content.""" __author__ = 'Saifu Angto (saifu@google.com)' import base64 import hmac import os import time import urlparse import appengine_config from models import transforms from models.config import ConfigProperty from models.config import ConfigPropertyEntity from models.courses import Course from models.models import Student from models.roles import Roles import webapp2 from google.appengine.api import namespace_manager from google.appengine.api import users # The name of the template dict key that stores a course's base location. COURSE_BASE_KEY = 'gcb_course_base' # The name of the template dict key that stores data from course.yaml. COURSE_INFO_KEY = 'course_info' XSRF_SECRET_LENGTH = 20 XSRF_SECRET = ConfigProperty( 'gcb_xsrf_secret', str, ( 'Text used to encrypt tokens, which help prevent Cross-site request ' 'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, ' 'preferably using 16-64 characters. Once you change this value, the ' 'server rejects all subsequent requests issued using an old value for ' 'this variable.'), 'course builder XSRF secret') class ReflectiveRequestHandler(object): """Uses reflection to handle custom get() and post() requests. Use this class as a mix-in with any webapp2.RequestHandler to allow request dispatching to multiple get() and post() methods based on the 'action' parameter. Open your existing webapp2.RequestHandler, add this class as a mix-in. Define the following class variables: default_action = 'list' get_actions = ['default_action', 'edit'] post_actions = ['save'] Add instance methods named get_list(self), get_edit(self), post_save(self). These methods will now be called automatically based on the 'action' GET/POST parameter. """ def create_xsrf_token(self, action): return XsrfTokenManager.create_xsrf_token(action) def get(self): """Handles GET.""" action = self.request.get('action') if not action: action = self.default_action if not action in self.get_actions: self.error(404) return handler = getattr(self, 'get_%s' % action) if not handler: self.error(404) return return handler() def post(self): """Handles POST.""" action = self.request.get('action') if not action or not action in self.post_actions: self.error(404) return handler = getattr(self, 'post_%s' % action) if not handler: self.error(404) return # Each POST request must have valid XSRF token. xsrf_token = self.request.get('xsrf_token') if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action): self.error(403) return return handler() class ApplicationHandler(webapp2.RequestHandler): """A handler that is aware of the application context.""" @classmethod def is_absolute(cls, url): return bool(urlparse.urlparse(url).scheme) @classmethod def get_base_href(cls, handler): """Computes current course <base> href.""" base = handler.app_context.get_slug() if not base.endswith('/'): base = '%s/' % base # For IE to work with the <base> tag, its href must be an absolute URL. 
if not cls.is_absolute(base): parts = urlparse.urlparse(handler.request.url) base = urlparse.urlunparse( (parts.scheme, parts.netloc, base, None, None, None)) return base def __init__(self, *args, **kwargs): super(ApplicationHandler, self).__init__(*args, **kwargs) self.template_value = {} def get_template(self, template_file, additional_dirs=None): """Computes location of template files for the current namespace.""" self.template_value[COURSE_INFO_KEY] = self.app_context.get_environ() self.template_value['is_course_admin'] = Roles.is_course_admin( self.app_context) self.template_value[ 'is_read_write_course'] = self.app_context.fs.is_read_write() self.template_value['is_super_admin'] = Roles.is_super_admin() self.template_value[COURSE_BASE_KEY] = self.get_base_href(self) return self.app_context.get_template_environ( self.template_value[COURSE_INFO_KEY]['course']['locale'], additional_dirs ).get_template(template_file) def canonicalize_url(self, location): """Adds the current namespace URL prefix to the relative 'location'.""" is_relative = ( not self.is_absolute(location) and not location.startswith(self.app_context.get_slug())) has_slug = ( self.app_context.get_slug() and self.app_context.get_slug() != '/') if is_relative and has_slug: location = '%s%s' % (self
.app_context.get_slug(), location) return location def redirect(self, location): super(ApplicationHandler, self).redirect( self.canonicalize_url(location)) class BaseHandler(ApplicationHandler): """Base handler.""" def __init__(self, *args, **kwargs): super(BaseHandler, self).__init__(*args, **kwargs
) self.course = None def get_course(self): if not self.course: self.course = Course(self) return self.course def find_unit_by_id(self, unit_id): """Gets a unit with a specific id or fails with an exception.""" return self.get_course().find_unit_by_id(unit_id) def get_units(self): """Gets all units in the course.""" return self.get_course().get_units() def get_lessons(self, unit_id): """Gets all lessons (in order) in the specific course unit.""" return self.get_course().get_lessons(unit_id) def get_progress_tracker(self): """Gets the progress tracker for the course.""" return self.get_course().get_progress_tracker() def get_user(self): """Validate user exists.""" user = users.get_current_user() if not user: self.redirect(users.create_login_url(self.request.uri)) else: return user def personalize_page_and_get_user(self): """If the user exists, add personalized fields to the navbar.""" user = self.get_user() if user: self.template_value['email'] = user.email() self.template_value['logoutUrl'] = ( users.create_logout_url(self.request.uri)) return user def personalize_page_and_get_enrolled(self): """If the user is enrolled, add personalized fields to the navbar.""" user = self.personalize_page_and_get_user() if not user: self.redirect(users.create_login_url(self.request.uri)) return None student = Student.get_enrolled_student_by_email(user.email()) if not student: self.redirect('/preview') return None return student def assert_xsrf_token_or_fail(self, request, action): """Asserts the current request has proper XSRF token or fails.""" token = request.get('xsrf_token') if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action): self.error(403) return False return True def render(self, template_file): """Renders a template.""" template = self.get_template(template_file) self.response.out.write(template.render(self.template_value)) class BaseRESTHandler(BaseHandler): """Base REST handler.""" def assert_xsrf_token_or_fail(self, token_dict, action, args_dict): """Asserts that curren
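# --- Added illustration (not part of this module): a subclass wired up the
# way the ReflectiveRequestHandler docstring above describes; the handler
# and its actions are hypothetical.
class ItemHandler(BaseHandler, ReflectiveRequestHandler):
    default_action = 'list'
    get_actions = ['list', 'edit']
    post_actions = ['save']

    def get_list(self):   # dispatched for GET ?action=list (or no action)
        self.response.out.write('list')

    def get_edit(self):   # dispatched for GET ?action=edit
        self.response.out.write('edit')

    def post_save(self):  # dispatched for POST action=save after XSRF check
        self.response.out.write('saved')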
s = document.getElementById('no-transitions'), head = document.head || document.getElementsByTagName('head')[0]; head.removeChild(styles) """) def select_option_by_text(select_browser_query, option_text): """ Chooses an option within a select by text (helper method for Select's select_by_visible_text method). """ select = Select(select_browser_query.first.results[0]) select.select_by_visible_text(option_text) def get_selected_option_text(select_browser_query): """ Returns the text value for the first selected option within a select. """ select = Select(select_browser_query.first.results[0]) return select.first_selected_option.text def get_options(select_browser_query): """ Returns all the options for the given select. """ return Select(select_browser_query.first.results[0]).options def generate_course_key(org, number, run): """ Makes a CourseLocator from org, number and run """ default_store = os.environ.get('DEFAULT_STORE', 'draft') return CourseLocator(org, number, run, deprecated=(default_store == 'draft')) def select_option_by_value(browser_query, value): """ Selects an option in an HTML select element by matching its value attribute. """ select = Select(browser_query.first.results[0]) select.select_by_value(value) def options_selected(): """ Return True if an option whose value attribute matches `value` exists and every such option is already selected; any matching option that is not yet selected gets clicked and False is returned. If `value` is not among the option choices, False is returned as well. """ all_options_selected = True has_option = False for opt in select.options: if opt.get_attribute('value') == value: has_option = True if not opt.is_selected(): all_options_selected = False opt.click() # if value is not an option choice then it should return false if all_options_selected and not has_option: all_options_selected = False return all_options_selected # Make sure specified option is actually selected EmptyPromise(options_selected, "Option is selected").fulfill() def is_option_value_selected(browser_query, value): """ Return True if the given value is selected in the HTML select element, else False. """ select = Select(browser_query.first.results[0]) ddl_selected_value = select.first_selected_option.get_attribute('value') return ddl_selected_value == value def element_has_text(page, css_selector, text): """ Return True if the given text is present among the elements matched by the CSS selector. """ text_present = False text_list = page.q(css=css_selector).text if len(text_list) > 0 and (text in text_list): text_present = True return text_present def get_modal_alert(browser): """ Returns instance of modal alert box shown in browser after waiting for 6 seconds """ WebDriverWait(browser, 6).until(EC.alert_is_present()) return browser.switch_to.alert class EventsTestMixin(TestCase): """ Helpers and setup for running tests that evaluate events emitted """ def setUp(self): super(EventsTestMixin, self).setUp() self.event_collection = MongoClient()["test"]["events"] self.reset_event_tracking() def reset_event_tracking(self): """Drop any events that have been collected thus far and start collecting again from scratch.""" self.event_collection.drop() self.start_time = datetime.now() @contextmanager def capture_events(self, event_filter=None, number_of_matches=1, captured_events=None): """ Context manager that captures all events emitted while executing a particular block. All captured events are stored in the list referenced by `captured_events`. Note that this list is appended to *in place*. The events will be appended to the list in the order they are emitted.
The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should match that provided expectation. `number_of_matches` tells this context manager when enough events have been found and it can move on. The context manager will not exit until this many events have passed the filter. If not enough events are found before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of `captured_events`. """ start_time = datetime.utcnow() yield events = self.wait_for_events( start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches) if captured_events is not None and hasattr(captured_events, 'append') and callable(captured_events.append): for event in events: captured_events.append(event) @contextmanager def assert_events_match_during(self, event_filter=None, expected_events=None): """ Context manager that ensures that events matching the `event_filter` and `expected_events` are emitted. This context manager will filter out the event stream using the `event_filter` and wait for `len(expected_events)` to match the filter. It will then compare the events in order with their counterpart in `expected_events` to ensure they match the more detailed assertion. Typically `event_filter` will be an `event_type` filter and the `expected_events` list will contain more detailed assertions. """ captured_events = [] with self.capture_events(event_filter, len(expected_events), captured_events): yield self.assert_events_match(expected_events, captured_events) def wait_for_events(self, start_time=None, event_filter=None, number_of_matches=1, timeout=None): """ Wait for `number_of_matches` events to pass the `event_filter`. By default, this will look at all events that have been emitted since the beginning of the setup of this mixin. A custom `start_time` can be specified which will limit the events searched to only those emitted after that time. The `event_filter` is expected to b
e a callable that allows you to filter the event stream and select particular events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should match that provided expectation. `number_of_matches` lets us know when enough events have been found and it can move on. The funct
ion will not return until this many events have passed the filter. If not enough events are found before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of `captured_events`. Specifying a custom `timeout` can allow you to extend the default 30 second timeout if necessary. """ if start_time is None: start_time = self.start_time if timeout is None: timeout = 30 def check_for_matching_events(): """Gather any events that have been emitted since `start_time`""" return self.matching_events_were_emitted( start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches ) return Promise( check_for_matching_events, # This is a bit of a hack, Promise calls str(description), so I set the description to an object with a # custom __str__ and have it do some intelligent stuff to generate a helpful error message. CollectedEventsD
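# --- Added usage sketch (hypothetical, not part of this mixin): how a test
# typically exercises the capture_events contract described above; the event
# type and page action below are illustrative only.
def test_check_emits_event(self):
    captured = []
    with self.capture_events(event_filter={'event_type': 'problem_check'},
                             number_of_matches=1,
                             captured_events=captured):
        self.problem_page.click_check()  # hypothetical action that emits
    # capture_events guarantees at least one match, appended in order
    self.assertEqual('problem_check', captured[0]['event_type'])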
from random import random

from banti.linegraph import LineGraph


class Weight:
    def __init__(self, val):
        self.val = val

    def combine(self, other):
        # randomly decide whether two nodes combine; the merged weight is a
        # noisy average of the two values
        return random() < .3, Weight(int(100*random()) + (self.val + other.val)//2)

    def strength(self):
        return self.val

    def __repr__(self):
        return "{}".format(self.val)


weights = [Weight(val) for val in range(10, 80, 10)]
print(list(enumerate(weights)))

lgraph = LineGraph(weights)
print(lgraph.lchildren)
print(lgraph)

lgraph.process_tree()
print(lgraph)

paths = lgraph.get_paths()
for path in paths:
    print(path, lgraph.path_strength(path))

print("Strongest Path: ", lgraph.strongest_path())
__all__ = ['chatcommand', 'execute_chat_command', 'save_matchsettings', '_register_chat_command']

import functools
import inspect

from .events import eventhandler, send_event
from .log import logger
from .asyncio_loop import loop

_registered_chat_commands = {}  # dict of all registered chat commands


async def execute_chat_command(server, player, cmd):
    #if not player.is_admin():
    #r = check_rights(player)
    args = cmd.split(' ')
    # drop a trailing empty argument caused by a trailing space
    if args and args[-1] == '':
        del args[-1]
    if args[0] in _registered_chat_commands:
        try:
            if len(args) == 1:
                server.run_task(_registered_chat_commands[args[0]](server, player))
            else:
                server.run_task(_registered_chat_commands[args[0]](server, player, *args[1:]))
        except Exception as exp:
            server.chat_send_error('faulty use of chat command: ' + args[0], player)
            server.chat_send_error(str(exp), player)
            server.chat_send('use /help to see available chat commands', player)
            raise
    else:
        server.chat_send_error('unknown chat command: ' + args[0], player)
        server.chat_send('use /help to see available chat commands', player)


def _register_chat_command(chat_command, function):
    if chat_command not in _registered_chat_commands:
        _registered_chat_commands[chat_command] = function
        return True
    logger.error('chatcommand ' + "'" + chat_command + "'" + ' already registered to '
                 + str(_registered_chat_commands[chat_command]))
    return False


def _unregister_chat_command(chat_command):
    if chat_command not in _registered_chat_commands:
        raise KeyError('chat command not registered: ' + chat_command)
    del _registered_chat_commands[chat_command]


# @chatcommand decorator
def chatcommand(cmd):
    def chatcommand_decorator(func):
        # on a duplicate registration keep the function usable instead of
        # silently binding its name to None
        if _register_chat_command(cmd, func) is False:
            return func
        module = inspect.getmodule(func)
        logger.debug('chatcommand ' + "'" + cmd + "' connected to " + str(func)
                     + ' in module ' + str(module))

        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return func_wrapper
    return chatcommand_decorator


@eventhandler('ManiaPlanet.PlayerChat')
async def _on_player_chat(server, callback):
    p = server.player_from_login(callback.login)
    # ignore normal chat
    if not callback.isCommand:
        if p is not None:
            send_event(server, 'pie.PlayerChat', p)
        return
    server.run_task(execute_chat_command(server, p, callback.text))


@chatcommand('/help')
async def cmd_help(server, player):
    """list all chat commands"""
    server.chat_send('help:', player)
    for cmd in _registered_chat_commands:
        if _registered_chat_commands[cmd].__doc__ is None:
            docstr = 'no description set'
        else:
            docstr = _registered_chat_commands[cmd].__doc__
        server.chat_send(cmd + ' - ' + docstr, player)


async def save_matchsettings(server, filename=None):
    # filename is currently unused; the configured matchsettings file is
    # always written
    await server.rpc.SaveMatchSettings('MatchSettings\\' + server.config.matchsettings)


@chatcommand('/savematchsettings')
async def cmd_savematchsettings(server, player):
    """save the current match settings"""
    await save_matchsettings(server)
    server.chat_send('matchsettings saved: ' + server.config.matchsettings)


@chatcommand('/shutdown')
async def cmd_shutdown(server, player):
    """shut down the controller"""
    await server.chat_send_wait('pie shutdown')
    loop.stop()


@chatcommand('/players')
async def cmd_players(server, player):
    """list the nicknames of all connected players"""
    for login in server.players:
        server.chat_send(server.players[login].nickname, player)
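# --- Example (sketch): registering an additional command with the decorator
# above. The '/echo' command is illustrative; like the built-in commands, the
# handler receives the server, the calling player, and any extra arguments
# split from the chat line.
@chatcommand('/echo')
async def cmd_echo(server, player, *words):
    """echo the given words back to the sender"""
    server.chat_send(' '.join(words) if words else 'nothing to echo', player)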
n instance from the base64 data. :param screenshot: The screenshot image. :param driver: The webdriver for the session. """ return EyesWebDriverScreenshot(driver, screenshot=screenshot) def __init__(self, driver, screenshot=None, screenshot64=None, is_viewport_screenshot=None, frame_location_in_screenshot=None): # type: (EyesWebDriver, Image.Image, None, tp.Optional[bool], tp.Optional[Point]) -> None """ Initializes a Screenshot instance. Either screenshot or screenshot64 must NOT be None. Should not be used directly. Use create_from_image/create_from_base64 instead. :param driver: EyesWebDriver instance which handles the session from which the screenshot was retrieved. :param screenshot: image instance. If screenshot64 is None,
this variable must NOT be non
e. :param screenshot64: The base64 representation of a png image. If screenshot is None, this variable must NOT be none. :param is_viewport_screenshot: Whether the screenshot object represents a viewport screenshot or a full screenshot. :param frame_location_in_screenshot: The location of the frame relative to the top,left of the screenshot. :raise EyesError: If the screenshots are None. """ if screenshot is None and screenshot64 is None: raise EyesError("both screenshot and screenshot64 are None!") if screenshot64: screenshot = image_utils.image_from_bytes(base64.b64decode(screenshot64)) # initializing of screenshot super(EyesWebDriverScreenshot, self).__init__(image=screenshot) self._driver = driver self._viewport_size = driver.get_default_content_viewport_size(force_query=False) # type: ViewPort self._frame_chain = driver.frame_chain.clone() if self._frame_chain: chain_len = len(self._frame_chain) self._frame_size = self._frame_chain[chain_len - 1].outer_size else: try: self._frame_size = driver.get_entire_page_size() except WebDriverException: # For Appium, we can't get the "entire page size", so we use the viewport size. self._frame_size = self._viewport_size # For native Appium Apps we can't get the scroll position, so we use (0,0) try: self._scroll_position = driver.get_current_position() except (WebDriverException, EyesError): self._scroll_position = Point(0, 0) if is_viewport_screenshot is None: is_viewport_screenshot = (self._screenshot.width <= self._viewport_size['width'] and self._screenshot.height <= self._viewport_size['height']) self._is_viewport_screenshot = is_viewport_screenshot if frame_location_in_screenshot is None: if self._frame_chain: frame_location_in_screenshot = EyesWebDriverScreenshot \ .calc_frame_location_in_screenshot(self._frame_chain, is_viewport_screenshot) else: # The frame is the default content frame_location_in_screenshot = Point(0, 0) if self._is_viewport_screenshot: frame_location_in_screenshot.offset(-self._scroll_position.x, -self._scroll_position.y) self._frame_location_in_screenshot = frame_location_in_screenshot self._frame_screenshot_intersect = Region(frame_location_in_screenshot.x, frame_location_in_screenshot.y, self._frame_size['width'], self._frame_size['height']) self._frame_screenshot_intersect.intersect(Region(width=self._screenshot.width, height=self._screenshot.height)) @staticmethod def calc_frame_location_in_screenshot(frame_chain, is_viewport_screenshot): first_frame = frame_chain[0] location_in_screenshot = Point(first_frame.location['x'], first_frame.location['y']) # We only need to consider the scroll of the default content if the screenshot is a # viewport screenshot. If this is a full page screenshot, the frame location will not # change anyway. 
if is_viewport_screenshot: location_in_screenshot.x -= first_frame.parent_scroll_position.x location_in_screenshot.y -= first_frame.parent_scroll_position.y # For inner frames we must calculate the scroll inner_frames = frame_chain[1:] for frame in inner_frames: location_in_screenshot.x += frame.location['x'] - frame.parent_scroll_position.x location_in_screenshot.y += frame.location['y'] - frame.parent_scroll_position.y return location_in_screenshot @property def frame_chain(self): return self._frame_chain def get_base64(self): if not self._screenshot64: self._screenshot64 = image_utils.get_base64(self._screenshot) return self._screenshot64 def get_location_relative_to_frame_viewport(self, location): result = {'x': location['x'], 'y': location['y']} if self._frame_chain or self._is_viewport_screenshot: result['x'] -= self._scroll_position.x result['y'] -= self._scroll_position.y return result def get_sub_screenshot_by_region(self, region): sub_screenshot_region = self.get_intersected_region(region) if sub_screenshot_region.is_empty(): raise OutOfBoundsError("Region {0} is out of bounds!".format(region)) # If we take a screenshot of a region inside a frame, then the frame's (0,0) is in the # negative offset of the region.. sub_screenshot_frame_location = Point(-region.left, -region.top) # FIXME Calculate relative region location? (same as the java version) screenshot = image_utils.get_image_part(self._screenshot, sub_screenshot_region) return EyesWebDriverScreenshot(self._driver, screenshot, is_viewport_screenshot=self._is_viewport_screenshot, frame_location_in_screenshot=sub_screenshot_frame_location) def get_element_region_in_frame_viewport(self, element): location, size = element.location, element.size relative_location = self.get_location_relative_to_frame_viewport(location) x, y = relative_location['x'], relative_location['y'] width, height = size['width'], size['height'] # We only care about the part of the element which is in the viewport. if x < 0: diff = -x # IMPORTANT the diff is between the original location and the viewport's bounds. width -= diff x = 0 if y < 0: diff = -y height -= diff y = 0 if width <= 0 or height <= 0: raise OutOfBoundsError("Element's region is outside the viewport! [(%d, %d) %d x %d]" % (location['x'], location['y'], size['width'], size['height'])) return Region(x, y, width, height) def get_intersected_region(self, region): region_in_screenshot = region.clone() region_in_screenshot.left += self._frame_location_in_screenshot.x region_in_screenshot.top += self._frame_location_in_screenshot.y region_in_screenshot.intersect(self._frame_screenshot_intersect) return region_in_screenshot def get_viewport_screenshot(self): # if screenshot if full page if not self._is_viewport_screenshot and not eyes_selenium_utils.is_mobile_device(self._driver): return self.get_sub_screenshot_by_region( Region(top=
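# --- Example (sketch): cropping a screenshot to a single element with the
# methods defined above. The `driver` and `element` names are assumed to come
# from an Eyes/Selenium session and are illustrative only.
#
# screenshot = EyesWebDriverScreenshot.create_from_base64(
#     driver.get_screenshot_as_base64(), driver)
# region = screenshot.get_element_region_in_frame_viewport(element)
# element_screenshot = screenshot.get_sub_screenshot_by_region(region)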
from functools import wraps
from flask import Flask, make_response
from werkzeug.contrib.atom import AtomFeed
from datetime import datetime as dt
from HTMLParser import HTMLParser
from bs4 import BeautifulSoup
import praw

app = Flask(__name__)

def get_api():
    USER_AGENT = "reddit_wrapper for personalized rss see: /u/kotfic"
    return praw.Reddit(user_agent=USER_AGENT)

def reddit(label, subreddit, limit=25):
    """Decorator used to wrap functions that alter the body of a subreddit
    feed. This function calls out to the subreddit using PRAW and passes the
    decorated function each article object one at a time. The function is
    expected to return a string containing the desired contents of the atom
    <content> tag."""
    def _reddit(func):
        @wraps(func)
        def wrap_reddit():
            base = "http://www.reddit.com/r/{}/"
            feed = AtomFeed(label,
                            feed_url=base.format(subreddit),
                            url=base.format(subreddit))
            articles = get_api().get_subreddit(subreddit).get_hot(limit=limit)
            for article in articles:
                feed
.add(article.title, func(article), content_type='html', author=article.author.name, url=article.url, updated=dt.fromtimestamp(int(article.created)), published=dt.fromtimestamp(
int(article.created))) r = make_response(feed.get_response()) r.headers['Content-Type'] = "application/xml" return r return wrap_reddit return _reddit @app.route('/r/python.atom') @reddit("Python Subreddit", "python") def python(article): try: return HTMLParser().unescape(article.selftext_html) except TypeError: return '' @app.route('/r/funny.atom') @reddit("Funny Subreddit", "funny") def funny(article): try: soup = BeautifulSoup("<img src=\"{}\" />".format(article.url)) return str(soup) except TypeError: return '' @app.route('/r/emacs.atom') @reddit("Emacs Subreddit", "emacs") def emacs(article): try: return HTMLParser().unescape(article.selftext_html) except TypeError: return '' def main(): app.run(debug=True) if __name__ == "__main__": main()
_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: sf_account_manager short_description: Manage SolidFire accounts extends_documentation_fragment: - netapp.solidfire version_added: '2.3' author: Sumit Kumar (sumit4@netapp.com) description: - Create, destroy, or update accounts on SolidFire options: state: description: - Whether the specified account should exist or not. required: true choices: ['present', 'absent'] name: description: - Unique username for this account. (May be 1 to 64 characters in length). required: true new_name: description: - New name for the user account. required: false default: None initiator_secret: description: - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable. - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret. - If not specified, a random secret is created. required: false target_secret: description: - CHAP secret to use for the target (mutual CHAP authentication). - Should be 12-16 characters long and impenetrable. - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret. - If not specified, a random secret is created. required: false attributes: description: List of Name/Value pairs in JSON object format. required: false account_id: description: - The ID of the account to manage or update. required: false default: None status: description: - Status of the account. required: false ''' EXAMPLES = """ - name: Create Account sf_account_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: present name: TenantA - name: Modify Account sf_account_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: present name: TenantA new_name: TenantA-Renamed - name: Delete Account sf_account_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: absent name: TenantA-Renamed """ RETURN = """ """ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils HAS_SF_SDK = netapp_utils.has_sf_sdk() class SolidFireAccount(object): def __init__(self): self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() self.argument_spec.update(dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True, type='str'), account_id=dict(required=False, type='int', default=None), new_name=dict(required=False, type='str', default=None), initiator_secret=dict(required=False, type='str'), target_secret=dict(required=False, type='str'), attributes=dict(required=False, type='dict'), status=dict(required=False, type='str'), )) self.module = AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=True ) p = self.module.params # set up state variables self.state = p['state'] self.name = p['name'] self.account_id = p['account_id'] self.new_name = p['new_name'] self.initiator_secret = p['initiator_secret'] self.target_secret = p['target_secret'] self.attributes = p['attributes'] self.status = p['status'] if HAS_SF_SDK is False: self.module.fail_json(msg="Unable to import the SolidFire Python SDK") else: self.sfe = netapp_utils.create_sf_connection(module=self.module) def get_account(self): """ Return account object if found 
:return: Details about the account. None if not found. :rtype: dict """ account_list = self.sfe.list_accounts() for account in account_list.accounts: if account.username == self.name: # Update self.account_id: if self.account_id is not None: if account.account_id == self.account_id: return account else: self.account_id = account.account_id return account return None def create_account(self): try: self.sfe.add_account(username=self.name, initiator_secret=self.initiator_secret, target_secret=self.target_secret,
attributes=self.attributes) except Exception as e: s
elf.module.fail_json(msg='Error creating account %s: %s)' % (self.name, to_native(e)), exception=traceback.format_exc()) def delete_account(self): try: self.sfe.remove_account(account_id=self.account_id) except Exception as e: self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)), exception=traceback.format_exc()) def update_account(self): try: self.sfe.modify_account(account_id=self.account_id, username=self.new_name, status=self.status, initiator_secret=self.initiator_secret, target_secret=self.target_secret, attributes=self.attributes) except Exception as e: self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)), exception=traceback.format_exc()) def apply(self): changed = False account_exists = False update_account = False account_detail = self.get_account() if account_detail: account_exists = True if self.state == 'absent': changed = True elif self.state == 'present': # Check if we need to update the account if account_detail.username is not None and self.new_name is not None and \ account_detail.username != self.new_name: update_account = True changed = True elif account_detail.status is not None and self.status is not None \ and account_detail.status != self.status: update_account = True changed = True elif account_detail.initiator_secret is not None and self.initiator_secret is not None \ and account_detail.initiator_secret != self.initiator_secret: update_account = True changed = True elif account_detail.target_secret is not None and self.target_secret is not None \ and account_detail.target_secret != self.target_secret: update_account = True changed = True elif account_detail.attributes is not None and self.attributes is not None \ and account_detail.attributes != self.attributes: update_account = True changed = True else: if self.state == 'present': changed = True if changed: if self.module.check_mode: pass else: if self.state == 'present': if
import sys, os
import tweepy

# File with colon-separated consumer/access token and secret
consumer_file = 'twitter.consumer'
access_file = 'twitter.access'


def __load_auth(path):
    if os.path.exists(path):
        with open(path) as f:
            tokens = f.readline().replace('\n', '').replace('\r', '').split(':')
            if len(tokens) == 2:
                return tokens[0], tokens[1]
            else:
                raise ValueError("Expecting two colon-separated tokens")
    else:
        raise IOError("File not found: %s" % path)


def twit(message, secret_dir='/secret'):
    #
    # Load the twitter consumer and access tokens and secrets
    consumer_token, consumer_secret = __load_auth(os.path.join(secret_dir, consumer_file))
    access_token, access_secret = __load_auth(os.path.join(secret_dir, access_file))
    #
    # Perform OAuth authentication
    auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    #
    # Create the API and post the status update
    try:
        api = tweepy.API(auth)
        api.update_status(message)
    except tweepy.error.TweepError as e:
        print("Failed to post status update")
        print("Error: %s" % str(e))
        print("Using:")
        print("  consumer[%s][%s]" % (consumer_token, consumer_secret))
        print("  access[%s][%s]" % (access_token, access_secret))


if __name__ == '__main__':
    tokens = sys.argv[1:]
    # twit(' '.join(tokens))
# -*- coding: utf-8 -*- from __future__ import with_statement, print_function, absolute_import import os from requests_oauthlib import OAuth1Session def create_oauth_token(expiration=None, scope=None, key=None, secret=None, name=None, output=True): """ Script to obtain an OAuth token from Trello. Must have TRELLO_API_KEY and TRELLO_API_SECRET set in your environment To set the token's expiration, set TRELLO_EXPIRATION as a string in your environment settings (eg. 'never'), otherwise it will default to 30 days. More info on token scope here: https://trello.com/docs/gettingstarted/#getting-a-token-from-a-user """ request_token_url = 'https://trello.com/1/OAuthGetRequestToken' authorize_url = 'https://trello.com/1/OAuthAuthorizeToken' access_token_url = 'https://trello.com/1/OAuthGetAccessToken' expiration = expiration or os.environ.get('TRELLO_EXPIRATION', "30days") scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write') trello_key = key or os.environ['TRELLO_API_KEY'] trello_secret = secret or os.environ['TRELLO_API_SECRET'] name = name or os.environ.get('TRELLO_NAME', 'py-trello') # Step 1: Get a request token. This is a temporary token that is used for # having the user authorize an access token and to sign the request to obtain # said access token. session = OAuth1Session(client_key=trello_key, client_secret=trello_secret) response = session.fetch_request_token(request_token_url) resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret') if output: print("Request Token:") print(" - oauth_token = %
s" % resource_owner_key) print(" - oauth_token_secret = %s" % resource_owner_secret) print("") # Step 2: Redirect to the provider. Since this is a CLI script we do not # redirect. In a web application you would redirect the user to the URL # below. print("Go to the following link in your browser:") print("{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}&name={name}"
.format( authorize_url=authorize_url, oauth_token=resource_owner_key, expiration=expiration, scope=scope, name=name )) # After the user has granted access to you, the consumer, the provider will # redirect you to whatever URL you have told them to redirect to. You can # usually define this in the oauth_callback argument as well. # Python 3 compatibility (raw_input was renamed to input) try: inputFunc = raw_input except NameError: inputFunc = input accepted = 'n' while accepted.lower() == 'n': accepted = inputFunc('Have you authorized me? (y/n) ') oauth_verifier = inputFunc('What is the PIN? ') # Step 3: Once the consumer has redirected the user back to the oauth_callback # URL you can request the access token the user has approved. You use the # request token to sign this request. After this is done you throw away the # request token and use the access token returned. You should store this # access token somewhere safe, like a database, for future use. session = OAuth1Session(client_key=trello_key, client_secret=trello_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret, verifier=oauth_verifier) access_token = session.fetch_access_token(access_token_url) if output: print("Access Token:") print(" - oauth_token = %s" % access_token['oauth_token']) print(" - oauth_token_secret = %s" % access_token['oauth_token_secret']) print("") print("You may now access protected resources using the access tokens above.") print("") return access_token # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
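# --- Example (sketch): a typical run. The values are placeholders; with
# TRELLO_API_KEY and TRELLO_API_SECRET exported in the environment, key and
# secret can be omitted entirely.
#
# token = create_oauth_token(expiration='never', scope='read,write')
# print(token['oauth_token'], token['oauth_token_secret'])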
import renderer
from model.contact import Contact
# script for generating contact test data and saving it to a file
import random
import string
import os.path
import jsonpickle
import getopt
import sys

try:
    # note to self: read up on try/except
    # option -n sets the number of generated contacts, option -f the output file
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number-of-contacts=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)

n = 5
f = "data/contacts.json"

for o, a in opts:
    # this block (in general) lets the script's parameters be controlled,
    # e.g. via the Edit Configuration section of the IDE: we can set the
    # number of contacts and the location of the result file
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a


def random_string(prefix, maxlen):
    # generate random data for the test
    # the characters used to build a random string
    symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
    # pick characters at random from the given set
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


testdata = [Contact(firstname="", middlename="", lastname="")] + [
    Contact(firstname="John", middlename="Jay", lastname="Johnson", home="123",
            mobile="456", work="789", email="a@mail.com", email2="b@mail.com",
            email3="c@mail.com", phone2="456")
    for i in range(n)  # generate exactly n contacts, as documented for -n
]

file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    # open the file with the 'w' (write) flag and dump the data
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
#out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2))
# the dumps function turns the data structure into a JSON-formatted string
#Made by Zachary C. on 9/21/16 last edited on 9/21/16 #CONSTANTS HOURS_DAY = 24 MINUTES_HOUR = 60 SECONDS_MINUTE = 60 #1. Greet the user and explain the program #2. Ask the
user to input the number of days #3. save the number of days days = float(input('This program converts days into hours, minutes, and seconds.\nPlease enter the number of days: ')) #4. Calculate the number of hours (days * hours in day) #5. Save the number of hours hours = days * HOURS_DAY #6. Calculate the number of minutes (hours * minutes in hour) #7. Save the number of minutes minutes = hours * MINUTES_HOUR #8. Calculate the number of seconds (minutes * seco
nds in minute)
#9. Save the number of seconds
seconds = minutes * SECONDS_MINUTE
#10. Display number of hours
#11. Display number of minutes
#12. Display number of seconds
#13. Signoff
print('In ' , days , ' days there are ' , int(format(hours , '.0f')) , ' hours or ' , int(format(minutes , '.0f')) , ' minutes or ' , \
int(format(seconds , '.0f')) , ' seconds.\nThanks for using my program. Bye.' , sep='')
#14. End
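#Worked example (sketch): entering 2 days prints
#"In 2.0 days there are 48 hours or 2880 minutes or 172800 seconds."
#since 2 * 24 = 48, 48 * 60 = 2880 and 2880 * 60 = 172800.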
[parentNum]-4), int(data[parentNum])) getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier,length,typeRC, int(data[parentNum]-3), int(data[parentNum])) elif data[parentNum-3] < length and data[parentNum-4] > length: #print (parentNum-4 , data[parentNum-4]) hier += str(int(data[parentNum-3])) + "." if (typeRC == "row"): rowLeafNum = rowLeafNum + 1 if int(data[parentNum-3]) > length: global content content['label'] = "null" else: global content content['label'] = rowNameArr[int(data[parentNum-3])][0] global content content['parent'] = int(int(data[parentNum])) global content content['id'] = int(data[parentNum-3]) global leafData leafData += str(content) + ", " dotLeafData.append(hier) else : colLeafNum = colLeafNum + 1 if int(data[parentNum-3]) > length: global colContent colContent['label'] = "null" else: global colContent colContent['label'] = colNameArr[int(data[parentNum-3])-1] global colContent colContent['parent'] = int(int(data[parentNum])) global colContent colContent['id'] = int(data[parentNum-3]) global colLeafData colLeafData += str(colCont
ent) + ", " global dotcolLeafData dotcolLeafData.append(hier) #print(content) #print (leafData[rowLeafNum]) #print (colLeafData[colLeafNum])
removeNum = len(str(int(data[parentNum-3]))) + 1 hier = hier[:-removeNum] getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier, length,typeRC, int(data[parentNum]-4), int(data[parentNum])) elif data[parentNum-3] > length and data[parentNum-4] < length: #print (parentNum-3 , data[parentNum-3]) hier += str(int(data[parentNum-4])) + "." if (typeRC == "row"): rowLeafNum = rowLeafNum + 1 if int(data[parentNum-4]) > length: global content content['label'] = "null" else: global content content['label'] = rowNameArr[int(data[parentNum-4])][0] global content content['parent'] = int(int(data[parentNum])) global content content['id'] = int(data[parentNum-4]) global leafData leafData += str(content) + ", " global dotLeafData dotLeafData.append(hier) else : colLeafNum = colLeafNum + 1 if int(data[parentNum-4]) > length: global colContent colContent['label'] = "null" else: global colContent colContent['label'] = colNameArr[int(data[parentNum-4])-1] global colContent colContent['parent'] = int(int(data[parentNum])) global colContent colContent['id'] = int(data[parentNum-4]) global colLeafData colLeafData += str(colContent) + ", " global dotcolLeafData dotcolLeafData.append(hier) #print(content) removeNum = len(str(int(data[parentNum-4]))) + 1 hier = hier[:-removeNum] getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier, length,typeRC, int(data[parentNum]-3), int(data[parentNum])) #print (leafData[rowLeafNum]) #print (colLeafData[colLeafNum]) else: hier += str(int(data[parentNum-4])) + "." if (typeRC == "row"): rowLeafNum = rowLeafNum + 1 if int(data[parentNum-4]) > length: global content content['label'] = "null" else: global content content['label'] = rowNameArr[int(data[parentNum-4])][0] global content content['parent'] = int(int(data[parentNum])) global content content['id'] = int(data[parentNum-4]) leafData += str(content) + ", " dotLeafData.append(hier) else : colLeafNum = colLeafNum + 1 if int(data[parentNum-4]) > length: global colContent colContent['label'] = "null" else: global colContent colContent['label'] = colNameArr[int(data[parentNum-4])-1] global colContent colContent['parent'] = int(int(data[parentNum])) global colContent colContent['id'] = int(data[parentNum-4]) global colLeafData colLeafData += str(colContent) + ", " global dotcolLeafData dotcolLeafData.append(hier) #print(content) #print (parentNum-4 , data[parentNum-4]) #print(hier) removeNum = len(str(int(data[parentNum-4]))) + 1 hier = hier[:-removeNum] hier += str(int(data[parentNum-3])) + "." 
#print (parentNum-3 , data[parentNum-3]) #print(hier) #print (parentNum-3 , data[parentNum-3]) #print (parentNum-4 , data[parentNum-4]) if (typeRC == "row"): rowLeafNum = rowLeafNum + 1 #print("length : " + str(length)) #print("int(data[parentNum]): " + str(int(data[parentNum]))) if int(data[parentNum-3]) > length: global content content['label'] = "null" else: global content content['label'] = rowNameArr[int(data[parentNum-3])][0] global content content['parent'] = int(int(data[parentNum])) global content content['id'] = int(data[parentNum-3]) leafData += str(content) + ", " dotLeafData.append(hier) else : colLeafNum = colLeafNum + 1 if int(data[parentNum-3]) > length: global colContent colContent['label'] = "null" else: global colContent colContent['label'] = colNameArr[int(data[parentNum-3])-1] global colContent colContent['parent'] = int(int(data[parentNum])) global colContent colContent['id'] = int(data[parentNum-3]) global colLeafData colLeafData += str(colContent) + ", " global dotcolLeafData dotcolLeafData.append(hier) #print (leafData[rowLeafNum]) #print (colLeafData[colLeafNum]) #print(content) #print(rowNameArr[int(data[parentNum-3])]) """if (data[parentNum-4] <= len(linkageMatrix)): hier += str(int(data[parentNum-4])) + "." leafData.append(hier) #print (hier) isChecked = 1 # print (parentNum-3 , data[parentNum-3]) if (data[parentNum-3] <= len(linkageMatrix)): if isChecked == 1 : removeNum = len(str(int(data[parentNum-4]))) + 1 hier = hier[:-removeNum] hier += str(int(data[parentNum-3])) + "." leafData.append(hier) #print (parentNum-4 , data[parentNum-4]) #print (hier)""" def searchNum (data, index, pId):
r""" Description: Generates 2-D data maps from OpenFoam data saved by paraview as a CSV file. The data has to be saved as point data and the following fields are expected p, points:0->2, u:0->2. An aperture map is the second main input and is used to generate the interpolation coordinates as well as convert the flow velocities into volumetic flow rates. This script assumes the OpenFoam simulation was performed on a geometry symmetric about the X-Z plane. For usage information run: ``apm_process_paraview_data -h`` | Written By: Matthew stadelman | Date Written: 2016/09/29 | Last Modfied: 2017/04/23
| """ import argparse from argparse import RawDescriptionHelpFormatter as RawDesc import os import scip
y as sp
from scipy.interpolate import griddata
from apmapflow import _get_logger, set_main_logger_level, DataField

# setting up logger
set_main_logger_level('info')
logger = _get_logger('apmapflow.scripts')

# setting a few convenience globals
avg_fact = None
voxel_size = None
base_name = None

# creating arg parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawDesc)

# adding arguments
parser.add_argument('-v', '--verbose', action='store_true',
                    help='debug messages are printed to the screen')
parser.add_argument('-o', '--output-dir', type=os.path.realpath,
                    default=os.getcwd(),
                    help='''outputs file to the specified directory,
                    sub-directories are created as needed''')
parser.add_argument('--rho', type=float, default=1000,
                    help='fluid density for kinematic pressure conversion')
parser.add_argument('data_file', type=os.path.realpath,
                    help='paraview CSV data file')
parser.add_argument('map_file', type=os.path.realpath,
                    help='matching aperture map used for OpenFoam simulation')
parser.add_argument('voxel_size', type=float,
                    help='voxel to meter conversion factor of aperture map')
parser.add_argument('avg_fact', type=float,
                    help='''horizontal averaging factor of aperture map''')
parser.add_argument('base_name', nargs='?', default=None,
                    help='''base name to save fields as, i.e.
                    base_name + "-p-map.txt", defaults to the name of the CSV file''')


def main():
    r"""
    Processes commandline args and runs script
    """
    global avg_fact, voxel_size, base_name
    #
    args = parser.parse_args()
    if args.verbose:
        set_main_logger_level('debug')
    #
    # these will be command-line args
    para_infile = args.data_file
    aper_infile = args.map_file
    avg_fact = args.avg_fact
    voxel_size = args.voxel_size
    #
    base_name = args.base_name
    if base_name is None:
        base_name = os.path.basename(para_infile).split('.')[0]
    base_name = os.path.join(args.output_dir, base_name)
    #
    aper_map, data_dict = read_data_files(para_infile, aper_infile)
    map_coords, data_coords = generate_coordinate_arrays(aper_map, data_dict)
    save_data_maps(map_coords, data_coords, aper_map, data_dict, args.rho)


def read_data_files(para_file, map_file):
    r"""
    Reads in the paraview data file and aperture map file.
    """
    #
    # reading aperture map
    logger.info('reading aperture map...')
    aper_map = DataField(map_file)
    #
    # reading first line of paraview file to get column names
    logger.info('reading paraview data file')
    with open(para_file, 'r') as file:
        cols = file.readline()
        cols = cols.strip().replace('"', '').lower()
        cols = cols.split(',')
    #
    # reading entire dataset and splitting into column vectors
    data = sp.loadtxt(para_file, delimiter=',', dtype=float, skiprows=1)
    data_dict = {}
    for i, col in enumerate(cols):
        data_dict[col] = data[:, i]
    #
    return aper_map, data_dict


def generate_coordinate_arrays(aper_map, para_data_dict):
    r"""
    Generates the coordinate arrays to use in data interpolation for
    converting paraview point data into a 2-D data map.
""" # # generating XYZ coordinates from map to interpolate to logger.info('calculating aperture map cell center coordinates...') temp = sp.arange(aper_map.data_map.size, dtype=int) temp = sp.unravel_index(temp, aper_map.data_map.shape[::-1]) map_coords = sp.zeros((aper_map.data_map.size, 3), dtype=float) # # half voxel added to make map points be cell centers map_coords[:, 0] = temp[0] * avg_fact * voxel_size + voxel_size/2.0 map_coords[:, 2] = temp[1] * avg_fact * voxel_size + voxel_size/2.0 # # pulling XYZ coordinates from the data file logger.info('processing data file data for coordinates...') data_coords = sp.zeros((para_data_dict['points:0'].shape[0], 3)) data_coords[:, 0] = para_data_dict['points:0'] data_coords[:, 1] = para_data_dict['points:1'] data_coords[:, 2] = para_data_dict['points:2'] # return map_coords, data_coords def save_data_maps(map_coords, data_coords, aper_map, data_dict, density): r""" Converts the raw paraview point data into a 2-D data distribution and saves the file by appending to the base_name. """ # # generating p field logger.info('generating and saving pressure field...') field = data_dict['p'] * density # openFoam outputs kinematic pressure field = griddata(data_coords, field, map_coords, method='nearest') field = sp.reshape(field, aper_map.data_map.shape[::-1]) sp.savetxt(base_name+'-p-map.txt', field.T, delimiter='\t') # # generating Ux -> Qx field logger.info('generating and saving Qx field...') field = data_dict['u:0'] field = griddata(data_coords, field, map_coords, method='nearest') field = sp.reshape(field, aper_map.data_map.shape[::-1]) field = field * aper_map.data_map.T * voxel_size**2 sp.savetxt(base_name+'-qx-map.txt', field.T, delimiter='\t') # # generating Uz -> Qz field logger.info('generating and saving Qz field...') field = data_dict['u:2'] field = griddata(data_coords, field, map_coords, method='nearest') field = sp.reshape(field, aper_map.data_map.shape[::-1]) field = field * aper_map.data_map.T * voxel_size**2 sp.savetxt(base_name+'-qz-map.txt', field.T, delimiter='\t') # # generating Um -> Qm field logger.info('generating and saving Q magnitude field...') field = sp.sqrt(data_dict['u:0'] ** 2 + data_dict['u:2'] ** 2) field = griddata(data_coords, field, map_coords, method='nearest') field = sp.reshape(field, aper_map.data_map.shape[::-1]) field = field * aper_map.data_map.T * voxel_size**2 sp.savetxt(base_name+'-qm-map.txt', field.T, delimiter='\t')
, 'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'vote': ('helios.datatypes.djangofield.LDObjectField', [], {}), 'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'vote_tinyhash': ('django.db.models.fields.CharField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}), 'voter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Voter']"}) }, 'helios.election': { 'Meta': {'object_name': 'Election'}, 'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']"}), 'archived_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'cast_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'complaint_period_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'datatype': ('django.db.models.fields.CharField', [], {'default': "'legacy/Election'", 'max_length': '250'}), 'description': ('django.db.models.fields.TextField', [], {}), 'election_type': ('django.db.models.fields.CharField', [], {'default': "'election'", 'max_length': '250'}), 'eligibility': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'encrypted_tally': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'featured_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'frozen_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'openreg': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'private_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'private_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'questions': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'registration_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'result': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'result_proof': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}), 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'tallies_combined_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'tallying_finished_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'tallying_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'tallying_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'use_advanced_audit_features': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'use_voter_aliases': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'voters_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'voting_ended_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'voting_ends_at': 
('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'voting_extended_until': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'voting_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'voting_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}) }, 'helios.electionlog': { 'Meta': {'object_name': 'ElectionLog'}, 'at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'log': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'helios.trustee': { 'Meta': {'object_name': 'Trustee'}, 'decryption_factors': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'decryption_proofs': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'pok': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'public_key_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'secret_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'helios.voter': { 'Meta': {'unique_together': "(('election', 'voter_login_id'),)", 'object_name': 'Voter'}, 'alias': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'cast_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']", 'null': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'vote': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}), 'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'voter_email': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}), 'voter_login_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'voter_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'voter_password': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}) }, 'helios.voterfile': { 'Meta': {'object_name': 'VoterFile'}, 'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm
['helios.Election']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'num_voters'
: ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'processing_finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'processing_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField'
#!/usr/bin/env python3
import sys
from collections import defaultdict, deque
from dataclasses import dataclass


@dataclass
class Nobe:
    children: object
    metadata: object


argh = 0


def parse(data):
    # header: child count, then metadata count
    global argh
    children = data.popleft()
    metadata = data.popleft()
    print(children, metadata)
    nobe = Nobe([], [])
    for x in range(children):
        nobe.children.append(parse(data))
    for x in range(metadata):
        entry = data.popleft()
        nobe.metadata.append(entry)
        argh += entry
    # return the parsed node so children hold real Nobes rather than None
    return nobe


def main(args):
    data = [s.strip() for s in sys.stdin][0]
    data = deque([int(x) for x in data.split(' ')])
    print(data)
    print(len(data))
    parse(data)
    print(argh)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
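# Worked example (sketch): for the input "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
# the metadata entries are 10, 11, 12, 99, 2, 1, 1 and 2, so the script
# prints 138.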
from chatterbot.adapters import Adapter from chatterbot.adapters.exceptions import AdapterNotImplementedError class IOAdapter(Adapter): """ This is an abstract class that
represents the interface that all input-output adapters should implement. """ def process_input(self): """ Re
turns data retrieved from the input source. """ raise AdapterNotImplementedError() def process_response(self, input_value): """ Takes an input value. Returns an output value. """ raise AdapterNotImplementedError()
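# --- Example (sketch): a minimal concrete adapter implementing the interface
# above. A terminal adapter like this is illustrative only; a real adapter
# would integrate an actual input/output channel.
class TerminalAdapter(IOAdapter):
    """
    Reads input from the terminal and echoes each response back to it.
    """

    def process_input(self):
        return input('> ')

    def process_response(self, input_value):
        print(input_value)
        return input_value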
: iv = fscale * sp.verts0[vi] newv[vi] = iv #vv = v / numpy.sqrt ( numpy.sum (v*v) ) #sv = vv * min ( rad, rad0 ) #newv[vi] = sphf * sv + (1.0-sphf) * iv sp.geometry = (newv,tris) for vi, v in enumerate ( smod.icosVerts0 ) : smod.icosVerts[vi] = fscale * smod.icosVerts0[vi] #p1 = smod.icosVerts [ tris[0][0] ] #r = numpy.sqrt ( numpy.sum(p1*p1) ) #p1 = smod.icosVerts0 [ tris[0][0] ] #r0 = numpy.sqrt ( numpy.sum(p1*p1) ) #print "Icos - rad %.4f, orig: %.4f" % (r, r0) def GetMod ( self, name ) : for m in chimera.openModels.list() : if m.name == name : return m return None def MakeTNorms ( self, smod ) : self.umsg ( "Making triangle norms for %d" % len(smod.sps) ) for spi, sp in enumerate ( smod.sps ) : verts2, tris2 = sp.geometry #sp.tdirs = [None] * len(tris2) sp.tdirs = numpy.zeros ( ( len(tris2), 3 ) ) sp.tnorms = [None] * len(tris2) for ti, tri in enumerate ( tris2 ) : p1 = verts2 [ tri[0] ] p2 = verts2 [ tri[1] ] p3 = verts2 [ tri[2] ] mp = (p1 + p2 + p3) / 3.0 l = numpy.sqrt ( numpy.sum(mp*mp) ) sp.tdirs[ti] = mp / l v1 = p2 - p1 v2 = p3 - p1 N = numpy.cross ( v1, v2 ) l = numpy.sqrt ( numpy.sum(N*N) ) sp.tnorms [ti] = N / l def MinRad2 ( self, smod ) : minr = 1e9 for sp in smod.surfacePieces : verts2, tris2 = sp.geometry for v in verts2 : r = numpy.sum ( v * v ) if r < minr : minr = r #return numpy.sqrt ( minr ) return minr def MaxRad2 ( self, smod ) : maxr = 0 for sp in smod.surfacePieces : verts2, tris2 = sp.geometry for v in verts2 : r = numpy.sum ( v * v ) if r > maxr : maxr = r #return numpy.sqrt ( maxr ) return maxr def PIsOutside ( self, p, smod ) : #print "pt - %d surfps" % len(surfm.surfacePieces) #min_i = 0 #max_d = -1e7 #max_n = None #for nvi, nv in enumerate ( smod.nvecs ) : # d = numpy.dot ( p, nv ) # if d > max_d : # min_i = nvi # max_d = d # max_n = nv max_i = numpy.argmax ( numpy.sum ( smod.nvecs * p, axis = 1 ) ) max_n = smod.nvecs [ max_i ] tri = smod.icosTris [ max_i ] p1 = smod.icosVerts [ tri[0] ] #p2 = smod.icosVerts [ tri[1] ] #p3 = smod.icosVerts [ tri[2] ] #v1 = p2 - p1 #v2 = p3 - p1 #N = numpy.cross ( v1, v2 ) pv = p - p1 d = numpy.dot ( pv, max_n ) if d <= 0.0 : #print " - inside the tri ", min_i return False #return True sp = smod.sps[max_i] #if sp.ind != min_i and not hasattr (sp, 'flagged') : # print sp.ind, "?" # sp.flagged = True verts2, tris2 = sp.geometry #if not hasattr ( sp, 'tdirs' ) : #sp.tdirs = [None] * len(tris2) #sp.tnorms = [None] * len(tris2) #min_i = 0 #max_d = -1e7 #for ti, tri in enumerate ( tris2 ) : # d = numpy.dot ( p, sp.tdirs[ti] ) # if d > max_d : # max_d = d # min_i = ti max_i = numpy.argmax ( numpy.sum ( sp.tdirs * p, axis = 1 ) ) tri = tris2[max_i] p1 = verts2 [ tri[0] ] pv = p - p1 d = numpy.dot ( pv, sp.tnorms [max_i] ) if d <= 0.0 : #print " - inside the tri ", min_i return False return True def Icos2Map0 ( self ) : smod = self.GetMod ( "Icosahedron Faces" ) if smod == None : self.umsg ( "No Icosahedron2 model found" ) return dmap = segmentation_map() if dmap == None : self.umsg ( "Select a map in Segment Map dialog" ) return sepRs = self.segRads.get().split(",") print "Sep rads:", sepRs if len(sepRs) != 2 : self.umsg ( "Enter two radii separated by a comma" ) return try : start_rad = int ( sepRs[0] ) except : self.umsg ( "Invalid start radius: " + sepRs[0] ) return try : end_rad = int ( sepRs[1] ) except : self.umsg ( "Invalid end radius: " + sepRs[1] ) return if end_rad <= start_rad : self.umsg ( "End rad should be larger than start rad :) " ) return self.umsg ( "Mas
k %s, %d -> %d" % (dmap.name,start_rad,end_rad) ) self.MakeTNorms ( smod ) import time start = time.time() mm = dmap.full_matrix () #m1 = numpy.zeros_like ( mm ) # transform to index reference frame of ref_map f1 = dmap.data.ijk_to_xyz_tr
ansform from _contour import affine_transform_vertices as transform_vertices #f2 = xform_matrix ( mask_map.openState.xform ) #f3 = xform_matrix ( ref_map.openState.xform.inverse() ) #f4 = ref_map.data.xyz_to_ijk_transform #tf = multiply_matrices( f2, f1 ) #tf = multiply_matrices( f3, tf ) #tf = multiply_matrices( f4, tf ) nm = numpy.zeros_like ( mm ) self.updateIcos2 ( start_rad ) minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod ) print " - start rad %d -- min rad %.1f, max rad %.1f" % ( start_rad, numpy.sqrt(minr), numpy.sqrt(maxr)) done = time.time() elapsed = done - start print "Took: ", elapsed pt = numpy.array ( [[0,0,0]], numpy.float32 ) p = pt[0] for i in range ( dmap.data.size[0] ) : self.status ( "Masking %s, outside radius %d, %d/%d" % (dmap.name, start_rad, i+1, dmap.data.size[0]) ) p[0] = i * f1[0][0] + f1[0][3] for j in range ( dmap.data.size[1] ) : p[1] = j * f1[1][1] + f1[1][3] for k in range ( dmap.data.size[2] ) : #p[2] = k * f1[2][2] + f1[2][3] #pt = numpy.array ( [[i,j,k]], numpy.float32 ) #p[0],p[1],p[2] = ti,tj,tk #transform_vertices ( pt, f1 ) p[2] = k * f1[2][2] + f1[2][3] ptr = numpy.sum ( p*p ) if ptr < minr : pass elif ptr > maxr : nm[k,j,i] = mm[k,j,i] elif self.PIsOutside ( pt[0], smod ) : nm[k,j,i] = mm[k,j,i] self.updateIcos2 ( end_rad ) minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod ) print " - end rad %d -- min rad %.1f, max rad %.1f" % (start_rad, numpy.sqrt(minr), numpy.sqrt(maxr)) for i in range ( dmap.data.size[0] ) : self.status ( "Masking %s, inside radius %d, %d/%d" % (dmap.name, end_rad, i+1, dmap.data.size[0]) ) p[0] = i * f1[0][0] + f1[0][3] for j in range ( dmap.data.size[1] ) : p[1] = j * f1[1][1] + f1[1][3] for k in range ( dmap.data.size[2] ) : #pt = numpy.array ( [[i,j,k]], numpy.float32 ) #p[0],p[1],p[2] = ti,tj,tk #transform_vertices ( pt, f1 ) p[2] = k * f1[2][2] + f1[2][3] ptr = numpy.sum ( p*p ) if ptr < minr : continue elif ptr > maxr : nm[k,j,i] = 0.0 elif self.PIsOutside ( p, smod ) : nm[k,j,i] = 0.0 ndata = VolumeData.Array_Grid_Data ( nm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles ) try : nvg = VolumeViewer.volume.add_data_set ( ndata, None ) except : nvg = VolumeViewer.volume.volume_from_grid_data ( ndata ) nvg.name = dmap.name + "__%d--to--%d_fast" % (start_rad, end_rad) don
len(updated_keys), len(removing_keys)), Style.RESET_ALL)

    # check verification failed items
    target_verified_items = None
    if len(reversed_matched_kv):
        target_verified_items = {
            k: {'ratio': reversed_matched_kv[k]["ratio"],
                'original': base_kv[k],
                'reversed': reversed_translated_kv[k],
                'translated': translated_kv[k]}
            for k in reversed_matched_kv.keys()}

    return updated_content and (len(adding_keys) > 0 or len(updated_keys) > 0 or len(
        removing_keys) > 0), updated_content, translated_kv, target_error_lines, target_verified_items


def write_file(target_file, parsed_list):
    suc = False
    f = None
    try:
        f = codecs.open(target_file, "w", "utf-8")
        contents = ''
        for content in parsed_list:
            if content['comment']:
                contents += '/*{0}*/'.format(content['comment']) + '\n'
            contents += '"{0}" = "{1}";'.format(content['key'], content['value']) + '\n'
        f.write(contents)
        suc = True
    except IOError:
        print('IOError to open', target_file)
    finally:
        # guard against codecs.open itself failing, in which case f is None
        if f:
            f.close()
    return suc


def remove_file(target_file):
    try:
        os.rename(target_file, target_file + '.deleted')
        return True
    except IOError:
        print('IOError to rename', target_file)
        return False


def create_file(target_file):
    open(target_file, 'a').close()


def notexist_or_empty_file(target_file):
    return not os.path.exists(target_file) or os.path.getsize(target_file) == 0


def resolve_file_names(target_file_names):
    return map(lambda f: f.decode('utf-8'),
               filter(lambda f: f.endswith(__FILE_SUFFIX__) or f.endswith(__FILE_INTENT_SUFFIX__),
                      target_file_names))


base_dict = {}
results_dict = {}

# Get Base Language Specs
walked = list(os.walk(__RESOURCE_PATH__, topdown=True))

# Init with Base.lproj
for dir, subdirs, files in walked:
    if os.path.basename(dir) == __BASE_RESOUCE_DIR__:
        for _file in resolve_file_names(files):
            f = os.path.join(dir, _file)
            if notexist_or_empty_file(f):
                continue

            parsed_objs = None
            # parse .strings
            if f.endswith(__FILE_SUFFIX__):
                parsed_objs = strparser.parse_strings(filename=f)
            # parse .intentdefinition
            elif f.endswith(__FILE_INTENT_SUFFIX__):
                print('[i] Found "{0}" in {1}. Parse ....'.format(os.path.basename(f), __BASE_RESOUCE_DIR__))
                parsed_objs = strparser_intentdefinition.parse_strings(f)
                # replace to dest extension .strings
                _file = _file.replace(__FILE_INTENT_SUFFIX__, __FILE_SUFFIX__)
                # write original .strings file to local
                write_file(os.path.join(dir, _file), parsed_objs)

            if not parsed_objs:
                continue

            base_dict[_file] = parsed_objs

if not base_dict:
    print('[!] Not found "{0}" in target path "{1}"'.format(__BASE_RESOUCE_DIR__, __RESOURCE_PATH__))
    sys.exit(0)

# Ensure supporting lproj dirs exist, creating any that are missing.
print('Check and verify resources ...')

current_lproj_names = [os.path.splitext(os.path.basename(lproj_path))[0]
                       for lproj_path in filter(lambda d: d.endswith(__DIR_SUFFIX__),
                                                [dir for dir, subdirs, files in walked])]
notexisted_lproj_names = list(set(__XCODE_LPROJ_SUPPORTED_LOCALES__) - set(current_lproj_names))
creating_lproj_dirs = [expanduser(os.path.join(__RESOURCE_PATH__, ln + __DIR_SUFFIX__))
                       for ln in notexisted_lproj_names]

if creating_lproj_dirs:
    print('The following lproj dirs do not exist. Creating ...')
    for d in creating_lproj_dirs:
        print('Created', d)
        os.mkdir(d)

# Start to sync localizable files.
print('Start synchronizing...')

for file in base_dict:
    print('Target:', file)

for dir, subdirs, files in walked:
    files = resolve_file_names(files)

    if dir.endswith((__DIR_SUFFIX__)):
        lproj_name = os.path.basename(dir).split(__DIR_SUFFIX__)[0]

        if lproj_name == __BASE_LANG__:
            continue

        if lproj_name not in __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__:
            print('Not supported:', lproj_name)
            continue

        lc = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__[lproj_name]
        if strlocale.matched_locale_code(lc, __EXCLUDING_LANGS__):
            print('Skip:', lc)
            continue

        results_dict[lc] = {
            'deleted_files': [],
            'added_files': [],
            'updated_files': [],
            'skipped_files': [],
            'translated_files_lines': {},
            'error_lines_kv': {},
            'verified_result': {}
        }

        # if not supported_lang(lc):
        #     print('Not supported:', lc)
        #     results_dict[lc]['skipped_files'] = join_path_all(dir, files)
        #     continue

        print('\n', 'Analyzing localizables... {1} (at {0})'.format(dir, lc))

        added_files = list(set(base_dict.keys()) - set(files))
        removed_files = list(set(files) - set(base_dict.keys()))
        existing_files = list(set(files) - (set(added_files) | set(removed_files)))

        added_files = join_path_all(dir, added_files)
        removed_files = join_path_all(dir, removed_files)
        existing_files = join_path_all(dir, existing_files)

        added_cnt, updated_cnt, removed_cnt = 0, 0, 0
        translated_files_lines = results_dict[lc]['translated_files_lines']
        error_files = results_dict[lc]['error_lines_kv']

        # remove - file
        for removed_file in removed_files:
            print('Removing File... {0}'.format(removed_file))
            if remove_file(removed_file):
                removed_cnt += 1

        # add - file
        for added_file in added_files:
            print('Adding File... {0}'.format(added_file))
            create_file(added_file)

            u, c, t, e, m = synchronize(added_file, lc)
            # error
            if e:
                error_files[added_file] = e
            # normal
            elif u and write_file(added_file, c):
                added_cnt += 1
                translated_files_lines[added_file] = t
            # verify failed
            for k in (m or {}):
                results_dict[lc]['verified_result'][k] = m[k]

        # exist - lookup lines
        for ext_file in existing_files:
            u, c, t, e, m = synchronize(ext_file, lc)
            # error
            if e:
                error_files[ext_file] = e
            # normal
            elif u:
                print('Updating File... {0}'.format(ext_file))
                if write_file(ext_file, c):
                    updated_cnt += 1
                translated_files_lines[ext_file] = t
            # verify failed
            for k in (m or {}):
                results_dict[lc]['verified_result'][k] = m[k]

        if added_cnt or updated_cnt or removed_cnt or error_files:
            print(Fore.WHITE + '(i) Changed Files : Added {0}, Updated {1}, Removed {2}, Error {3}'.format(
                added_cnt, updated_cnt, removed_cnt, len(error_files.keys())), Style.RESET_ALL)
        else:
            print('Nothing to translate or add.')

        """
        Results
        """
        results_dict[lc]['deleted_files'] = removed_files
from collections import defaultdict

from django.core.files.storage import DefaultStorage
from django.core.management.base import BaseCommand, CommandError

from candidates.csv_helpers import list_to_csv, memberships_dicts_for_csv
from elections.models import Election


def safely_write(output_filename, memberships_list):
    """
    Use Django's storage backend to write the CSV file to the MEDIA_ROOT.

    If using S3 (via Django Storages) the file is atomically written when the
    file is closed (when the context manager closes). That is, the file can be
    opened and written to, but nothing changes at the public S3 URL until the
    object is closed, meaning it's not possible to have a half written file.

    If not using S3, there will be a short time where the file is empty
    during write.
    """
    csv = list_to_csv(memberships_list)
    file_store = DefaultStorage()
    with file_store.open(output_filename, "wb") as out_file:
        out_file.write(csv.encode("utf-8"))


class Command(BaseCommand):
    help = "Output CSV files for all elections"

    def add_arguments(self, parser):
        parser.add_argument(
            "--site-base-url",
            help="The base URL of the site (for full image URLs)",
        )
        parser.add_argument(
            "--election",
            metavar="ELECTION-SLUG",
            help="Only output CSV for the election with this slug",
        )

    def slug_to_file_name(self, slug):
        return "{}-{}.csv".format(self.output_prefix, slug)

    def handle(self, **options):
        if options["election"]:
            try:
                election = Election.objects.get(slug=options["election"])
                election_slug = election.slug
            except Election.DoesNotExist:
                message = "Couldn't find an election with slug {election_slug}"
                raise CommandError(
                    message.format(election_slug=options["election"])
                )
        else:
            election_slug = None

        self.options = options
        self.output_prefix = "candidates"

        membership_by_election, elected_by_election = memberships_dicts_for_csv(
            election_slug
        )

        # Write a file per election, optionally adding candidates.
        # We still want a file to exist even if there are no candidates yet,
        # as the files are linked to as soon as the election is created.
        election_qs = Election.objects.all()
        if election_slug:
            election_qs = election_qs.filter(slug=election_slug)
        for election in election_qs:
            safely_write(
                self.slug_to_file_name(election.slug),
                membership_by_election.get(election.slug, []),
            )

        # Make a CSV file per election date
        slugs_by_date = defaultdict(list)
        for slug in membership_by_election.keys():
            slugs_by_date[slug.split(".")[-1]].append(slug)
        for date, slugs in slugs_by_date.items():
            memberships_for_date = []
            for slug in slugs:
                memberships_for_date += membership_by_election[slug]
            safely_write(self.slug_to_file_name(date), memberships_for_date)

        # If we're not outputting a single election, output all elections
        if not election_slug:
            sorted_elections = sorted(
                membership_by_election.keys(),
                key=lambda key: key.split(".")[-1],
            )
            all_memberships = []
            all_elected = []
            for slug in sorted_elections:
                all_memberships += membership_by_election[slug]
                all_elected += elected_by_election[slug]
            safely_write(self.slug_to_file_name("all"), all_memberships)
            safely_write(self.slug_to_file_name("elected-all"), all_elected)
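# A small, self-contained illustration (with hypothetical slugs) of how the
# per-date CSVs above are grouped: the date is the final dot-separated
# component of each election slug.
from collections import defaultdict

slugs = ["parl.2015-05-07", "local.norwich.2015-05-07", "mayor.london.2016-05-05"]
slugs_by_date = defaultdict(list)
for slug in slugs:
    slugs_by_date[slug.split(".")[-1]].append(slug)
assert sorted(slugs_by_date) == ["2015-05-07", "2016-05-05"]
assert slugs_by_date["2015-05-07"] == ["parl.2015-05-07", "local.norwich.2015-05-07"]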
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class EffectiveNetworkSecurityRule(Model):
    """Effective network security rules.

    :param name: The name of the security rule specified by the user (if
     created by the user).
    :type name: str
    :param protocol: The network protocol this rule applies to. Possible
     values are: 'Tcp', 'Udp', and 'All'. Possible values include: 'Tcp',
     'Udp', 'All'
    :type protocol: str or
     ~azure.mgmt.network.v2017_10_01.models.EffectiveSecurityRuleProtocol
    :param source_port_range: The source port or range.
    :type source_port_range: str
    :param destination_port_range: The destination port or range.
    :type destination_port_range: str
    :param source_port_ranges: The source port ranges. Expected values include
     a single integer between 0 and 65535, a range using '-' as separator
     (e.g. 100-400), or an asterisk (*)
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges. Expected
     values include a single integer between 0 and 65535, a range using '-'
     as separator (e.g. 100-400), or an asterisk (*)
    :type destination_port_ranges: list[str]
    :param source_address_prefix: The source address prefix.
    :type source_address_prefix: str
    :param destination_address_prefix: The destination address prefix.
    :type destination_address_prefix: str
    :param source_address_prefixes: The source address prefixes. Expected
     values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
    :type source_address_prefixes: list[str]
    :param destination_address_prefixes: The destination address prefixes.
     Expected values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
    :type destination_address_prefixes: list[str]
    :param expanded_source_address_prefix: The expanded source address prefix.
    :type expanded_source_address_prefix: list[str]
    :param expanded_destination_address_prefix: Expanded destination address
     prefix.
    :type expanded_destination_address_prefix: list[str]
    :param access: Whether network traffic is allowed or denied. Possible
     values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
    :type access: str or
     ~azure.mgmt.network.v2017_10_01.models.SecurityRuleAccess
    :param priority: The priority of the rule.
    :type priority: int
    :param direction: The direction of the rule. Possible values are:
     'Inbound' and 'Outbound'. Possible values include: 'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2017_10_01.models.SecurityRuleDirection
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
        'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
        'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
        'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
        'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
        'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
        'access': {'key': 'access', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'direction': {'key': 'direction', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, protocol=None, source_port_range: str=None, destination_port_range: str=None, source_port_ranges=None, destination_port_ranges=None, source_address_prefix: str=None, destination_address_prefix: str=None, source_address_prefixes=None, destination_address_prefixes=None, expanded_source_address_prefix=None, expanded_destination_address_prefix=None, access=None, priority: int=None, direction=None, **kwargs) -> None:
        super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
        self.name = name
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_port_ranges = source_port_ranges
        self.destination_port_ranges = destination_port_ranges
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.source_address_prefixes = source_address_prefixes
        self.destination_address_prefixes = destination_address_prefixes
        self.expanded_source_address_prefix = expanded_source_address_prefix
        self.expanded_destination_address_prefix = expanded_destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
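# A minimal construction sketch for the model above. The keyword names come
# straight from the __init__ signature; the values are hypothetical, and the
# import path assumes the usual azure-mgmt-network package layout.
from azure.mgmt.network.v2017_10_01.models import EffectiveNetworkSecurityRule

rule = EffectiveNetworkSecurityRule(
    name='AllowInternalWeb',
    protocol='Tcp',
    source_address_prefix='10.0.0.0/16',
    destination_address_prefix='VirtualNetwork',
    destination_port_ranges=['80', '443'],
    access='Allow',
    priority=100,
    direction='Inbound',
)
assert rule.priority == 100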
from flask import render_template, jsonify, url_for, abort, request, redirect, current_app
from flask_wtf import Form
from flask_user import current_user
from silverflask import db
from silverflask.models import User
from silverflask.fields import GridField
from silverflask.core import Controller
from silverflask.controllers.cms_controller import CMSController


class SecurityController(CMSController):
    url_prefix = CMSController.url_prefix + '/security'
    urls = {
        '/edit/<int:record_id>': 'edit_user',
        '/gridfield': 'get_users',
        '/': 'form'
    }
    allowed_actions = {
        'edit_user'
    }

    @staticmethod
    def edit_user(record_id):
        user_obj = db.session.query(User).get(record_id)
        if not user_obj:
            # abort() takes the status code first
            abort(404, "Not found")
        form_class = User.get_cms_form()
        form = form_class(request.form, obj=user_obj)
        if form.validate_on_submit():
            form.populate_obj(user_obj)
            if form['new_password'].data:
                user_obj.set_password(form['new_password'].data)
            db.session.commit()
            return redirect(url_for(".form"))
        return render_template("data_object/edit.html", elem=user_obj, form=form)

    @staticmethod
    def get_users():
        q = User.query.all()
        res = []
        for r in q:
            d = r.as_dict()
            d.update({"edit_url": url_for(".edit_user", record_id=r.id)})
            res.append(d)
        return jsonify(data=res)

    @staticmethod
    def form():
        class SecurityForm(Form):
            gridfield = GridField(
                urls={"get": url_for(".get_users")},
                buttons=[],
                display_cols=["id", "name"]
            )
        return render_template("assetmanager.html", form=SecurityForm())
# Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gettext import gettext as _ from gi.repository import GLib from gi.repository import Gtk from gi.repository import Gdk from jarabe.webservice.accountsmanager import get_webaccount_services from jarabe.controlpanel.sectionview import SectionView from sugar3.graphics.icon import CanvasIcon, Icon from sugar3.graphics import style def get_service_name(service): if hasattr(service, '_account'): if hasattr(service._account, 'get_description'): return service._account.get_description() return '' class WebServicesConfig(SectionView): def __init__(self, model, alerts): SectionView.__init__(self) self._model = model self.restart_alerts = alerts services = get_webaccount_services() grid = Gtk.Grid() if len(services) == 0: grid.set_row_spacing(style.DEFAULT_SPACING) icon = Icon(pixel_size=style.LARGE_ICON_SIZE, icon_name='module-webaccount', stroke_color=style.COLOR_BUTTON_GREY.get_svg(), fill_color=style.COLOR_TRANSPARENT.get_svg()) grid.attach(icon, 0, 0, 1, 1) icon.show() label = Gtk.Label() label.set_justify(Gtk.Justification.CENTER) label.set_markup( '<span foreground="%s" size="large">%s</span>' % (style.COLOR_BUTTON_GREY.get_html(), GLib.markup_escape_text( _('No web services are installed.\n' 'Please visit %s for more details.' % 'http://wiki.sugarlabs.org/go/WebServices')))) label.show() grid.attach(label, 0, 1, 1, 1) alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1) alignment.add(grid) grid.show() self.add(alignment) alignment.show() return grid.set_row_spacing(style.DEFAULT_SPACING * 4) grid.set_column_spacing(style.DEFAULT_SPACING * 4) grid.set_border_width(style.DEFAULT_SPACING * 2) grid.set_column_homogeneous(True) width = Gdk.Screen.width() - 2 * style.GRID_CELL_SIZE nx = int(width / (style.GRID_CELL_SIZE + style.DEFAULT_SPACING * 4)) self._service_config_box = Gtk.VBox() x = 0 y = 0 for service in services: service_grid = Gtk.Grid() icon = CanvasIcon(icon_name=service.get_icon_name()) icon.show() service_grid.attach(icon, x, y, 1, 1) icon.connect('activate', service.config_service_cb, None, self._service_config_box) label = Gtk.Label() label.set_justify(Gtk.Justification.CENTER) name = get_service_name(service) label.set_markup(name) service_grid.attach(label, x, y + 1, 1, 1) label.show() grid.a
ttach(service_grid, x, y, 1, 1) service_grid.show()
x += 1 if x == nx: x = 0 y += 1 alignment = Gtk.Alignment.new(0.5, 0, 0, 0) alignment.add(grid) grid.show() vbox = Gtk.VBox() vbox.pack_start(alignment, False, False, 0) alignment.show() scrolled = Gtk.ScrolledWindow() vbox.pack_start(scrolled, True, True, 0) self.add(vbox) scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) scrolled.show() workspace = Gtk.VBox() scrolled.add_with_viewport(workspace) workspace.show() workspace.add(self._service_config_box) workspace.show_all() vbox.show() def undo(self): pass
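# The icon layout above wraps a flat list of services into rows of `nx`
# columns. Here is the same x/y bookkeeping in isolation, with a
# hypothetical nx of 4 and ten services.
nx = 4
positions = []
x = y = 0
for _ in range(10):
    positions.append((x, y))
    x += 1
    if x == nx:
        x = 0
        y += 1
assert positions[3] == (3, 0)  # last cell of the first row
assert positions[4] == (0, 1)  # wraps to the second row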
confirm.") def must_exist(p): if not p.exists(): raise Exception("No such file: %s" % p.absolute()) def run_cmd(ctx, chdir, args): cmd = ' '.join(map(str, args)) print("Invoke {}".format(cmd)) with cd(chdir): ctx.run(cmd, pty=True) class DocTree(object): """ Base class for a doctree descriptor. Atelier currently supports `Sphinx <http://www.sphinx-doc.org/en/stable/>`__ and `Nikola <https://getnikola.com/>`__ docs. """ src_path = None out_path = None has_intersphinx = False # html_baseurl = None conf_globals = None def __init__(self, prj, rel_doc_tree): self.rel_path = rel_doc_tree self.prj = prj if rel_doc_tree in ('', '.'): src_path = prj.root_dir else: src_path = prj.root_dir / rel_doc_tree # The src_path may not exist if this is on a Project which # has been created from a normally installed main_package # (because there it has no source code). if src_path.exists(): self.src_path = src_path def __repr__(self): return "{}({!r}, {!r})".format(self.__class__, self.prj, self.rel_path) def __str__(self): return self.rel_path def make_messages(self, ctx): pass def build_docs(self, ctx, *cmdline_args): raise NotImplementedError() def publish_docs(self, ctx): # build_dir = docs_dir / ctx.build_dir_name if self.src_path is None: return build_dir = self.out_path if build_dir.exists(): docs_dir = self.src_path # name = '%s_%s' % (ctx.project_name, docs_dir.name) # dest_url = ctx.docs_rsync_dest % name if "%" in ctx.docs_rsync_dest: name = '%s_%s' % (ctx.project_name, docs_dir.name) dest_url = ctx.docs_rsync_dest % name else: dest_url = ctx.docs_rsync_dest.format( prj=ctx.project_name, docs=docs_dir.name) self.publish_doc_tree(ctx, build_dir, dest_url) def publish_doc_tree(self, ctx, build_dir, dest_url): print("Publish to ", dest_url) with cd(build_dir): args = ['rsync', '-e', 'ssh', '-r'] args += ['--verbose'] args += ['--progress'] # show progress args += ['--delete'] # delete files in
dest args += ['--times'] # preserve timestamps # the problem with --times is that it fails when sev
eral # users can publish to the same server alternatively. # Only the owner of a file can change the mtime, other # users can't, even if they have write permission through # the group. args += ['--exclude', '.doctrees'] args += ['./'] # source args += [dest_url] # dest cmd = ' '.join(args) # must_confirm("%s> %s" % (build_dir, cmd)) ctx.run(cmd, pty=True) class SphinxTree(DocTree): """ The default docs builder using Sphinx. :cmd:`sphinx-build` .. command:: sphinx-build http://www.sphinx-doc.org/en/stable/invocation.html#invocation-of-sphinx-build """ has_intersphinx = True def __init__(self, prj, src_path): super(SphinxTree, self).__init__(prj, src_path) if self.src_path is None: return cfg = prj.config self.out_path = self.src_path / cfg['build_dir_name'] def make_messages(self, ctx): if self.src_path is None: return self.load_conf() translated_languages = self.conf_globals.get('translated_languages', []) if len(translated_languages): # Extract translatable messages into pot files (sphinx-build -M gettext ./ .build/) args = ['sphinx-build', '-b', 'gettext', '.', self.out_path] run_cmd(ctx, self.src_path, args) # Create or update the .pot files (sphinx-intl update -p .build/gettext -l de -l fr) args = ['sphinx-intl', 'update', '-p', self.out_path / "gettext"] for lng in translated_languages: args += ['-l', lng] run_cmd(ctx, self.src_path, args) def build_docs(self, ctx, *cmdline_args): if self.src_path is None: return docs_dir = self.src_path print("Invoking Sphinx in directory %s..." % docs_dir) builder = 'html' if ctx.use_dirhtml: builder = 'dirhtml' self.sphinx_build(ctx, builder, docs_dir, cmdline_args) self.load_conf() translated_languages = self.conf_globals.get('translated_languages', []) for lng in translated_languages: self.sphinx_build(ctx, builder, docs_dir, cmdline_args, lng) self.sync_docs_data(ctx, docs_dir) def load_conf(self): if self.src_path is None: return if self.conf_globals is not None: return conf_py = self.src_path / "conf.py" self.conf_globals = {'__file__': conf_py} code = compile(open(conf_py, "rb").read(), conf_py, 'exec') exec(code, self.conf_globals) # self.html_baseurl = conf_globals.get("html_baseurl", None) def __str__(self): if self.src_path is None: return super(SphinxTree, self).__str__() self.load_conf() return u"{}->{}".format(self.rel_path, self.conf_globals.get('html_title')) def sphinx_build(self, ctx, builder, docs_dir, cmdline_args=[], language=None, build_dir_cmd=None): if self.out_path is None: return # args = ['sphinx-build', builder] args = ['sphinx-build', '-b', builder] args += ['-T'] # show full traceback on exception args += cmdline_args # ~ args += ['-a'] # all files, not only outdated # ~ args += ['-P'] # no postmortem # ~ args += ['-Q'] # no output build_dir = self.out_path if language is not None: args += ['-D', 'language=' + language] # needed in select_lang.html template args += ['-A', 'language=' + language] # if language != ctx.languages[0]: build_dir = build_dir / language # seems that the default location for the .doctrees directory # is no longer in .build but the source directory. 
args += ['-d', str(build_dir / '.doctrees')] if ctx.tolerate_sphinx_warnings: args += ['-w', 'warnings_%s.txt' % builder] else: args += ['-W'] # consider warnings as errors args += ['--keep-going'] # but keep going until the end to show them all # args += ['-vvv'] # increase verbosity # args += ['-w'+Path(ctx.root_dir,'sphinx_doctest_warnings.txt')] args += ['.', str(build_dir)] run_cmd(ctx, docs_dir, args) if build_dir_cmd is not None: with cd(build_dir): ctx.run(build_dir_cmd, pty=True) def sync_docs_data(self, ctx, docs_dir): # build_dir = docs_dir / ctx.build_dir_name if self.src_path is None: return build_dir = self.out_path for data in ('dl', 'data'): src = (docs_dir / data).absolute() if src.is_dir(): target = build_dir / 'dl' target.mkdir(exist_ok=True) cmd = 'cp -ur %s %s' % (src, target.parent) ctx.run(cmd, pty=True) if False: # according to http://mathiasbynens.be/notes/rel-shortcut-icon for n in ['favicon.ico']: src = (docs_dir / n).absolute() if src.exists(): target = build_dir / n cmd = 'cp %s %s' % (src, target.parent) ctx.run(cmd, pty=True) class NikolaTree(DocTree): """Requires Nikola. Note that Nikola requires:: $ sudo apt install python-gdbm """ def __init__(self, ctx, src_path): super(NikolaTree, self).__init__(ctx, src_path) if self.src_path is None:
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body limiting middleware. """ import webob.dec import webob.exc from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.commo
n import log as logging from cinder import wsgi #default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='Max size for body of a request') FLAGS = flags.FLAGS FLAGS.register_opt(max_request_body_size_opt) LOG = logging.getLogger(__name__) class Reque
stBodySizeLimiter(wsgi.Middleware):
    """Limit the size of incoming request bodies."""

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if (req.content_length > FLAGS.osapi_max_request_body_size
                or len(req.body) > FLAGS.osapi_max_request_body_size):
            msg = _("Request is too large.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        else:
            return self.application
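# A stand-alone sketch of the size check the middleware applies, using webob
# directly so it does not depend on cinder's FLAGS machinery; the 114688-byte
# limit mirrors the osapi_max_request_body_size default above.
import webob

MAX_BODY = 114688  # 112 KiB

req = webob.Request.blank('/volumes', body=b'x' * (MAX_BODY + 1))
assert req.content_length > MAX_BODY or len(req.body) > MAX_BODY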
ort unittest class TestChanges(unittest.TestCase): PROJECT = "project" ZONE_NAME = "example.com" CHANGES_NAME = "changeset_id" @staticmethod def _get_target_class(): from google.cloud.dns.changes import Changes return Changes def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def _setUpConstants(self): from google.cloud._helpers import UTC from google.cloud._helpers import _NOW self.WHEN = _NOW().replace(tzinfo=UTC) def _make_resource(self): from google.cloud._helpers import _datetime_to_rfc3339 when_str = _datetime_to_rfc3339(self.WHEN) return { "kind": "dns#change", "id": self.CHANGES_NAME, "startTime": when_str, "status": "done", "additions": [ { "name": "test.example.com", "type": "CNAME", "ttl": "3600", "rrdatas": ["www.example.com"], } ], "deletions": [ { "name": "test.example.com", "type": "CNAME", "ttl": "86400", "rrdatas": ["other.example.com"], } ], } def _verifyResourceProperties(self, changes, resource, zone): from google.cloud._helpers import _rfc3339_to_datetime self.assertEqual(changes.name, resource["id"]) started = _rfc3339_to_datetime(resource["startTime"]) self.assertEqual(changes.started, started) self.assertEqual(changes.status, resource["status"]) r_additions = resource.get("additions", ()) self.assertEqual(len(changes.additions), len(r_additions)) for found, expected in zip(changes.additions, r_additions): self.assertEqual(found.name, expected["name"]) self.assertEqual(found.record_type, expected["type"]) self.assertEqual(found.ttl, int(expected["ttl"])) self.assertEqual(found.rrdatas, expected["rrdatas"]) self.assertIs(found.zone, zone) r_deletions = resource.get("deletions", ()) self.assertEqual(len(changes.deletions), len(r_deletions)) for found, expected in zip(changes.deletions, r_deletions): self.assertEqual(found.name, expected["name"]) self.assertEqual(found.record_type, expected["type"]) self.assertEqual(found.ttl, int(expected["ttl"])) self.assertEqual(found.rrdatas, expected["rrdatas"]) self.assertIs(found.zone, zone) def test_ctor(self): zone = _Zone() changes = self._make_one(zone) self.assertIs(changes.zone, zone) self.assertIsNone(changes.name) self.assertIsNone(changes.status) self.assertIsNone(changes.started) self.assertEqual(list(changes.additions), []) self.assertEqual(list(changes.deletions), []) def test_from_api_repr_missing_additions_deletions(self): self._setUpConstants() RESOURCE = self._make_resource() del RESOURCE["additions"] del RESOURCE["deletions"] zone = _Zone() klass = self._get_target_class() changes = klass.from_api_repr(RESOURCE, zone=zone) self._verifyResourceProperties(changes, RESOURCE, zone) def test_from_api_repr(self): self._setUpConstants() RESOURCE = self._make_resource() zone = _Zone() klass = self._get_target_class() changes = klass.from_api_repr(RESOURCE, zone=zone) self._verifyResourceProperties(changes, RESOURCE, zone) def test_name_setter_bad_value(self): zone = _Zone() changes = self._make_one(zone) with self.assertRaises(ValueError): changes.name = 12345 def test_name_setter(self): zone = _Zone() changes = self._make_one(zone) changes.name = "NAME" self.assertEqual(changes.name, "NAME") def test_add_record_set_invalid_value(self): zone = _Zone() changes = self._make_one(zone) with self.assertRaises(ValueError): changes.add_record_set(object()) def test_add_record_set(self): from google.cloud.dns.resource_record_set import ResourceRecordSet zone = _Zone() changes = self._make_one(zone) rrs = ResourceRecordSet( "test.example.com", "CNAME", 3600, ["www.example.com"], zone ) 
changes.add_record_set(rrs) self.assertEqual(list(changes.additions), [rrs]) def test_delete_record_set_invalid_value(self): zone = _Zone() changes = self._make_one(zone) with self.assertRaises(ValueError): changes.delete_record_set(object()) def test_delete_record_set(self): from google.cloud.dns.resource_record_set import ResourceRecordSet zone = _Zone() changes = self._make_one(zone) rrs = ResourceRecordSet( "test.example.com", "CNAME", 3600, ["www.example.com"], zone ) changes.delete_record_set(rrs) self.assertEqual(list(changes.deletions), [rrs]) def test_create_wo_additions_or_deletions(self): self._setUpConstants() RESOURCE = self._make_resource() conn = _Connection(RESOURCE) client = _Client(project=self.PROJECT, connection=conn) zone = _Zone(client) changes = self._make_one(zone) with self.assertRaises(ValueError): changes.create() self.assertEqual(len(conn._requested), 0) def test_create_w_bound_client(self): from google.cloud.dns.resource_record_set import ResourceRecordSet self._setUpConstants() RESOURCE = self._make_resource() PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME) conn = _Connection(RESOURCE) client = _Client(project=self.PROJECT, connection=conn) zone = _Zone(client) changes = self._make_one(zone) changes.add_record_set( ResourceRecordSet( "test.example.com", "CNAME", 3600, ["www.example.com"], zone ) ) changes.delete_record_set( ResourceRecordSet( "test.example.com", "CNAME", 86400, ["other.example.com"], zone ) ) changes.create() self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req["method"], "POST") self.assertEqual(req["path"], "/%s" % PATH) SENT = {"additions": RESOURCE["additions"], "deletions": RESOURCE["deletions"]} self.assertEqual(req["data"], SENT) self._verifyResourceProperties(changes, RESOURCE, zone) def test_create_w_alternate_client(self): from google.cloud.dns.resource_record_set import ResourceRecordSet self._setUpConstants() RESOURCE = self._make_resource() PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME) conn1 = _Connection() client1 = _Client(project=self.PROJECT, connection=conn1) conn2 = _Connection(RESOURCE) client2 = _Client(project=self.PROJECT, connection=conn2) zone = _Zone(client1) changes = self._make_one(zone) chan
ges.add_record_set( ResourceRecordSet( "test.example.com", "CNAME", 3600, ["www.example.com"], zone ) ) changes.delete_record_set( ResourceRecordSet( "test.example.com", "CNAME", 86400, ["other.example.com"], zone ) ) changes.creat
e(client=client2)

        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req["method"], "POST")
        self.assertEqual(req["path"], "/%s" % PATH)
        SENT = {"additions": RESOURCE["additions"], "deletions": RESOURCE["deletions"]}
        self.assertEqual(req["data"], SENT)
        self._verifyResourceProperties(changes, RESOURCE, zone)
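# The tests above lean on hand-rolled doubles (_Zone, _Client, _Connection)
# defined elsewhere in the original module. This is only a sketch of the
# minimal shape such doubles need for these assertions; in particular the
# api_request recording convention is an assumption, and the real module's
# versions may differ.
class _Connection(object):
    def __init__(self, *responses):
        self._responses = responses
        self._requested = []

    def api_request(self, **kw):
        # record the request, then pop the next canned response
        self._requested.append(kw)
        response, self._responses = self._responses[0], self._responses[1:]
        return response


class _Client(object):
    def __init__(self, project="project", connection=None):
        self.project = project
        self._connection = connection


class _Zone(object):
    def __init__(self, client=None, name="example.com", project="project"):
        self._client = client
        self.name = name
        self.project = project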
from lumberjack.client.file_descriptor import FileDescriptorEndpoint from lumberjack.client.message_receiver import MessageReceiverFactory from lumberjack.client.message_forwarder import RetryingMessageForwarder from lumberjack.client.protocol import LumberjackProtocolFactory from lumberjack.util.object_pipe import ObjectPipe from multiprocessing import Process from twisted.internet import ssl, task, defer, endpoints from twisted.python.filepath import FilePath class ClientChild(object): _on_shutdown = defer.Deferred() def __init__(self, pipe, shutdown_message, **kwargs): self._pipe = pipe self._shutdown_message = shutdown_message pass def __call__(self, *args, **kwargs): self._pipe.close_writer() task.react(lambda reactor: self.init_reactor(reactor, *args, **kwargs)) def init_reactor(self, reactor, servers, ssl_certificate, *args, **kwargs): forwarder = self.create_message_forwarder(reactor) self.create_message_reader(reactor, forwarder) self.create_ssl_client(reactor, forwarder, servers[0], ssl_certificate) # Create a defer which, when fired, will shut down the app done = defer.Deferred() self._on_shutdown.addCallback(lambda x: done.callback(x)) return done def on_shutdown(self): print("got shutdown message") def create_ssl_client(self, reactor, forwarder, server, ssl_certificate)
: factory = LumberjackProtocolFactory(forwarder) host, port = self.parse_server(server) options = self.create_ssl_context(host, ssl_certificate) connector = reactor.connectSSL(host, port, factory, options) return connector def parse_server(self, server_string): try: host, port = server_string.split(':') return host, int(port) except ValueError: return server_string, 5043 def create_ssl_context(self, host, ssl_certificate): #ssl_context = ssl.SSL
Context(ssl.PROTOCOL_TLSv1) #ssl_context.load_verify_locations(cafile = ssl_certificate) #ssl_context.verify_mode = ssl.CERT_REQUIRED certData = FilePath(ssl_certificate).getContent() authority = ssl.Certificate.loadPEM(certData) options = ssl.optionsForClientTLS(host, authority) return options def create_message_reader(self, reactor, forwarder): factory = MessageReceiverFactory(forwarder, shutdown_params = ShutdownParams( message = self._shutdown_message, deferred = self._on_shutdown )) endpoint = FileDescriptorEndpoint(reactor, self._pipe.get_reader().fileno()) endpoint.listen(factory) return endpoint def create_message_forwarder(self, reactor): forwarder = RetryingMessageForwarder() return forwarder def acknowledge_sent(self, msg_id): self._queue.acknowledge(msg_id) # FIXME: Need to handle monitoring of child process and restart if lost # FIXME: Need to ensure pipe doesn't block if child can't be written to class ClientProcess(object): _pipe = None _shutdown_message = "SHUTDOWN" def __init__(self, **kwargs): self._pipe = ObjectPipe() self._thread = Process( target = ClientChild( pipe = self._pipe, shutdown_message = self._shutdown_message, **kwargs), name = "lumberjack.Client", kwargs = kwargs ) def start(self): self._thread.start() self._pipe.close_reader() def write(self, message): self._pipe.write(message) def shutdown(self, graceful = True): self.write(self._shutdown_message) self._pipe.close_writer() if (graceful): self._thread.join() else: self._thread.terminate() class ShutdownParams(object): def __init__(self, message, deferred): self.message = message self.deferred = deferred
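# A usage sketch for the parent-side API above; the keyword arguments are
# forwarded through to ClientChild.init_reactor, so `servers` and
# `ssl_certificate` follow its signature (the values here are hypothetical).
client = ClientProcess(
    servers=["logs.example.com:5043"],
    ssl_certificate="/etc/lumberjack/ca.pem",
)
client.start()
client.write({"message": "hello", "host": "web-1"})
client.shutdown(graceful=True)  # joins the child after the SHUTDOWN message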
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from abc import ABCMeta from abc import abstractmethod import six @six.add_metaclass(ABCMeta) class CryptographicEngine(object): """ The abstract base class of the cryptographic engine hierarchy. A cryptographic engine is responsible for generating all cryptographic objects and conducting all cryptographic operations for a KMIP server instance. """ @abstractmethod def create_symmetric_key(self, algorithm, length): """ Create a symmetric key. Args: algorithm(C
ryptographicAlgorithm): An enumeration specifying the algorithm for which the created key will be compliant. length(int): The length of the key to be created. This value must be compliant with the constraints of the provided algorithm. Returns: dict: A dictionary containing the key data, with the following key/value fields: * value - the bytes of the key * format - a KeyFormatType enumeration for
the bytes format """ @abstractmethod def create_asymmetric_key_pair(self, algorithm, length): """ Create an asymmetric key pair. Args: algorithm(CryptographicAlgorithm): An enumeration specifying the algorithm for which the created keys will be compliant. length(int): The length of the keys to be created. This value must be compliant with the constraints of the provided algorithm. Returns: dict: A dictionary containing the public key data, with the following key/value fields: * value - the bytes of the key * format - a KeyFormatType enumeration for the bytes format dict: A dictionary containing the private key data, identical in structure to the public key dictionary. """
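# A toy concrete engine against the ABC above, to make the documented return
# contract concrete. This is only a sketch, not PyKMIP's real engine: a real
# implementation would validate `algorithm` and return a proper KeyFormatType
# enumeration rather than the 'Raw' placeholder string used here.
import os


class ToyCryptographicEngine(CryptographicEngine):

    def create_symmetric_key(self, algorithm, length):
        # `length` is in bits, as is conventional for KMIP key lengths
        return {'value': os.urandom(length // 8), 'format': 'Raw'}

    def create_asymmetric_key_pair(self, algorithm, length):
        raise NotImplementedError("out of scope for this sketch")


key = ToyCryptographicEngine().create_symmetric_key('AES', 256)
assert len(key['value']) == 32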
rt ( get_installable_version, get_package_key_suffix, is_release, ) # A systemctl sub-command to start or restart a service. We use restart here # so that if it is already running it gets restart (possibly necessary to # respect updated configuration) and because restart will also start it if it # is not running. START = "restart" ZFS_REPO = { 'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/" "epel/zfs-release.el7.noarch.rpm", } ARCHIVE_BUCKET = 'clusterhq-archive' def get_repository_url(distribution, flocker_version): """ Return the URL for the repository of a given distribution. For ``yum``-using distributions this gives the URL to a package that adds entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this gives the URL for a repo containing a Packages(.gz) file. :param bytes distribution: The Linux distribution to get a repository for. :param bytes flocker_version: The version of Flocker to get a repository for. :return bytes: The URL pointing to a repository of packages. :raises: ``UnsupportedDistribution`` if the distribution is unsupported. """ distribution_to_url = { # TODO instead of hardcoding keys, use the _to_Distribution map # and then choose the name 'centos-7': "https://{archive_bucket}.s3.amazonaws.com/" "{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format( archive_bucket=ARCHIVE_BUCKET, key='centos', ), # This could hardcode the version number instead of using # ``lsb_release`` but that allows instructions to be shared between # versions, and for earlier error reporting if you try to install on a # separate version. The $(ARCH) part must be left unevaluated, hence # the backslash escapes (one to make shell ignore the $ as a # substitution marker, and then doubled to make Python ignore the \ as # an escape marker). The output of this value then goes into # /etc/apt/sources.list which does its own substitution on $(ARCH) # during a subsequent apt-get update 'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/' '$(lsb_release --release --short)/\\$(ARCH)'.format( archive_bucket=ARCHIVE_BUCKET, key='ubuntu' + get_package_key_suffix( flocker_version), ), 'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/' '$(lsb_release --release --short)/\\$(ARCH)'.format( archive_bucket=ARCHIVE_BUCKET, key='ubuntu' + get_package_key_suffix( flocker_version), ), } try: return distribution_to_url[distribution] except KeyError: raise UnsupportedDistribution() def get_repo_options(flocker_version): """ Get a list of options for enabling necessary yum repositories. :param bytes flocker_version: The version of Flocker to get options for. :return: List of bytes for enabling (or not) a testing repository. """ is_dev = not is_release(flocker_version) if is_dev: return ['--enablerepo=clusterhq-testing'] else: return [] class UnsupportedDistribution(Exception): """ Raised if trying to support a distribution which is not supported. """ @attributes(['distribution']) class DistributionNotSupported(NotImplementedError): """ Raised when the provisioning step is not supported on the given distribution. :ivar bytes distribution: The distribution that isn't supported. """ def __str__(self): return "Distribution not supported: %s" % (self.distribution,) @implementer(INode) class ManagedNode(PRecord): """ A node managed by some other system (eg by hand or by another piece of orchestration software). 
""" address = field(type=bytes, mandatory=True) private_address = field(type=(bytes, type(None)), initial=None, mandatory=True) distribution = field(type=bytes, mandatory=True) def task_client_installation_test(): """ Check that the CLI is working. """ return run_from_args(['flocker-deploy', '--version']) def install_cli_commands_yum(distribution, package_source): """ Install Flocker CLI on CentOS. The ClusterHQ repo is added for downloading latest releases. If ``package_source`` contains a branch, then a BuildBot repo will also be added to the package search path, to use in-development packages. Note, the ClusterHQ repo is always enabled, to provide dependencies. :param bytes distribution: The distribution the node is running. :param PackageSource package_source: The source from which to install the package. :return: a sequence of commands to run on the distribution """ if package_source.branch: # A development branch has been selected - add its Buildbot repo use_development_branch = True result_path = posixpath.join( '/results/omnibus/', package_source.branch, distribution) base_url = urljoin(package_source.build_server, result_path) else: use_development_branch = False commands = [ sudo(command="yum install -y " + get_repository_url( distribution=distribution, flocker_version=get_installable_version(version))), ] if use_development_branch: repo = dedent(b"""\ [clusterhq-build] name=clusterhq-build baseurl=%s gpgcheck=0 enabled=0 """) % (base_url,) commands.append(put(content=repo, path='/tmp/clusterhq-build.repo')) commands.append(sudo_from_args([ 'cp', '/tmp/clusterhq-build.repo', '/etc/yum.repos.d/clusterhq-build.repo'])) repo_options = ['--enablerepo=clusterhq-build'] else: repo_options = get_repo_options( flocker_version=get_installable_version(version)) if package_source.os_version: package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,) else: package = 'clusterhq-flocker-cli' # Install Flocker CLI and all dependencies commands.append(sudo_from_args( ["yum", "install"] + repo_options + ["-y", package])) return sequence(commands) def install_cli_commands_ubuntu(distribution, package_source): """ Install flocker CLI on Ubuntu. The ClusterHQ repo is added for downloading latest releases. If ``package_source`` contains a branch, then a BuildBot repo will also be added to the package search path, to use in-development packages. Note, the ClusterHQ repo is always enabled, to provide dependencies. :param bytes distribution: The distribution the node is running. :param PackageSource package_source: The source from which to install the package. :return: a sequence of commands to run on the distribution """ if package_source.branch: # A development branch has been selected - add its Buildbot repo use_development_branch = True result_path = posixpath.join( '/results/omnibus/', package_source.branch, distribution) base_url = urljoin(package_source.build_server, result_path) else: use_development_branch = False commands = [ # Minimal images often have cleared apt caches and are missing # packages that are common in a typical release. These commands # ensure that we start from a good base system with the required # capabilities, particularly that the add-apt-repository comma
nd # and HTTPS URLs are supported. # FLOC-1880 will ensure these are necessary and sufficient. sudo_from_args(["apt-get", "update"]), sudo_from_args([ "apt-get", "-y
", "install", "apt-transp
i = 0
while i < 3:
    while i < 2:
        i += 1
    i += 1
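# For clarity, the nested loops above terminate with i == 3: the inner loop
# lifts i from 0 to 2, the outer `i += 1` makes it 3, and both conditions
# then fail.
assert i == 3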
from math import pi, sin, cos, tan, sqrt
from recordclass import recordclass
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from functools import reduce


def db2a(db):
    return np.power(10, (db / 20.0))


def a2db(a):
    return 20 * np.log10(a)


def series_coeffs(c):
    # Tuple parameters in lambdas are Python 2 only syntax; unpack by index
    # so this also runs on Python 3 (the file already imports
    # functools.reduce, which Python 3 requires).
    return reduce(lambda ab, xy: (np.convolve(ab[0], xy[0]),
                                  np.convolve(ab[1], xy[1])), c)


def twopass_coeffs(c):
    return series_coeffs(c + c)


def get_linkwitz_riley_coeffs(gain, lo, hi, sr):
    def get_c(cutoff, sr):
        wcT = pi * cutoff / sr
        return 1 / tan(wcT)

    def get_lopass_coeffs(gain, cutoff, sr):
        c = get_c(cutoff, sr)
        a0 = c * c + c * sqrt(2) + 1
        b = [gain / a0, 2 * gain / a0, gain / a0]
        a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
        return b, a

    def get_hipass_coeffs(gain, cutoff, sr):
        c = get_c(cutoff, sr)
        a0 = c * c + c * sqrt(2) + 1
        b = [(gain * c * c) / a0, (-2 * gain * c * c) / a0, (gain * c * c) / a0]
        a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
        return b, a

    return twopass_coeffs([get_lopass_coeffs(gain, hi, sr),
                           get_hipass_coeffs(gain, lo, sr)])


def get_notch_coeffs(gain, centre, sr, Q):
    A = db2a(gain / 2)
    w0 = 2 * pi * centre / sr
    cw0 = cos(w0)
    sw0 = sin(w0)
    # RBJ cookbook: alpha = sin(w0) / (2 * Q), i.e. divide by 2Q
    alpha = sw0 / (2 * Q)
    a0 = 1 + alpha / A
    b = [(1 + alpha * A) / a0, (-2 * cw0) / a0, (1 - alpha * A) / a0]
    a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
    return b, a


def get_peak_coeffs(gain, centre, sr, Q):
    A = db2a(gain / 2)
    w0 = 2 * pi * centre / sr
    cw0 = cos(w0)
    sw0 = sin(w0)
    alpha = sw0 / (2 * Q)  # as above: divide by 2Q
    a0 = 1 + alpha / A
    b = [(1 + (alpha * A)) / a0, (-2 * cw0) / a0, (1 - alpha * A) / a0]
    a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
    return b, a


BiquadMemory = recordclass('BiquadMemory', ['z1', 'z2'])
BiquadCoefficients = recordclass(
    'BiquadCoefficients', ['b0', 'b1', 'b2', 'a1', 'a2'])


def biquad_step(i, bm, bc):
    out = i * bc.b0 + bm.z1
    bm.z1 = i * bc.b1 - bc.a1 * out + bm.z2
    bm.z2 = i * bc.b2 - bc.a2 * out
    return out


def biquad_cascade(i, bm, bc):
    for m, c in zip(bm, bc):
        i = biquad_step(i, m, c)
    return i


def impedance_filter(c):
    num = c[0]
    den = c[1]
    summed = [a + b for a, b in zip(den, num)]
    subbed = [a - b for a, b in zip(den, num)]
    norm = 1 / subbed[0]
    summed = [i * norm for i in summed]
    subbed = [i * norm for i in subbed]
    return [summed, subbed]


def eighth_order_step(i, m, c):
    out = i * c[0][0] + m[0]
    m[0] = i * c[0][1] - c[1][1] * out + m[1]
    m[1] = i * c[0][2] - c[1][2] * out + m[2]
    m[2] = i * c[0][3] - c[1][3] * out + m[3]
    m[3] = i * c[0][4] - c[1][4] * out + m[4]
    m[4] = i * c[0][5] - c[1][5] * out + m[5]
    m[5] = i * c[0][6] - c[1][6] * out + m[6]
    m[6] = i * c[0][7] - c[1][7] * out + m[7]
    m[7] = i * c[0][8] - c[1][8] * out
    return out


def main():
    edges = [30, 60, 120, 240]
    corners = zip(edges[:-1], edges[1:])
    centres = [(a + b) / 2 for a, b in corners]
    # c = [get_linkwitz_riley_coeffs(1, b, a, edges[-1] * 2) for b, a in corners]
    sr = 2000
    c = [get_peak_coeffs(-24, i, sr, 1) for i in centres]
    c.append([[1, 0, 0], [1, 0, 0]])

    bm = [BiquadMemory(0, 0) for _ in c]
    bc = [BiquadCoefficients(b0, b1, b2, a1, a2)
          for [b0, b1, b2], [a0, a1, a2] in c]

    c.append(series_coeffs(c))
    # c.append(impedance_filter(c[-1]))

    wh = [signal.freqz(b, a) for b, a in c]

    plt.subplot(111)
    plt.title("Frequency response - reflection filter")

    for w, h in wh:
        plt.semilogx(w, 20 * np.log10(np.abs(h)))

    plt.ylabel('Amplitude Response (dB)')
    plt.xlabel('Frequency (rad/sample)')
    plt.grid()
    plt.show()


if __name__ == "__main__":
    main()
################################################################################ # Copyright (C) 2012-2013 Leap Motion, Inc. All rights reserved. # # Leap Motion proprietary and confidential. Not for distribution. # # Use subject to the terms of the Leap Motion SDK Agreement available at # # https://developer.leapmotion.com/sdk_agreement, or another agreement # # between Leap Motion and you, your company or other organization. # ################################################################################ # set library path import os, sys, inspect src_dir = os.path.dirname(inspect.getfile(inspect.currentframe())) arch_dir = 'lib/x64' if sys.maxsize > 2**32 else 'lib/x86' sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir))) import Leap, sys, thread, time from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture class SampleListener(Leap.Listener): finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky'] bone_names = ['Metacarpal', 'Proximal', 'Intermediate', 'Distal'] state_names = ['STATE_INVALID', 'STATE_START', 'STATE_UPDATE', 'STATE_END'] def on_init(self, controller): print "Initialized" def on_connect(self, controller): print "Connected" # Enable gestures controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE); controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP); controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP); controller.enable_gesture(Leap.Gesture.TYPE_SWIPE); def on_disconnect(self, controller): # Note: not dispatched when running in a debugger. print "Disconnected" def on_exit(self, controller): print "Exited" def on_frame(self, controller): # Get the most recent frame and report some basic information frame = controller.frame() print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d, gestures: %d" % ( frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools), len(frame.gestures())) # Get hands for hand in frame.hands: handType = "Left hand" if hand.is_left else "Right hand" print " %s, id %d, position: %s" % ( handType, hand.id, hand.palm_position) # Get the hand's normal vector and direction normal = hand.palm_normal direction = hand.direction # Calculate the hand's pitch, roll, and yaw angles print " pitch: %f degrees, roll: %f degrees, yaw: %f degrees" % ( direction.pitch * Leap.RAD_TO_DEG, normal.roll * Leap.RAD_TO_DEG, direction.yaw * Leap.RAD_TO_DEG) # Get arm bone arm = hand.arm print " Arm direction: %s, wrist position: %s, elbow position: %s" % ( arm.direction, arm.wrist_position, arm.elbow_position) # Get fingers for finger in hand.fingers: print " %s finger, id: %d, length: %fmm, width: %fmm" % ( self.finger_names[finger.type()], finger.id, finger.length, finger.width) # Get bones for b in range(0, 4): bone = finger.bone(b) print " Bone: %s, start: %s, end: %s, direction: %s" % ( self.bone_names[bone.type], bone.prev_joint, bone.next_joint, bone.direction) # Get tools for tool in frame.tools: print " Tool id: %d, position: %s, direction: %s" % ( tool.id, tool.tip_position, tool.direction) # Get gestures for gesture in frame.gestures(): if gesture.type == Leap.Gesture.TYPE_CIRCLE: circle = CircleGesture(gesture) # Determine clock direction using the angle between the pointable and the circle normal if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/2: clockwiseness = "clockwise" else: clockwiseness = "counterclockwise" # Calculate the angle swept since the last frame swept_angle = 0 if circle.state != Leap.Gesture.STATE_START: previous_update = 
CircleGesture(controller.frame(1).gesture(circle.id)) swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI print " Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % ( gesture.id, self.state_names[gesture.state], circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness) if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture) print " Swipe id: %d, state: %s, position: %s, direction: %s, speed: %f" % ( gesture.id, self.state_names[gesture.state], swipe.position, swipe.direction, swipe.speed)
if gesture.type == Leap.Gesture.TYPE_KEY_TAP: keytap = KeyTapGesture(gesture) print " Key Tap id: %d, %s, position: %s, direction: %s" % ( gesture.id, self.state_names[gesture.state], keytap.position, keytap.direction ) if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP: screentap = ScreenTapGesture(gesture) print " Screen Tap id: %d, %s, position: %s, direction: %s" % ( gesture.id, self.state_names[gesture.state], screentap.position, screentap.direction ) if not (frame.hands.is_empty and frame.gestures().is_empty): print "" def state_string(self, state): if state == Leap.Gesture.STATE_START: return "STATE_START" if state == Leap.Gesture.STATE_UPDATE: return "STATE_UPDATE" if state == Leap.Gesture.STATE_STOP: return "STATE_STOP" if state == Leap.Gesture.STATE_INVALID: return "STATE_INVALID" def main(): # Create a sample listener and controller listener = SampleListener() controller = Leap.Controller() # Have the sample listener receive events from the controller controller.add_listener(listener) # Keep this process running until Enter is pressed print "Press Enter to quit..." try: sys.stdin.readline() except KeyboardInterrupt: pass finally: # Remove the sample listener when done controller.remove_listener(listener) if __name__ == "__main__": main()
rs from packstack.installer import basedefs from packstack.installer import utils from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile from packstack.installer import exceptions from packstack.installer import output_messages # Controller object will # be initialized from main flow controller = None # Plugin name PLUGIN_NAME = "OS-Cinder" PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue') logging.debug("plugin %s loaded", __name__) def initConfig(controllerObject): global controller controller = controllerObject logging.debug("Adding OpenStack Cinder configuration") paramsList = [ {"CMD_OPTION" : "cinder-host", "USAGE" : "The IP address of the server on which to install Cinder", "PROMPT" : "Enter the IP address of the Cinder server", "OPTION_LIST" : [], "VALIDATORS" : [validators.validate_ssh], "DEFAULT_VALUE" : utils.get_localhost_ip(), "MASK_INPUT" : False, "LOOSE_VALIDATION": True, "CONF_NAME" : "CONFIG_CINDER_HOST", "USE_DEFAULT" : False, "NEED_CONFIRM" : False, "CONDITION" : False }, {"CMD_OPTION" : "cinder-db-passwd", "USAGE" : "The password to use for the Cinder to access DB", "PROMPT" : "Enter the password for the Cinder DB access", "OPTION_LIST" : [], "VALIDATORS" : [validators.validate_not_empty], "DEFAULT_VALUE" : uuid.uuid4().hex[:16], "MASK_INPUT" : True, "LOOSE_VALIDATION": False, "CONF_NAME" : "CONFIG_CINDER_DB_PW", "USE_DEFAULT" : True, "NEED_CONFIRM" : True, "CONDITION" : False }, {"CMD_OPTION" : "cinder-ks-passwd", "USAGE" : "The password to use for the Cinder to authenticate with Keystone", "PROMPT" : "Enter the password for the Cinder Keystone access", "OPTION_LIST" : [], "VALIDATORS" : [validators.validate_not_empty], "DEFAULT_VALUE" : uuid.uuid4().hex[:16], "MASK_INPUT" : True, "LOOSE_VALIDATION": False, "CONF_NAME" : "CONFIG_CINDER_KS_PW", "USE_DEFAULT" : True, "NEED_CONFIRM" : True, "CONDITION" : False }, {"CMD_OPTION" : "cinder-backend", "USAGE" : ("The Cinder backend to use, valid options are: " "lvm, gluster, nfs"), "PROMPT" : "Enter the Cinder backend to be configured", "OPTION_LIST" : ["lvm", "gluster", "nfs"], "VALIDATORS" : [validators.validate_options], "DEFAULT_VALUE" : "lvm", "MASK_INPUT" : False, "LOOSE_VALIDATION": False, "CONF_NAME" : "CONFIG_CINDER_BACKEND", "USE_DEFAULT" : False, "NEED_CONFIRM" : False, "CONDITION" : False }, ] groupDict = { "GROUP_NAME" : "CINDER", "DESCRIPTION" : "Cinder Config parameters", "PRE_CONDITION" : "CONFIG_CINDER_INSTALL", "PRE_CONDITION_MATCH" : "y", "POST_CONDITION" : False, "POST_CONDITION_MATCH" : True} controller.addGroup(groupDict, paramsList) def check_lvm_options(config): return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'lvm') paramsList = [ {"CMD_OPTION" : "cinder-volumes-create", "USAGE" : ("Create Cinder's volumes group. This should only be done for " "testing on a proof-of-concept installation of Cinder. 
This " "will create a file-backed volume group and is not suitable " "for production usage."), "PROMPT" : ("Should Cinder's volumes group be created (for proof-of-concept " "installation)?"), "OPTION_LIST" : ["y", "n"], "VALIDATORS" : [validators.validate_options], "DEFAULT_VALUE" : "y", "MASK_INPUT" : False, "LOOSE_VALIDATION": False, "CONF_NAME" : "CONFIG_CINDER_VOLUMES_CREATE", "USE_DEFAULT" : False, "NEED_CONFIRM" : False, "CONDITION" : False }, ] groupDict = { "GROUP_NAME" : "CINDERVOLUMECREATE", "DESCRIPTION" : "Cinder volume create Config parameters", "PRE_CONDITION" : check_lvm_options, "PRE_CONDITION_MATCH" : True, "POST_CONDITION" : False, "POST_CONDITION_MATCH" : True} controller.addGroup(groupDict, paramsList) def check_lvm_vg_options(config): return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'lvm' and config.get('CONFIG_CINDER_VOLUMES_CREATE', 'y') == 'y') paramsList = [ {"CMD_OPTION" : "cinder-volumes-size", "USAGE" : ("Cinder's volumes group size. Note that actual volume size " "will be extended with 3% more space for VG metadata."), "PROMPT" : "Enter Cinder's volumes group usable size", "OPTION_LIST" : [], "VALIDATORS" : [validators.validate_not_empty], "DEFAULT_VALUE" : "20G", "MASK_INPUT" : False, "LOOSE_VALIDATION": False, "CONF_NAME" : "CONFIG_CINDER_VOLUMES_SIZE", "USE_DEFAULT" : False, "NEED_CONFIRM" : False, "CONDITION" : False }, ] groupDict = { "GROUP_NAME" : "CINDERVOLUMESIZE", "DESCRIPTION" : "Cinder volume size Config parameters", "PRE_CONDITION" : check_lvm_vg_options, "PRE_CONDITION_MATCH" : True, "POST_CONDITION" : False, "POST_CONDITION_MATCH" : True} controller.addGroup(groupDict, paramsList) def check_gluster_options(config): return (config.get('CONFIG_CINDER_INSTALL', 'n') == 'y' and config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'gluster') paramsList = [ {"CMD_OPTION" : "cinder-gluster-mounts", "USAGE" : ("A single or comma separated list of gluster volume shares " "to mount, eg: ip-address:/vol-name "), "PROMPT"
: ("Ente
r a single or comma separated list of gluster volume " "shares to use with Cinder"), "OPTION_LIST" : ["^'([\d]{1,3}\.){3}[\d]{1,3}:/.*'"], "VALIDATORS" : [validators.validate_multi_regexp], "PROCESSORS" : [processors.process_add_quotes_around_values], "DEFAULT_VALUE" : "", "MASK_INPUT" : False, "LOOSE_VALIDATION": True, "
"""Testing the StringEnum class.""" import ezenum as ez
e def test_basic(): """Just check it out.""" rgb = eze.StringEnum(['Red', 'Green', 'Blue']) assert rgb.Red == 'Red' assert rgb.Green == 'Green' assert rgb.Blue == 'Blue' asser
t rgb[0] == 'Red' assert rgb[1] == 'Green' assert rgb[2] == 'Blue' assert len(rgb) == 3 assert repr(rgb) == "['Red', 'Green', 'Blue']"
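# A sketch of the behaviour the test above pins down, in case the semantics
# are unclear; ezenum's actual StringEnum implementation may differ
# internally.
class _StringEnumSketch(object):
    def __init__(self, names):
        self._names = list(names)
        for name in self._names:
            setattr(self, name, name)   # rgb.Red == 'Red'

    def __getitem__(self, index):
        return self._names[index]       # rgb[0] == 'Red'

    def __len__(self):
        return len(self._names)

    def __repr__(self):
        return repr(self._names)        # "['Red', 'Green', 'Blue']"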
SSL/TLS support and the pyOpenSSL module. A HIGH severity warning will be reported whenever known broken protocol versions are detected. It is worth noting that native support for TLS 1.2 is only available in more recent Python versions, specifically 2.7.9 and up, and 3.x A note on 'SSLv23': Amongst the available SSL/TLS versions provided by Python/pyOpenSSL there exists the option to use SSLv23. This very poorly named option actually means "use the highest version of SSL/TLS supported by both the server and client". This may (and should be) a version well in advance of SSL v2 or v3. Bandit can scan for the use of SSLv23 if desired, but its detection does not necessarily indicate a problem. When using SSLv23 it is important to also provide flags to explicitly exclude bad versions of SSL/TLS from the protocol versions considered. Both the Python native and pyOpenSSL modules provide the ``OP_NO_SSLv2`` and ``OP_NO_SSLv3`` flags for this purpose. **Config Options:** .. code-block:: yaml ssl_with_bad_version: bad_protocol_versions: - PROTOCOL_SSLv2 - SSLv2_METHOD - SSLv23_METHOD - PROTOCOL_SSLv3 # strict option - PROTOCOL_TLSv1 # strict option - SSLv3_METHOD # strict option - TLSv1_METHOD # strict option :Example: .. code-block:: none >> Issue: ssl.wrap_socket call with insecure SSL/TLS protocol version identified, security issue. Severity: High Confidence: High Location: ./examples/ssl-insecure-version.py:13 12 # strict tests 13 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3) 14 ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1) .. seealso:: - :func:`ssl_with_bad_defaults` - :func:`ssl_with_no_version` - http://heartbleed.com/ - https://poodlebleed.com/ - https://security.openstack.org/ - https://security.openstack.org/guidelines/dg_move-data-securely.html .. versionadded:: 0.9.0 """
bad_ssl_versions = get_bad_proto_versions(config) if context.call_function_name_qual == 'ssl.wrap_socket': if context.check_call_arg_value('ssl_version', bad_ssl_versions): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="ssl.wrap_socket call with insecure SSL/TLS protocol " "version identified, security issue.", lineno=context.get_lineno_for_call_arg('ssl_
version'), ) elif context.call_function_name_qual == 'pyOpenSSL.SSL.Context': if context.check_call_arg_value('method', bad_ssl_versions): return bandit.Issue( severity=bandit.HIGH, confidence=bandit.HIGH, text="SSL.Context call with insecure SSL/TLS protocol " "version identified, security issue.", lineno=context.get_lineno_for_call_arg('method'), ) elif (context.call_function_name_qual != 'ssl.wrap_socket' and context.call_function_name_qual != 'pyOpenSSL.SSL.Context'): if (context.check_call_arg_value('method', bad_ssl_versions) or context.check_call_arg_value('ssl_version', bad_ssl_versions)): lineno = (context.get_lineno_for_call_arg('method') or context.get_lineno_for_call_arg('ssl_version')) return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, text="Function call with insecure SSL/TLS protocol " "identified, possible security issue.", lineno=lineno, ) @test.takes_config("ssl_with_bad_version") @test.checks('FunctionDef') @test.test_id('B503') def ssl_with_bad_defaults(context, config): """**B503: Test for SSL use with bad defaults specified** This plugin is part of a family of tests that detect the use of known bad versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for a complete discussion. Specifically, this plugin test scans for Python methods with default parameter values that specify the use of broken SSL/TLS protocol versions. Currently, detection supports methods using Python's native SSL/TLS support and the pyOpenSSL module. A MEDIUM severity warning will be reported whenever known broken protocol versions are detected. **Config Options:** This test shares the configuration provided for the standard :doc:`../plugins/ssl_with_bad_version` test, please refer to its documentation. :Example: .. code-block:: none >> Issue: Function definition identified with insecure SSL/TLS protocol version by default, possible security issue. Severity: Medium Confidence: Medium Location: ./examples/ssl-insecure-version.py:28 27 28 def open_ssl_socket(version=SSL.SSLv2_METHOD): 29 pass .. seealso:: - :func:`ssl_with_bad_version` - :func:`ssl_with_no_version` - http://heartbleed.com/ - https://poodlebleed.com/ - https://security.openstack.org/ - https://security.openstack.org/guidelines/dg_move-data-securely.html .. versionadded:: 0.9.0 """ bad_ssl_versions = get_bad_proto_versions(config) for default in context.function_def_defaults_qual: val = default.split(".")[-1] if val in bad_ssl_versions: return bandit.Issue( severity=bandit.MEDIUM, confidence=bandit.MEDIUM, text="Function definition identified with insecure SSL/TLS " "protocol version by default, possible security " "issue." ) @test.checks('Call') @test.test_id('B504') def ssl_with_no_version(context): """**B504: Test for SSL use with no version specified** This plugin is part of a family of tests that detect the use of known bad versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for a complete discussion. Specifically, This plugin test scans for specific methods in Python's native SSL/TLS support and the pyOpenSSL module that configure the version of SSL/TLS protocol to use. These methods are known to provide default value that maximize compatibility, but permit use of the aforementioned broken protocol versions. A LOW severity warning will be reported whenever this is detected. **Config Options:** This test shares the configuration provided for the standard :doc:`../plugins/ssl_with_bad_version` test, please refer to its documentation. :Example: .. 
code-block:: none

        >> Issue: ssl.wrap_socket call with no SSL/TLS protocol version
           specified, the default SSLv23 could be insecure, possible security
           issue.
           Severity: Low   Confidence: Medium
           Location: ./examples/ssl-insecure-version.py:23
        22
        23  ssl.wrap_socket()
        24

    .. seealso::

     - :func:`ssl_with_bad_version`
     - :func:`ssl_with_bad_defaults`
     - http://heartbleed.com/
     - https://poodlebleed.com/
     - https://security.openstack.org/
     - https://security.openstack.org/guidelines/dg_move-data-securely.html

    .. versionadded:: 0.9.0

    """
    if context.call_function_name_qual == 'ssl.wrap_socket':
        if context.check_call_arg_value('ssl_version') is None:
            # check_call_arg_value() returns False if the argument is found
            # but does not match the supplied value (or the default None).
            # It returns None if the arg_name passed doesn't exist.  This
            # tests for that (ssl_version is not specified).
            return bandit.Issue(
                severity=bandit.LOW,
                confidence=bandit.MEDIUM,
                text="ssl.wrap_socket call with no SSL/TLS protocol version "
                     "specified, the default SSLv23 could be insecure, "
                     "possible security issue."
            )
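# Illustrative target code for the three plugins above -- my own sketch, not
# one of Bandit's shipped example files. It is written for static scanning
# (the `pyOpenSSL` import path matches the plugins' qualified-name checks)
# and uses constants that only exist on older Python/pyOpenSSL versions, so
# it is meant to be scanned, not executed, on a modern interpreter:
#
#     import ssl
#     from pyOpenSSL import SSL
#
#     ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)  # B502, HIGH severity
#     SSL.Context(method=SSL.SSLv2_METHOD)             # B502, HIGH severity
#     ssl.wrap_socket()                                # B504, LOW severity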
nt_id), # ClientId size client_id) # ClientId @classmethod def _encode_message_set(cls, messages): """ Encode a MessageSet. Unlike other arrays in the protocol, MessageSets are not length-prefixed Format ====== MessageSet => [Offset MessageSize Message] Offset => int64 MessageSize => int32 """ message_set = [] for message in messages: encoded_message = KafkaProtocol._encode_message(message) message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0, len(encoded_message), encoded_message)) return b''.join(message_set) @classmethod def _encode_message(cls, message): """ Encode a single message. The magic number of a message is a format version number. The only supported magic number right now is zero Format ====== Message => Crc MagicByte Attributes Key Value Crc => int32 MagicByte => int8 Attributes => int8 Key => bytes Value => bytes """ if message.magic == 0: msg = b''.join([ struct.pack('>BB', message.magic, message.attributes), write_int_string(message.key), write_int_string(message.value) ]) crc = crc32(msg) msg = struct.pack('>i%ds' % len(msg), crc, msg) else: raise ProtocolError("Unexpected magic number: %d" % message.magic) return msg ################## # Public API # ################## @classmethod def encode_produce_request(cls, payloads=(), acks=1, timeout=1000): """ Encode a ProduceRequest struct Arguments: payloads: list of ProduceRequestPayload acks: How "acky" you want the request to be 1
: written to disk by the leader 0: immediate response -1: waits for all replicas to be in sync timeout: Maximum time (in ms) the server will wait for replica acks. This is _not_ a socket timeout Returns: ProduceRequest """
if acks not in (1, 0, -1): raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks) return kafka.protocol.produce.ProduceRequest( required_acks=acks, timeout=timeout, topics=[( topic, [( partition, [(0, 0, kafka.protocol.message.Message(msg.value, key=msg.key, magic=msg.magic, attributes=msg.attributes)) for msg in payload.messages]) for partition, payload in topic_payloads.items()]) for topic, topic_payloads in group_by_topic_and_partition(payloads).items()]) @classmethod def decode_produce_response(cls, response): """ Decode ProduceResponse to ProduceResponsePayload Arguments: response: ProduceResponse Return: list of ProduceResponsePayload """ return [ kafka.common.ProduceResponsePayload(topic, partition, error, offset) for topic, partitions in response.topics for partition, error, offset in partitions ] @classmethod def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096): """ Encodes a FetchRequest struct Arguments: payloads: list of FetchRequestPayload max_wait_time (int, optional): ms to block waiting for min_bytes data. Defaults to 100. min_bytes (int, optional): minimum bytes required to return before max_wait_time. Defaults to 4096. Return: FetchRequest """ return kafka.protocol.fetch.FetchRequest( replica_id=-1, max_wait_time=max_wait_time, min_bytes=min_bytes, topics=[( topic, [( partition, payload.offset, payload.max_bytes) for partition, payload in topic_payloads.items()]) for topic, topic_payloads in group_by_topic_and_partition(payloads).items()]) @classmethod def decode_fetch_response(cls, response): """ Decode FetchResponse struct to FetchResponsePayloads Arguments: response: FetchResponse """ return [ kafka.common.FetchResponsePayload( topic, partition, error, highwater_offset, [ kafka.common.OffsetAndMessage(offset, message) for offset, _, message in messages]) for topic, partitions in response.topics for partition, error, highwater_offset, messages in partitions ] @classmethod def encode_offset_request(cls, payloads=()): return kafka.protocol.offset.OffsetRequest( replica_id=-1, topics=[( topic, [( partition, payload.time, payload.max_offsets) for partition, payload in six.iteritems(topic_payloads)]) for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) @classmethod def decode_offset_response(cls, response): """ Decode OffsetResponse into OffsetResponsePayloads Arguments: response: OffsetResponse Returns: list of OffsetResponsePayloads """ return [ kafka.common.OffsetResponsePayload(topic, partition, error, tuple(offsets)) for topic, partitions in response.topics for partition, error, offsets in partitions ] @classmethod def encode_metadata_request(cls, topics=(), payloads=None): """ Encode a MetadataRequest Arguments: topics: list of strings """ if payloads is not None: topics = payloads return kafka.protocol.metadata.MetadataRequest(topics) @classmethod def decode_metadata_response(cls, response): return response @classmethod def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads): """ Encode a ConsumerMetadataRequest Arguments: client_id: string correlation_id: int payloads: string (consumer group) """ message = [] message.append(cls._encode_message_header(client_id, correlation_id, KafkaProtocol.CONSUMER_METADATA_KEY)) message.append(struct.pack('>h%ds' % len(payloads), len(payloads), payloads)) msg = b''.join(message) return write_int_string(msg) @classmethod def decode_consumer_metadata_response(cls, data): """ Decode bytes to a ConsumerMetadataResponse Arguments: data: bytes 
to decode """ ((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0) (host, cur) = read_short_string(data, cur) ((port,), cur) = relative_unpack('>i', data, cur) return ConsumerMetadataResponse(error, nodeId, host, port) @classmethod def encode_offset_commit_request(cls, group, payloads): """ Encode an OffsetCommitRequest struct Arguments: group: string, the consumer group you are committing offsets for payloads: list of OffsetCommitRequestPayload """ return kafka.protocol.commit.OffsetCommitRequest_v0( consumer_group=group, topics=[( topic, [( partition, payload.offset,
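# For symmetry with _encode_message above, a minimal decoding sketch for the
# magic-0 header (my own illustration, not part of kafka-python; it uses the
# stdlib crc32 rather than kafka's own helper):
import struct
from binascii import crc32

def decode_message_header(data):
    """Unpack Crc, MagicByte and Attributes from an encoded magic-0 Message
    and verify the checksum, which covers everything after the Crc field."""
    crc, magic, attributes = struct.unpack('>iBB', data[:6])
    if crc32(data[4:]) & 0xffffffff != crc & 0xffffffff:
        raise ValueError('CRC mismatch')
    if magic != 0:
        raise ValueError('Unexpected magic number: %d' % magic)
    return magic, attributes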
# # Copyright 2008 Google Inc. All Rights Reserved. """ The user module contains the objects and methods used to manage users in Autotest. The valid action is: list: lists user(s) The common options are: --ulist / -U: file containing a list of USERs See topic_common.py for a High Level Design and Algorithm. """ import os import sys from autotest.cli import topic_common, action_common class user(topic_common.atest): """User class atest user list <options>""" usage_action = 'list' topic = msg_topic = 'user' msg_items = '<users>' def __init__(self): """Add to the parser the options common to all the user actions""" super(user, self).__init__() self.parser.add_option('-U', '--ulist', help='File listing the users', type='string', default=None, metavar='USER_FLIST') self.topic_parse_info = topic_common.item_parse_info( attribute_name='users', filename_option='ulist', use_leftover=True) def get_items(self): return self.users class user_help(user): """Just here to get the atest logic working. Usage is set by its parent""" pass class user_list(action_common.atest_list, user): """atest user list <user>|--ulist <file> [--acl <ACL>|--access_level <n>]""" def __init__(self): super(user_list, self).__init__() self.parser.add_option('-a', '--acl', help='Only list users within this ACL') self.parser.add_option('-l', '--access_level', help='Only list users at this access level') def parse(self): (options, leftover) = super(user_list, self).parse() self.acl = options.acl self.access_level = options.access_level return (options, leftover) def execute(self): filters = {} check_results = {} if self.acl: filters['aclgroup__name__in'] = [self.acl] check_results['aclgroup__name__in'] = None if self.access_level: filters['access_level__in'] = [self.access_level] check_results['access_level__in'] = None if self.users: filters['login__in'] = self.users check_results['login__in'] = 'login' return super(user_list, self).execute(op='get_users', filters=filters, check_results=check_results) def output(self, results): if self.verbose: keys = ['id', 'login', 'access_level']
else: keys = ['login']
super(user_list, self).output(results, keys)
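# For reference, the filter dictionaries that user_list.execute() assembles
# for a hypothetical invocation like `atest user list user1 user2 --acl myacl`
# (values illustrative, derived directly from the code above):
#
#     filters = {
#         'aclgroup__name__in': ['myacl'],    # from --acl
#         'login__in': ['user1', 'user2'],    # from the positional user list
#     }
#     check_results = {
#         'aclgroup__name__in': None,
#         'login__in': 'login',
#     }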
# This file is part of the FragDev Website. # # the FragDev Website is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, o
r # (at your option) any later version. # # the FragDev Website is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MER
CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with the FragDev Website. If not, see <http://www.gnu.org/licenses/>. # Placeholder urlpatterns list, in case views are added app_name = 'images' urlpatterns = []
from Graph import Graph

def mkTestGraph4():
    return Graph( ['a','b','c','d'],
                  [ ('a','b'), ('b','c'), ('c','a'), ('a','d') ] )

def mkTestGraph4b(): ## isomorphic with 4
    return Graph( ['a','c','b','d'],
                  [ ('a','c'), ('b','c'), ('b','a'), ('a','d') ] )

def mk5Clique():
    # edges between all distinct pairs only; the original ranged over all
    # (x,y) pairs, which also produced loops and duplicate edges
    return Graph( 5, [ (x,y) for x in range(5) for y in range(x+1,5) ] )

def mkTestGraph6():
    return Graph( ['a','b','c','d','e','f'],
                  [ ('a','b'), ('b','c'), ('c','d'),
                    ('a','d'), ('d','b'), ('c','e') ] )

'''
Schematic of test graph 6

a -- b    f
|    |       (also edge between d and b)
d -- c -- e

rows of correct give number of shortest paths
from a source node to all nodes
'''

def mkTestGraph6b(): ## not isomorphic with 6
                     ## (d,b) edge replaced with (a,c)
    return Graph( ['a','b','c','d','e','f'],
                  [ ('a','b'), ('b','c'), ('c','d'),
                    ('a','d'), ('a','c'), ('c','e') ] )

def mkPetersenGraph():
    return Graph( 10,
                  [ (0,1),(1,2),(2,3),(3,4),(4,0),  # outer polygon
                    (5,6),(6,7),(7,8),(8,9),(9,5),  # inner polygon
                    (0,5),(1,8),(2,6),(3,9),(4,7) ] # btwn polygons
                )

class PossibleEdges:
    ## this keeps a list of edges (x,y) such that the ith
    ## edge has x at 2*i position and y at 2*i+1 position
    ## the order of the edges in the list doesn't matter
    ## and changes with each restart
    from array import array

    def __init__(me,numNodes):
        me.totalNum = int( 0.5 + numNodes*(numNodes-1.0)/2.0 )
        me.edges = PossibleEdges.array('H',[0]*(2*me.totalNum))
        me.last_idx = me.totalNum-1
        edge_index = 0
        for i in range(numNodes):
            for j in range(i+1,numNodes):
                me.edges[ edge_index*2 ] = i
                me.edges[ edge_index*2+1 ] = j
                edge_index += 1
        assert edge_index-1 == me.last_idx

    def restart(me):
        me.last_idx = me.totalNum-1

    def remove(me,idx):
        idx2 = 2*idx
        lx2 = 2*me.last_idx
        x = me.edges[idx2]
        y = me.edges[idx2+1]
        me.edges[idx2] = me.edges[lx2]
        me.edges[idx2+1] = me.edges[lx2+1]
        me.edges[lx2] = x
        me.edges[lx2+1] = y
        me.last_idx -= 1
        return (x,y)

class MakeRandom:
    from random import SystemRandom,seed,randrange
    seed( SystemRandom().random() )

    def __init__(me,numNodes):
        me.numNodes = numNodes
        me.possible_edges = PossibleEdges(numNodes)

    def getEdges(me,numEdges):
        me.possible_edges.restart()
        assert numEdges > 0 and \
               numEdges < me.possible_edges.totalNum, (
            "MakeRandom: number of edges "
            "expected to be positive and less than total "
            "for an undirected graph without "
            "loops or multiple edges" )
        count = 0
        # print 'generating ' + str(me.possible_edges.totalNum) + \
        #       ' edge pairs '
        edges = []
        while count<numEdges:
            # sample only from the not-yet-removed prefix; the original drew
            # from the full range, which could re-select a removed edge
            i = MakeRandom.randrange(me.possible_edges.last_idx+1)
            edges.append(me.possible_edges.remove(i))
            count += 1
        return edges

    def getIsoPair(me,density=0.5):
        ## return two graphs with different labelling
        numEdges = int( 0.5 + me.possible_edges.totalNum * density )
        print "making isomorphic pair with " + \
              str(me.numNodes) + \
              " nodes and " + str(numEdges) + " edges."
        edges = me.getEdges(numEdges)
        gph1 = Graph(me.numNodes,edges)
        return (gph1,gph1.relabelledClone())

    def getNonIsoPair(me,density=0.5):
        ## return two graphs with different labelling
        ## they have same number of edges but one edge
        ## is different
        numEdges = int( 0.5 + me.possible_edges.totalNum * density )
        print "making non-isomorphic pair with " + \
              str(me.numNodes) + \
              " nodes and " + str(numEdges) + " edges."
        edges = me.getEdges(numEdges+1)
        ## make graphs by removing a random edge
        i = MakeRandom.randrange(numEdges)
        j = i
        # for 2nd graph need random j different from i
        while j==i:
            j = MakeRandom.randrange(numEdges)
        return ( Graph(me.numNodes, edges[0:i] + edges[i+1:]),
                 Graph(me.numNodes, edges[0:j] + edges[j+1:]).
                     relabelledClone() )
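# Usage sketch for the generators above (node count and density are
# illustrative; assumes the Graph class imported at the top of this module):
if __name__ == '__main__':
    rnd = MakeRandom(8)
    g1, g2 = rnd.getIsoPair(density=0.4)     # same edge set, relabelled clone
    h1, h2 = rnd.getNonIsoPair(density=0.4)  # differ by exactly one edge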
if self.hist_arr[i] is not None: U.histogram(img, amin=self.hist_min[i], amax=self.hist_max[i], histArr=self.hist_arr[i]) self.hist[i].setHist(self.hist_arr[i], self.hist_min[i], self.hist_max[i]) else: resolution = 10000 a_h = U.histogram(img, resolution, mmms[0], mmms[1]) self.hist[i].setHist(a_h, mmms[0], mmms[1]) def autoFitHistL(self): for i in range( self.doc.nw ): self.hist[i].autoFit(amin=self.mmms[i][0], amax=self.mmms[i][1]) def OnHistToggleButton(self, ev=None, i=0, mode=None): if ev is not None: i = self.hist_toggleID2col[ ev.GetId() ] self.hist_show[i] = self.hist_toggleButton[i].GetValue() # 1-self.hist_show[i] # 'r': go "singleCHannelMode" -- show only channel i using grey scale, hide others if mode == 'r': if self.hist_singleChannelMode == i: # switch back to normal for ii in range(self.doc.nw): wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii) label = str(wave) self.hist_toggleButton[ii].SetLabel(label) r, g, b = self.hist[ii].m_histGlRGB [v.setColor(ii, r, g, b, RefreshNow=ii==self.doc.nw-1) for v in self.viewers] [v.setVisibility(ii, self.hist_show[ii], RefreshNow=ii==self.doc.nw-1) for v in self.viewers] self.hist_singleChannelMode = None else: # active grey mode for color i only for ii in range(self.doc.nw): if ii == i: wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii) label = str(wave) self.hist_toggleButton[ii].SetLabel(label) visible = self.hist_show[ii] [v.setColor(ii, 1,1,1, RefreshNow=ii==self.doc.nw-1) for v in self.viewers] else: self.hist_toggleButton[ii].SetLabel('--') visible = False [v.setVisibility(ii, visible, RefreshNow=ii==self.doc.nw-1) for v in self.viewers] self.hist_singleChannelMode = i # other mode: show all color channels (when hist_show[i] is true) else: if self.hist_singleChannelMode is not None: # switch back to normal for ii in range(self.doc.nw): wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii) label = str(wave) self.hist_toggleButton[ii].SetLabel(label)#'%d'%ii) r, g, b = self.hist[ii].m_histGlRGB if self.hist_show[ii]: visible = True else: visible = False ## disable this wavelength; don't even show black [v.setColor(ii, r, g, b, RefreshNow=ii==self.doc.nw-1) for v in self.viewers] [v.setVisibility(ii, visible, RefreshNow=ii==self.doc.nw-1) for v in self.viewers] else: if self.hist_show[i]: visible = True else: visible = False ## disable this wavelength; don't even show black [
v.setVisibility(i, visible) for v in self.viewers] #self.doc._wIdx = [w for w, bl in enumerate(self.hist_show) if bl]
def OnZSliderBox(self, event=None): z = int(self.zSliderBox.GetValue()) if z >= self.doc.nz: z = self.doc.nz - 1 elif z < 0: z = 0 #while z < 0: #z = self.doc.nz + z self.set_zSlice(z) self.zSlider.SetValue(z) def OnZSlider(self, event): z = event.GetInt() self.set_zSlice(z) self.zSliderBox.SetValue(str(z)) def OnKeyZSlider(self, evnt): keycode = evnt.GetKeyCode() if keycode == wx.WXK_RIGHT: self.doc.z += 1 if self.doc.z >= self.doc.nz: self.doc.z = self.doc.nz - 1 elif keycode == wx.WXK_LEFT: self.doc.z -= 1 if self.doc.z < 0: self.doc.z = 0 self.zSliderBox.SetValue(str(self.doc.z)) self.set_zSlice(self.doc.z) self.zSlider.SetValue(self.doc.z) evnt.Skip() def OnKeyTSlider(self, evnt): keycode = evnt.GetKeyCode() if keycode == wx.WXK_RIGHT: self.doc.t += 1 if self.doc.t >= self.doc.nt: self.doc.t = self.doc.nt - 1 elif keycode == wx.WXK_LEFT: self.doc.t -= 1 if self.doc.t < 0: self.doc.t = 0 self.tSliderBox.SetValue(str(self.doc.t)) self.set_tSlice(self.doc.t) self.tSlider.SetValue(self.doc.t) evnt.Skip() def set_zSlice(self, z): self.doc.z = int(z) if self.doc.z >= self.doc.nz: self.doc.z = self.doc.nz elif self.doc.z < 0: self.doc.z = 0 ## insert # zsecTuple = tuple(self.zsec) #section-wise gfx: name=tuple(zsec) try: self.viewers[0].newGLListEnableByName((self.doc.zlast,), on=False, skipBlacklisted=True, refreshNow=False) except KeyError: pass try: self.viewers[0].newGLListEnableByName((self.doc.z,), on=True, skipBlacklisted=True, refreshNow=False) except KeyError: pass self.doc.zlast = z ##### end self.updateGLGraphics(list(range(len(self.viewers)))) self.recalcHistL(False) for i in range( self.doc.nw ): self.hist[i].Refresh(0) def OnTSliderBox(self, event): t = int(self.tSliderBox.GetValue()) if t >= self.doc.nt: t = self.doc.nt - 1 while t < 0: t = self.doc.nt + t self.set_tSlice(t) self.tSlider.SetValue(t) def OnTSlider(self, event): t = event.GetInt() self.set_tSlice(t) self.tSliderBox.SetValue(str(t)) def set_tSlice(self, t): self.doc.t = int(t) self.updateGLGraphics(list(range(len(self.viewers)))) self.recalcHistL(False) for i in range( self.doc.nw ): self.hist[i].Refresh(0) def takeSlice(self, axisSet=(0,1,2)): ''' return the slice of the data array (of all wavelengths) defined by time ti and the axis this slice is normal to: 0 -- z; 1 -- y; 2 -- x. self.alignParams[i]: (tz, ty, tx, rot, mag) ''' #t = self.doc.t nc = self.doc.nz / 2. retSlice = {} sliceIdx = [self.doc.z, self.doc.y, self.doc.x] # print 'takeSlice' for w in range(self.doc.nw): if hasattr(self.doc, 'alignParms'): tz, ty, tx, rot, magz, magy, magx = self.doc.alignParms[self.doc.t,w][:7] else: tz, ty, tx, rot, magz, magy, magx = 0, 0, 0, 0, 1, 1, 1 for axisSliceNormalTo in axisSet: # axis 0,1,2 shape = [self.doc.nz, self.doc.ny, self.doc.nx] shape.pop(axisSliceNormalTo) # projection shape retSlice.setdefault(axisSliceNormalTo, []).append(N.zeros(shape, self.doc.dtype)) # canvas if hasattr(self.doc, 'alignParms'): tc = self.doc.alignParms[self.doc.t,w, axisSliceNormalTo] else: tc = 0 ## if it's a x-y slice, or if there's no rotation, then use the simple slicing method # x-y view uses openGL to rotate and magnify if axisSliceNormalTo == 0: whichSlice = sliceIdx[axisSliceNormalTo] - tc#\ #self.doc.alignParms[self.doc.t,w, axisSliceNormalTo] whichSlice = round((whichSlice - nc) / float(magz) + nc)
init__( module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate ) def get_enabled_immediate(self, rule, timeout): # Convert the rule string to standard format # before checking whether it is present rule = str(Rich_Rule(rule_str=rule)) if rule in self.fw.getRichRules(self.zone): return True else: return False def get_enabled_permanent(self, rule, timeout): fw_zone, fw_settings = self.get_fw_zone_settings() # Convert the rule string to standard format # before checking whether it is present rule = str(Rich_Rule(rule_str=rule)) if rule in fw_settings.getRichRules(): return True else: return False def set_enabled_immediate(self, rule, timeout): self.fw.addRichRule(self.zone, rule, timeout) def set_enabled_permanent(self, rule, timeout): fw_zone, fw_settings = self.get_fw_zone_settings() fw_settings.addRichRule(rule) self.update_fw_settings(fw_zone, fw_settings) def set_disabled_immediate(self, rule, timeout): self.fw.removeRichRule(self.zone, rule) def set_disabled_permanent(self, rule, timeout): fw_zone, fw_settings = self.get_fw_zone_settings() fw_settings.removeRichRule(rule) self.update_fw_settings(fw_zone, fw_settings) class SourceTransaction(FirewallTransaction): """ SourceTransaction """ def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False): super(SourceTransaction, self).__init__( module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate ) self.enabled_msg = "Added %s to zone %s" % \ (self.action_args[0], self.zone) self.disabled_msg = "Removed %s from zone %s" % \ (self.action_args[0], self.zone) def get_enabled_immediate(self, source): if source in self.fw.getSources(self.zone): return True else: return False def get_enabled_permanent(self, source): fw_zone, fw_settings = self.get_fw_zone_settings() if source in fw_settings.getSources(): return True else: return False def set_enabled_immediate(self, source): self.fw.addSource(self.zone, source) def set_enabled_permanent(self, source): fw_zone, fw_settings = self.get_fw_zone_settings() fw_settings.addSource(source) self.update_fw_settings(fw_zone, fw_settings) def set_disabled_immediate(self, source): self.fw.removeSource(self.zone, source) def set_disabled_permanent(self, source): fw_zone, fw_settings = self.get_fw_zone_settings() fw_settings.removeSource(source) self.update_fw_settings(fw_zone, fw_settings) class ZoneTransaction(FirewallTransaction): """ ZoneTransaction """ def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=True, immediate=False, enabled_values=None, disabled_values=None): super(ZoneTransaction, self).__init__( module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate, enabled_values=enabled_values or ["present"], disabled_values=disabled_values or ["absent"]) self.enabled_msg = "Added zone %s" % \ (self.zone) self.disabled_msg = "Removed zone %s" % \ (self.zone) self.tx_not_permanent_error_msg = "Zone operations must be permanent. " \ "Make sure you didn't set the 'permanent' flag to 'false' or the 'immediate' flag to 'true'." 
def get_enabled_immediate(self): self.module.fail_json(msg=self.tx_not_permanent_error_msg) def get_enabled_permanent(self): zones = self.fw.config().listZones() zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones] if self.zone in zone_names: return True else: return False def set_enabled_immediate(self): self.module.fail_json(msg=self.tx_not_permanent_error_msg) def set_enabled_permanent(self): self.fw.config().addZone(self.zone, FirewallClientZoneSettings()) def set_disabled_immediate(self): self.module.fail_json(msg=self.tx_not_permanent_error_msg) def set_disabled_permanent(self): zone_obj = self.fw.config().getZoneByName(self.zone) zone_obj.remove() def main(): module = AnsibleModule( argument_spec=dict( icmp_block=dict(type='str'), icmp_block_inversion=dict(type='str'), service=dict(type='str'), port=dict(type='str'), rich_rule=dict(type='str'), zone=dict(type='str'), immediate=dict(type='bool', default=False), source=dict(type='str'), permanent=dict(type='bool'), state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']), timeout=dict(type='int', default=0), interface=dict(type='str'), masquerade=dict(type='str'), offline=dict(type='bool'), ), supports_check_mode=True ) permanent = module.params['permanent'] desired_state = module.params['state'] immediate = module.params['immediate'] timeout = module.params['timeout'] interface = module.params['interface'] masquerade = module.params['masquerade'] # Sanity checks FirewallTransaction.sanity_check(module) # If neither permanent or immediate is provided, assume immediate (as # written in the module's docs) if not permanent and not immediate: immediate = True # Verify required params are provided if immediate and fw_offline: module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon') changed = False msgs = [] icmp_block = module.params['icmp_block'] icmp_block_inversion = module.params['icmp_block_i
nversion'] service = module.params['service'] rich_rule = module.params['rich_rule'] source = module.params['source'] zone = module.params['zone'] if module.params['port'] is not None: if '/' in module.params['port']: port, protocol = module.params['port'].strip().split('/') else: protocol = None if not protocol: module.fa
il_json(msg='improper port format (missing protocol?)') else: port = None modification_count = 0 if icmp_block is not None: modification_count += 1 if icmp_block_inversion is not None: modification_count += 1 if service is not None: modification_count += 1 if port is not None: modification_count += 1 if rich_rule is not None: modification_count += 1 if interface is not None: modification_count += 1 if masquerade is not None: modification_count += 1 if modification_count > 1: module.fail_json( msg='can only operate on port, service, rich_rule, masquerade, icmp_block, icmp_block_inversion, or interface at once' ) elif modification_count > 0 and desired_state in ['absent', 'present']: module.fail_json( msg='absent and present state can only be used in zone level operations' ) if icmp_block is not None: transaction = IcmpBlockTransaction( module, action_args=(icmp_block, timeout), zone=zone, desired_state=desired_state, permanent=permanent, immediate=immediate, ) changed, transaction_msgs = transaction.run() msgs = msgs + transaction_msgs if changed is True: msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state))
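# Illustrative parameter set this module would receive from a playbook task
# that enables a rich rule both immediately and permanently (all values are
# hypothetical; keys follow the argument_spec above):
example_params = {
    'rich_rule': 'rule family="ipv4" source address="192.0.2.0/24" accept',
    'zone': 'public',
    'state': 'enabled',
    'permanent': True,
    'immediate': True,
    'timeout': 0,
}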
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Flag.testing' db.add_column('waffle_flag', 'testing', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'Flag.testing' db.delete_column('waffle_flag', 'testing') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.m
odels.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False
'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'waffle.flag': { 'Meta': {'object_name': 'Flag'}, 'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}), 'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}) }, 'waffle.sample': { 'Meta': {'object_name': 'Sample'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}) }, 'waffle.switch': { 'Meta': {'object_name': 'Switch'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}) } } complete_apps = ['waffle']
from datetime import datetime class ModelManager(object): def __init__(self, db, collection_name, has_stats=False, **kwargs): self.property_helper = None self.log_helper = None self.collection_name = collection_name self.db = db if 'logger' in kwargs: self.log_helper = kwargs['logger'] if collection_name in self.db.collection_names(): self.collection = self.db[collection_name] else: self.collection = self.db.create_collection(collection_name) if has_stats: self.add_stats_collection() def add_stats_collection(self): self.stats_collection_name = '%s_stats' % self.collection_name if self.stats_collection_name in self.db.collection_names(): self.stats_collection = self.db[self.stats_collection_name] else: self.stats_collection = self.db.create_collection(self.stats_collection_name) def close_connection(self): pass def save_object(self, instance): instance.validate_fields() return self.collection.save(instance.get_as_dict()) #deprecated: name is confusing def save_document(self, document): document.validate_fields() return self.collection.save(document.get_as_dict()) def set_property_helper(self, property_helper): self.property_helper = property_helper def set_log_helper(self, log_helper): self.log_helper = log_helper def __getattr__(self,attr): orig_attr =
self.collection.__getattribute__(attr) if callable(orig_attr): def hooked(*args, **kwargs): result = orig_attr(*args, **kwargs) # prevent wrapped_class from becoming unwrapped if result == self.collection: return self return result return hooked else: return orig_attr def get_local_time(self, date_format='d
atetime'): if self.property_helper is None: return datetime.now() return self.property_helper.get_local_time(date_format) def log(self, msg, level='msg'): if self.log_helper is not None: self.log_helper.log(msg, level) def drop(self): self.collection.drop() if hasattr(self, 'stats_collection'): self.stats_collection.drop()
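# Usage sketch: attribute lookups that ModelManager does not define fall
# through __getattr__ to the wrapped pymongo collection (the database name
# and query below are hypothetical):
from pymongo import MongoClient

db = MongoClient()['example_db']
manager = ModelManager(db, 'events', has_stats=True)
doc = manager.find_one({'status': 'open'})  # proxied to collection.find_one
manager.log('fetched one event')            # no-op here: no log_helper set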
************************************* from latticeCommon import * import latticeBaseFeature import latticeExecuter import latticeCompoundExplorer as LCE from latticeBoundBox import getPrecisionBoundBox #needed for alignment import FreeCAD as App import Part from Draft import _ShapeString __title__="BoundingBox module for FreeCAD" __author__ = "DeepSOIC" __url__ = "" def findFont(font_file_name): '''checks for existance of the file in a few locations and returns the full path of the first one found''' import os if os.path.isabs(font_file_name): if not os.path.exists(font_file_name): raise ValueError("Font file not found: " + font_file_name ) return font_file_name dirlist = [] #list of directories to probe import latticeDummy lattice_path = os.path.dirname(latticeDummy.__file__) dirlist.append(lattice_path + "/fonts") if len(App.ActiveDocument.FileName) > 0: dirlist.append(os.path.dirname(App.ActiveDocument.FileName)+"/fonts") dirlist.append(os.path.abspath(os.curdir)) #todo: figure out the path to system fonts, and add it here #do the probing for _dir in dirlist: if os.path.exists(_dir + "/" + font_file_name): return _dir + "/" + font_file_name raise ValueError("Font file not found: "+font_file_name +". Locations probed: \n"+'\n'.join(dirlist)) # -------------------------- document object -------------------------------------------------- def makeLatticeShapeString(name): '''makeBoundBox(name): makes a BoundBox object.''' obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name) LatticeShapeString(obj) ViewProviderLatticeShapeString(obj.ViewObject) return obj class FoolFeatureDocumentObject: '''A class that is to be fed to Draft ShapeString object instead of a real one, to obtain shapes it generates''' def __init__(self): self.Placement = App.Placement() self.Shape = Part.Shape() self.properties = [] self.Proxy = None def addProperty(self, proptype, propname, group = None, hint = None): setattr(self,propname,None) self.properties.append((proptype, propname, group, hint)) class LatticeShapeString: "The LatticeShapeString object" def __init__(self,obj): self.Type = "LatticeShapeString" #initialize accompanying Draft ShapeString self.makeFoolObj(obj) foolObj = self.foolObj #add Draft ShapeString's properties to document object in posession of our LatticeShapeString for (proptype, propname, group, hint) in foolObj.properties: if propname != "String": #we'll define our own string property obj.addProperty(proptype,propname,"Lattice ShapeString",hint) obj.addProperty("App::PropertyLink","ArrayLink","Lattice ShapeString","array to use for the shapestring") obj.addProperty("App::PropertyStringList","Strings","Lattice ShapeString","Strings to put at each placement.") obj.addProperty("App::PropertyEnumeration","XAlign","Lattice ShapeString","Horizontal alignment of individual strings") obj.XAlign = ['None','Left','Right','Middle'] obj.addProperty("App::PropertyEnumeration","YAlign","Lattice ShapeString","Vertical alignment of individual strings") obj.YAlign = ['None','Top','Bottom','Middle'] obj.addProperty("App::PropertyBool","AlignPrecisionBoundBox","Lattice ShapeString","Use precision bounding box for alignment. Warning: slow!") obj.addProperty("App::PropertyFile","FullPathToFont","Lattice ShapeString","Full path of font file that is actually being used.") obj.setEditorMode("FullPathToFont", 1) # set read-only obj.Proxy = self self.setDefaults(obj) def makeFoolObj(self,obj): '''Makes an object that mimics a Part::FeaturePython, and makes a Draft ShapeString object on top of it. 
Both are added as attributes to self. This is needed to re-use Draft ShapeString''' if hasattr(self, "foolObj"): return foolObj = FoolFeatureDocumentObject() self.draft_shape_string = _ShapeString(foolObj) self.foolObj = foolObj def setDefaults(self, obj): '''initializes the properties, so that LatticeShapeString can be used with no initial fiddling''' obj.FontFile = "FreeUniversal-Regular.ttf" obj.Size = 10 obj.Tracking = 0 obj.Strings = ['string1','string2'] def execute(self,obj): nOfStrings = len(obj.Strings) lattice = obj.ArrayLink if lattice is None: plms = [App.Placement() for i in range(0,nOfStrings)] else: if not latticeBaseFeature.isObjectLattice(lattice): latticeExecuter.warning(obj,"ShapeString's link to array must point to a lattice. It points to a generic shape. Results may be unexpected.") leaves = LCE.AllLeaves(lattice.Shape) plms = [leaf.Placement for leaf in leaves] #update foolObj's properties self.makeFoolObj(obj) #make sure we have one - fixes defunct Lattice ShapeString after save-load for (proptype, propname, group, hint) in self.foolObj.properties: if propname != "String": #ignore "String", that will be taken care of in the following loop setattr(self.foolObj, propname, getattr(obj, propname)) self.foolObj.FontFile = findFont(obj.FontFile) obj.FullPathToFont = self.foolObj.FontFile shapes = [] for i in range( 0 , min(len(plms),len(obj.Strings)) ): if len(obj.Strings[i]) > 0: #generate shapestring using Draft self.foolObj.String = obj
.Strings[i]
                self.foolObj.Shape = None
                self.draft_shape_string.execute(self.foolObj)
                shape = self.foolObj.Shape

                #calculate alignment point
                if obj.XAlign == 'None' and obj.YAlign == 'None':
                    pass #need not calculate boundbox
                else:
                    if obj.AlignPrecisionBoundBox:
                        bb = getPrecisionBoundBox(shape)
                    else:
                        bb = shape.BoundBox
                    alignPnt = App.Vector()
                    if obj.XAlign == 'Left':
                        alignPnt.x = bb.XMin
                    elif obj.XAlign == 'Right':
                        alignPnt.x = bb.XMax
                    elif obj.XAlign == 'Middle':
                        alignPnt.x = bb.Center.x
                    if obj.YAlign == 'Bottom':
                        alignPnt.y = bb.YMin
                    elif obj.YAlign == 'Top':
                        alignPnt.y = bb.YMax
                    elif obj.YAlign == 'Middle':
                        alignPnt.y = bb.Center.y
                    #Apply alignment
                    shape.Placement = App.Placement(alignPnt*(-1.0), App.Rotation()).multiply(shape.Placement)
                #Apply placement from array
                shape.Placement = plms[i].multiply(shape.Placement)
                shapes.append(shape.copy())
        if len(shapes) == 0:
            scale = 1.0
            if lattice is not None:
                # the original read len(shps); 'shps' is undefined here, and
                # the number of placements is what was evidently intended
                scale = lattice.Shape.BoundBox.DiagonalLength/math.sqrt(3)/math.sqrt(len(plms))
            if scale < DistConfusion * 100:
                scale = 1.0
            obj.Shape = markers.getNullShapeShape(scale)
            raise ValueError('No strings were converted into shapes') #Feeding empty compounds to FreeCAD seems to cause rendering issues, otherwise it would have been a good idea to output nothing.
        obj.Shape = Part.makeCompound(shapes)

    def __getstate__(self):
        return None

    def __setstate__(self,state):
        return None

class ViewProviderLatticeShapeString:
    "A View Provider for the LatticeShapeString object"

    def __init__
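# Usage sketch for the document object above, to be run inside an open
# FreeCAD document (property values are illustrative):
#
#     obj = makeLatticeShapeString("Labels")
#     obj.Strings = ['A1', 'A2', 'A3']
#     obj.XAlign = 'Middle'
#     obj.YAlign = 'Middle'
#     App.ActiveDocument.recompute()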
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD.  If not, see <http://www.gnu.org/licenses/>.
#

import macosx_native

def main():
    macosx_native.probe_coreaudio(True,True)
#! /usr/bin/python # -*- coding: utf-8 -*- import os import sys import logging import time import logging.config dir_cur = os.path.normpath(os.path.dirname(os.path.abspath(__file__)).split('bin')[0]) if dir_cur not in sys.path: sys.path.insert(0, dir_cur) log_dir = os.path.normpath(dir_cur + os.path.sep + 'logs' + os.path.sep + 'snmp_trap_logs') if not os.path.isdir(log_dir): os.makedirs(log_dir) log_file = os.path.normpath(log_dir + os.path.sep + "snmp_trap_v2v3_" + str(os.getpid()) + ".log") logging_config = os.path.normpath(dir_cur + os.path.sep + 'config' + os.path.sep + 'snmp_trap' + os.path.sep + 'logging.config') logging.config.fileConfig(logging_config, defaults={'log_file': log_file}) from com.ericsson.xn.commons import CommonUtil CommonUtil.pre_check(systems=['Linux', 'Windows', 'Darwin']) from com.ericsson.xn.snmp import SnmpTrapUtils sep = os.path.sep snmp_conf = dir_cur + sep + 'config' + sep + 'snmp_trap' template_dir = os.path.normpath(snmp_conf + sep + 'templates') mapping_file = os.path.normpath(snmp_conf + sep + 'mappings') alarm_id_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'alarmid_oids') timestamp_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'time_oids') v3_auth_file = os.path.normpath(snmp_conf + sep + 'v3_auth') id_file = os.path.normpath(snmp_conf + sep + 'id') options = SnmpTrapUtils.get_and_check_options() traps_map = SnmpTrapUtils.read_used_trap_templates(options, template_dir, mapping_file, alarm_id_file, timestamp_file) logging.debug('**Start to send traps**') if not traps_map: msg = 'Fail to read the alarm template files.' logging.error(msg) # print msg else: client_ip = None if '' == options.clientip else options.clientip engine_id = None if '' == options.engineid else options.engineid list_engine = SnmpTrapUtils.init_trap_engine(traps_map, options, v3_auth_file, client_ip,engine_id) if not list_engine: msg = 'Fail to init the trap engines.' logging.error(msg) # print msg else: if 'n' == options.mode: t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file) try: t.start() while not t.b_stop: time.sleep(.5) except KeyboardInterrupt: t.stop() msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now." logging.info(msg) print msg # print msg elif 'c' == options.mode: if 1 < len(options.list.split(',')): msg = "We can only send one alarm in Clear mode, you have feed more than one alarm " \ "IDs for the '--list' option." logging.critical(msg) else: t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file, False) try: t.start() while not t.b_stop: time.sleep(.5) except KeyboardInterrupt: msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now." logging.info(msg) print msg elif 's' == options.mode: try: t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file) t.start() while not t.b_stop: time.sleep(.5) except KeyboardInterrupt: msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now." logging.info(msg)
print msg t.stop() elif 'p' == options.mode: if 0 != len(options.list.split(',')) % 2: msg = "In pare s
torm mode, number of alarms should be an EVEN number, otherwise there will be mismatch." logging.critical(msg) else: try: t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file, True) t.start() while not t.b_stop: time.sleep(.5) except KeyboardInterrupt: msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now." logging.info(msg) print msg t.stop() elif 'sn' == options.mode: try: t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file) t.start() while not t.b_stop: time.sleep(.5) except KeyboardInterrupt: msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now." logging.info(msg) print msg t.stop() elif 'pn' == options.mode: if 0 != len(options.list.split(',')) % 2: msg = "In pare storm mode, number of alarms should be an EVEN number, otherwise there will be mismatch." logging.critical(msg) else: try: t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1], list_engine[2], list_engine[3], id_file, True) t.start() while not t.b_stop: time.sleep(.5) except Exception as e: logging.error(str(e)) print str(e) t.stop() else: msg = "Other mode is not supported yet, exit now." logging.critical(msg) logging.debug('**End of sending traps**')
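# The start/poll/stop pattern above is repeated for every mode; a possible
# consolidation (sketch only; 'run_trap_thread' is not part of the original
# script):
def run_trap_thread(t):
    try:
        t.start()
        while not t.b_stop:
            time.sleep(.5)
    except KeyboardInterrupt:
        msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
        logging.info(msg)
        print msg
        t.stop()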
from cl.api import views from cl.audio import api_views as audio_views from cl.people_db import api_views as judge_views from cl.search import api_views as search_views from django.conf.urls import url, include from rest_framework.routers import DefaultRouter router = DefaultRouter() # Search & Audio router.register(r'dockets', search_views.DocketViewSet) router.register(r'courts', search_views.CourtViewSet) router.register(r'audio', audio_views.AudioViewSet) router.register(r'clusters', search_views.OpinionClusterViewSet) router.register(r'opinions', search_views.OpinionViewSet) router.register(r'opinions-cited', search_views.OpinionsCitedViewSet) router.register(r'search', search_views.SearchViewSet, base_name='search') # Judges router.register(r'people', judge_views.PersonViewSet) router.register(r'positions', judge_views.PositionViewSet) router.registe
r(r'retention-events', judge_views.RetentionEventViewSet) router.register(r'educations
', judge_views.EducationViewSet) router.register(r'schools', judge_views.SchoolViewSet) router.register(r'political-affiliations', judge_views.PoliticalAffiliationViewSet) router.register(r'sources', judge_views.SourceViewSet) router.register(r'aba-ratings', judge_views.ABARatingViewSet) urlpatterns = [ url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/rest/(?P<version>[v3]+)/', include(router.urls)), # Documentation url(r'^api/$', views.api_index, name='api_index'), url(r'^api/jurisdictions/$', views.court_index, name='court_index'), url(r'^api/rest-info/(?P<version>v[123])?/?$', views.rest_docs, name='rest_docs'), url(r'^api/bulk-info/$', views.bulk_data_index, name='bulk_data_index'), url(r'^api/rest/v(?P<version>[123])/coverage/(?P<court>.+)/$', views.coverage_data, name='coverage_data'), # Pagerank file url(r'^api/bulk/external_pagerank/$', views.serve_pagerank_file, name='pagerank_file'), # Deprecation Dates: # v1: 2016-04-01 # v2: 2016-04-01 url(r'^api/rest/v(?P<v>[12])/.*', views.deprecated_api, name='deprecated_api'), ]
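# Illustrative endpoints the DefaultRouter above exposes under the v3 prefix
# (assuming DRF's standard list/detail routes; not an exhaustive list):
#
#     /api/rest/v3/dockets/        /api/rest/v3/people/
#     /api/rest/v3/opinions/       /api/rest/v3/search/
#     /api/rest/v3/dockets/42/     (detail route; the id 42 is hypothetical)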
l, r = [int(x) for x in input().split()]
if max(l, r) == 0:
    print("Not a moose")
elif l == r:
    print("Even {}".format(l + r))
else:
    print("Odd {}".format(max(l, r) * 2))
""" Created on April 14, 2017 @author Miguel Contreras M
orales """ import QueryTool import datetime import cherrypy as QueryServer
import os if __name__ == "__main__": """ This initializes CherryPy services + self - no input required """ print "Intializing!" portnum = 9100 # start the QeueryServer QueryServer.config.update({'server.socket_host' : '127.0.0.1', 'server.socket_port': portnum, 'server.socket_timeout': 600, 'server.thread_pool' : 8, 'server.max_request_body_size': 0 }) wwwPath = os.path.join(os.getcwd(),'www') print wwwPath staticdir = './www' print staticdir conf = { '/': { 'tools.sessions.on': True, 'tools.staticdir.on': True, 'tools.staticdir.dir': wwwPath } } QueryServer.quickstart(QueryTool.QueryTool(dbaddress="10.30.5.203:27017", path= wwwPath), '/', conf)
from bokeh.util.deprecate import deprecated_module
deprecated_module('bokeh.properties', '0.11', 'use bokeh.core.properties instead')
del deprecated_module
from .core.properties import * # NOQA
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    "name": "Indonesia - Bukti Potong PPh 4 Ayat 2 (F.1.1.33.09)",
    "version": "8.0.1.1.0",
    "category": "localization",
    "website": "https://opensynergy-indonesia.com/",
    "author": "OpenSynergy Indonesia",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "depends": [
        "l10n_id_taxform_bukti_potong_pph_common",
    ],
    "data": [
        "security/ir.model.access.csv",
        "data/l10n_id_bukti_potong_type.xml",
        "views/bukti_potong_pph_f113309_in_views.xml",
        "views/bukti_potong_pph_f113309_out_views.xml",
    ],
}
import gzip import os import numpy as np import cPickle as pickle import six from six.moves.urllib import request import scipy from scipy import io # from sklearn import decomposition ''' BVH ''' def load_bvh_data(file_path): frames = 0 frame_time = 0.0 with open(file_path, "rb") as f: lines = f.readlines() n = 0 while lines[n].find('MOTION') < 0: n += 1 assert n < len(lines) # frames n += 1 frames = int(lines[n].split(" ")[-1].replace('\n', '')) # frame time n += 1 frame_time = float(lines[n].split(" ")[-1].replace('\n', '')) # motion data n += 1 for i in range(frames): motion = lines[n + i].split(' ') if i == 0: dim = len(motion) global motion_data motion_data = np.zeros(frames * dim, dtype=np.float32).reshape((frames, dim)) for j in range(dim): motion_data[i, j] = float(motion[j].replace('\n', '')) return frames, frame_time, motion_data ''' MNIST ''' def load_mnist(images, labels, num): dim = 784 data = np.zeros(num * dim, dtype=np.uint8).reshape((num, dim)) target = np.zeros(num, dtype=np.uint8).reshape((num, )) with gzip.open(images, 'rb') as f_images,\
gzip.open(labels, 'rb') as f_labels: f_images.read(16) f_labels.read(8) for i in six.moves.range(num): target[i] = ord(f_labels.read(1)) for j in six.moves.range(dim): data[i, j] = ord(f_images.read(1)) return data, target def download_mnist_data(data_dir): parent = 'http://yann.lecun.com/exdb/mnist' train_images = 'train-images-idx3-
ubyte.gz' train_labels = 'train-labels-idx1-ubyte.gz' test_images = 't10k-images-idx3-ubyte.gz' test_labels = 't10k-labels-idx1-ubyte.gz' num_train = 60000 num_test = 10000 print('Downloading {:s}...'.format(train_images)) request.urlretrieve('{:s}/{:s}'.format(parent, train_images), train_images) print('Done') print('Downloading {:s}...'.format(train_labels)) request.urlretrieve('{:s}/{:s}'.format(parent, train_labels), train_labels) print('Done') print('Downloading {:s}...'.format(test_images)) request.urlretrieve('{:s}/{:s}'.format(parent, test_images), test_images) print('Done') print('Downloading {:s}...'.format(test_labels)) request.urlretrieve('{:s}/{:s}'.format(parent, test_labels), test_labels) print('Done') print('Converting training data...') data_train, target_train = load_mnist(train_images, train_labels, num_train) print('Done') print('Converting test data...') data_test, target_test = load_mnist(test_images, test_labels, num_test) mnist = {} mnist['data'] = np.append(data_train, data_test, axis=0) mnist['target'] = np.append(target_train, target_test, axis=0) print('Done') print('Save output...') with open('%s/mnist/mnist.pkl' % data_dir, 'wb') as output: six.moves.cPickle.dump(mnist, output, -1) print('Done') print('Convert completed') def load_mnist_data(data_dir): if not os.path.exists('%s/mnist/mnist.pkl' % data_dir): download_mnist_data(data_dir) with open('%s/mnist/mnist.pkl' % data_dir, 'rb') as mnist_pickle: mnist = six.moves.cPickle.load(mnist_pickle) return mnist ''' SVHN ''' def download_svhn_data(data_dir): parent = 'http://ufldl.stanford.edu/housenumbers' train_images = 'train_32x32.mat' test_images = 'test_32x32.mat' data_path = data_dir+"/SVHN/" if not os.path.exists(data_path): os.mkdir(data_path) print('Downloading {:s}...'.format(train_images)) request.urlretrieve('{:s}/{:s}'.format(parent, train_images), data_path+train_images) print('Done') print('Downloading {:s}...'.format(test_images)) request.urlretrieve('{:s}/{:s}'.format(parent, test_images), data_path+test_images) print('Done') def svhn_pickle_checker(data_dir): if os.path.exists(data_dir+'/SVHN/train_x.pkl') and os.path.exists(data_dir+'/SVHN/train_y.pkl') \ and os.path.exists(data_dir+'/SVHN/test_x.pkl') and os.path.exists(data_dir+'/SVHN/test_y.pkl'): return 1 else: return 0 def load_svhn(data_dir, toFloat=True, binarize_y=True, dtype=np.float32, pca=False, n_components=1000): # if svhn_pickle_checker(data_dir) == 1: # print "load from pickle file." # train_x = pickle.load(open(data_dir+'/SVHN/train_x.pkl')) # train_y = pickle.load(open(data_dir+'/SVHN/train_y.pkl')) # test_x = pickle.load(open(data_dir+'/SVHN/test_x.pkl')) # test_y = pickle.load(open(data_dir+'/SVHN/test_y.pkl')) # # return train_x, train_y, test_x, test_y if not os.path.exists(data_dir+'/SVHN/train_32x32.mat') or not os.path.exists(data_dir+'/SVHN/test_32x32.mat'): download_svhn_data(data_dir) train = scipy.io.loadmat(data_dir+'/SVHN/train_32x32.mat') train_x = train['X'].swapaxes(0,1).T.reshape((train['X'].shape[3], -1)) train_y = train['y'].reshape((-1)) - 1 test = scipy.io.loadmat(data_dir+'/SVHN/test_32x32.mat') test_x = test['X'].swapaxes(0,1).T.reshape((test['X'].shape[3], -1)) test_y = test['y'].reshape((-1)) - 1 if toFloat: train_x = train_x.astype(dtype)/256. test_x = test_x.astype(dtype)/256. 
if binarize_y: train_y = binarize_labels(train_y) test_y = binarize_labels(test_y) # if pca: # x_stack = np.vstack([train_x, test_x]) # pca = decomposition.PCA(n_components=n_components) # pca.whiten=True # # pca.fit(x_stack) # # x_pca = pca.transform(x_stack) # x_pca = pca.fit_transform(x_stack) # train_x = x_pca[:train_x.shape[0], :] # test_x = x_pca[train_x.shape[0]:, :] # # with open('%s/SVHN/pca.pkl' % data_dir, "wb") as f: # pickle.dump(pca, f) # with open('%s/SVHN/train_x.pkl' % data_dir, "wb") as f: # pickle.dump(train_x, f) # with open('%s/SVHN/train_y.pkl' % data_dir, "wb") as f: # pickle.dump(train_y, f) # with open('%s/SVHN/test_x.pkl' % data_dir, "wb") as f: # pickle.dump(test_x, f) # with open('%s/SVHN/test_y.pkl' % data_dir, "wb") as f: # pickle.dump(test_y, f) return train_x, train_y, test_x, test_y def binarize_labels(y, n_classes=10): new_y = np.zeros((y.shape[0], n_classes)) for i in range(y.shape[0]): new_y[i, y[i]] = 1 return new_y.astype(np.float32) ''' Shakespeare ''' def load_shakespeare(data_dir): vocab = {} words = open('%s/tinyshakespeare/input.txt' % data_dir, 'rb').read() words = list(words) dataset = np.ndarray((len(words), ), dtype=np.int32) for i, word in enumerate(words): if word not in vocab: vocab[word] = len(vocab) dataset[i] = vocab[word] return dataset, words, vocab ''' music ''' def load_midi_data(data_dir): import midi.utils as utils from midi import MidiInFile as mf from midi import MidiToText as mt f = open(data_dir, 'rb') midiIn = mf.MidiInFile(mt.MidiToText(), f) midiIn.read() f.close() midi_data = utils.midiread(data_dir, dt=0.5) return midi_data.piano_roll
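# binarize_labels above can also be written as a vectorized one-hot lookup
# (an equivalent sketch, not part of the original module; np is already
# imported at the top of this file):
def binarize_labels_vectorized(y, n_classes=10):
    return np.eye(n_classes, dtype=np.float32)[y]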
#!/usr/bin/env python
from livereload import Server, shell server = Server() style = ("style.scss", "style.css") script = ("typing-test.js", "typing-test-compiled.js") server.watch(style[0], shell(["sass", style[0]], output=style[1])) server.watch(script[0], shell(["babel", script[0]], output=script[1])) server.watch("index.html") server.serve(port=8080, host="local
host", open_url=True)
from math import log   # log() was used below but never imported

def TFIDF(TF, complaints, term):
    if TF >= 1:
        n = len(complaints)
        x = sum([1 for complaint in complaints if term in complaint['body']])
        return log(TF + 1) * log(n / x)
    else:
        return 0

def DF(vocab, complaints):
    term_DF = dict()
    for term in vocab:
        term_DF[term] = sum([1 for complaint in complaints
                             if term in complaint['body']])
    threshold = 3
    features = [term for term in term_DF.keys() if term_DF[term] > threshold]
    return features

def chi_square(vocab, complaints, categories):
    features = []
    chi_table = dict()
    N = len(complaints)
    for term in vocab:
        chi_table[term] = dict()
        for category in categories:
            chi_table[term][category] = dict()
            A = 0
            B = 0
            C = 0
            D = 0
            for complaint in complaints:
                if term in complaint['body'] and complaint['category'] == category:
                    A += 1
                if term in complaint['body'] and complaint['category'] != category:
                    B += 1
                if term not in complaint['body'] and complaint['category'] == category:
                    C += 1
                if term not in complaint['body'] and complaint['category'] != category:
                    D += 1
            try:
                chi_table[term][category]['chi'] = \
                    (N * ((A * D) - (C * B))**2) / ((A + C) * (B + D) * (A + B) * (C + D))
                chi_table[term][category]['freq'] = A + C
            except ZeroDivisionError:
                print(term)
                print(category)
                print(A)
                print(B)
                print(C)
                print(D)
                input()
                # record zeros so the averaging loop below doesn't KeyError;
                # the original left these entries unset
                chi_table[term][category]['chi'] = 0
                chi_table[term][category]['freq'] = A + C
        chi_table[term]['chi_average'] = float()
        for category in categories:
            P = chi_table[term][category]['freq'] / N
            chi_table[term]['chi_average'] += P * chi_table[term][category]['chi']
        if chi_table[term]['chi_average'] > 3:
            features.append(term)
    print('Extracted {0} features'.format(len(features)))
    return features
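# Worked example of the TFIDF weighting above (natural logs, as math.log;
# the numbers are illustrative):
#   TF = 3, n = 100 complaints, x = 10 complaints containing the term
#   log(3 + 1) * log(100 / 10) ~= 1.386 * 2.303 ~= 3.19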
# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).

from odoo import api, models


class StockChangeStandardPrice(models.TransientModel):
    _inherit = "stock.change.standard.price"

    @api.model
    def default_get(self, fields):
        res = super(StockChangeStandardPrice, self).default_get(fields)
        product_or_template = self.env[self._context['active_model']].browse(
            self._context['active_id'])
        if 'counterpart_account_id' in fields:
            # We can only use one account here, so we use the decrease
            # account. It will be ignored anyway, because we'll use the
            # increase/decrease accounts defined in the product category.
            res['counterpart_account_id'] = product_or_template.categ_id. \
                property_inventory_revaluation_decrease_account_categ.id
        return res
import unittest

from pyml.nearest_neighbours import KNNClassifier, KNNRegressor
from pyml.datasets import gaussian, regression
from pyml.preprocessing import train_test_split


class TestKNNClassifier(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.datapoints, cls.labels = gaussian(n=100, d=2, labels=3, sigma=0.1, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = \
            train_test_split(cls.datapoints, cls.labels, train_split=0.95, seed=1970)
        cls.classifier = KNNClassifier(n=5)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_train(self):
        self.assertEqual(self.classifier.X, self.X_train)

    def test_predict(self):
        predictions = self.classifier.predict(X=self.X_test)
        self.assertEqual(predictions, [2, 2, 0, 0, 2, 0, 2, 2, 1, 1, 2, 0, 2, 2, 0])

    def test_score(self):
        accuracy = self.classifier.score(X=self.X_test, y_true=self.y_test)
        self.assertEqual(accuracy, 1.0)


class TestKNNRegressor(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = regression(100, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = \
            train_test_split(cls.X, cls.y, train_split=0.8, seed=1970)
        cls.regressor = KNNRegressor(n=5)
        cls.regressor.train(X=cls.X_train, y=cls.y_train)

    def test_train(self):
        self.assertEqual(self.regressor.X, self.X_train)

    def test_predict(self):
        predictions = self.regressor.predict(X=self.X_test)
        self.assertEqual(predictions[:5], [3.1161666191379163, 4.933573052500679,
                                           6.611283497257544, 9.185848057766739,
                                           3.110023909806445])

    def test_score_mse(self):
        mse = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mse')
        self.assertEqual(mse, 1.5470835956432736)

    def test_score_mae(self):
        mae = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mae')
        self.assertEqual(mae, 1.024567537840727)
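# An aside on the exact float assertions above (a note, not part of the
# original suite): assertEqual on floats only holds while seeding and
# arithmetic stay bit-for-bit identical across platforms; unittest's
# assertAlmostEqual is the tolerant form, e.g.:
#
#     self.assertAlmostEqual(mse, 1.5470835956432736, places=7)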
""" Default urlconf for noisefilter """ from django.conf import settings from django.conf.urls import include, url from django.conf.urls.static import static from django.contrib
import admin from django.contrib.sitemaps.views import index, sitemap from django.views.generic.base import TemplateView from django.views.defaults import (permission_denied, page_not_found, server_error) sitemaps = { # Fill me with sitemaps } admin.autodiscover() urlpatterns = [ url(r'', include('filter.urls')), url(r'base', include('base.urls')),
# Admin url(r'^admin/', include(admin.site.urls)), url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Sitemap url(r'^sitemap\.xml$', index, {'sitemaps': sitemaps}), url(r'^sitemap-(?P<section>.+)\.xml$', sitemap, {'sitemaps': sitemaps}), # robots.txt url(r'^robots\.txt$', TemplateView.as_view( template_name='robots.txt', content_type='text/plain') ), ] if settings.DEBUG: # Add debug-toolbar import debug_toolbar #noqa urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls))) # Serve media files through Django. urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # Show error pages during development urlpatterns += [ url(r'^403/$', permission_denied), url(r'^404/$', page_not_found), url(r'^500/$', server_error) ]
#!/usr/bin/env python

"""
rpgtoolkit.py

Generate a random webpage from a config file.

Lots of gaming resources are simple variations on a theme. Here's a big list,
choose a random thing from the list, and interpolate a bit using data from
some other lists.

Here's how this program works: given a config file, figure out how to make a
website from it. It looks for the "meta" config hash to figure out how to kick
itself off. It also knows how to interpolate simple variables.

Created by Justin McGuire <jm@landedstar.com>.
"""

import sys
import random
import re
import yaml
import os
import copy
import logging


class ToolConfig:
    config = None

    def __init__(self, config_file):
        self.load_config(config_file)
        self.title = self.config['meta']['title']
        self.copyright = self.config['meta']['copyright']
        self.generate = self.config['meta']['generate']
        self.start = self.config['meta']['start']
        self.norepeats = True
        self.saved_tags = {}

    def load_config(self, config_file):
        """load the config file into the static config variable, but only once"""
        if not os.path.isfile(config_file):
            sys.exit("config file: %s is not a file" % config_file)

        if not self.config:
            with open(config_file) as file:
                self.config = yaml.load(file)

    def create(self):
        """get a random selection."""

        # if we don't care about repeats, restore the config after every use;
        # get_random_item_from() mutates the lists in place, so a deep copy
        # is needed (backing up the bare reference would restore nothing)
        if not self.norepeats:
            self.backup_config = copy.deepcopy(self.config)

        # start the string with the "start" variable
        select = self.get_random_item_from(self.start)
        logging.debug("initial string %s" % select)

        select = self.interpolate(select)

        # these get set in interpolate, but must be unset elsewhere, since it's a
        # recursive function that doesn't know when its time is over
        self.saved_tags = {}

        if not self.norepeats:
            self.config = self.backup_config

        return select

    def get_random_item_from(self, listname):
        """remove a random item from one of the lists in the config, and return it"""
        pick = random.randint(0, len(self.config[listname]) - 1)
        return self.config[listname].pop(pick)

    def interpolate(self, string):
        """replace references in string with other items from hash, recursive"""

        # look for a reference, which looks like [hashname]
        m = re.search(r'\[([^]]*)\]', string)
        if m:
            tag = m.group(1)
            logging.debug("found tag %s" % tag)

            # the listname may need to be saved, so it can be reused later
            if ':' in tag:
                (list_name, saved_tag) = tag.split(':')
            else:
                list_name = tag
                saved_tag = ''
            logging.debug("tag split into list_name/saved_tag: %s/%s" %
                          (list_name, saved_tag))

            # get the new selection to replace the tag with
            selection = ''
            if list_name in self.saved_tags:
                # check if the list_name is actually a saved tag
                selection = self.saved_tags[list_name]
            else:
                # otherwise grab a random selection from the chosen list
                selection = self.get_random_item_from(list_name)

            # if we want to save the selection, do that now
            if saved_tag:
                self.saved_tags[saved_tag] = selection

            # there may be more interpolation
            logging.debug("replacing [%s] with %s" % (tag, selection))
            string = self.interpolate(string.replace('[%s]' % tag, selection, 1))

        return string


def main(config_file):
    logging.basicConfig(level=logging.WARNING)

    tool_config = ToolConfig(config_file)
    print tool_config.title

    # print out each random selection
    for x in range(tool_config.generate):
        item = tool_config.create()
        print "%d: %s" % (x+1, item)


def usage(error_msg=''):
    usage_msg = "usage: %s <config_file>" % sys.argv[0]
    if error_msg:
        sys.exit("%s\n%s" % (error_msg, usage_msg))
    else:
        sys.exit(usage_msg)


if __name__ == '__main__':
    # make sure our arguments are correct
    if len(sys.argv) > 1:
        config_file = sys.argv[1]
        if not os.path.isfile(config_file):
            usage("config file %s isn't a file" % config_file)
        main(config_file)
    else:
        usage()
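# A hypothetical minimal config for this script (all names invented for
# illustration; the keys under "meta" mirror what ToolConfig.__init__ reads):
#
#     meta:
#       title: Tavern Generator
#       copyright: 2015 Example Person
#       generate: 3        # how many selections main() prints
#       start: taverns     # list that seeds each selection
#
#     taverns:
#       - "The [adjective] [animal:mascot], known for its [adjective] ale"
#       - "[animal:mascot]'s Rest, with a [adjective] [mascot] on its sign"
#
#     adjective: [rusty, gilded, sleepy]
#     animal: [griffin, boar, otter]
#
# "[animal:mascot]" draws from the "animal" list and saves the pick under
# "mascot", so a later "[mascot]" repeats the same word within one selection.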
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: release-1.23 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class V1IngressClassSpec(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'controller': 'str', 'parameters': 'V1IngressClassParametersReference' } attribute_map = { 'controller': 'controller', 'parameters': 'parameters' } def __init__(self, controller=None, parameters=None, local_vars_configuration=None): # noqa: E501 """V1IngressClassSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._controller = None self._parameters = None self.discriminator = None if controller is not None: self.controller = controller if parameters is not None: self.para
meters = parameters @property def controller(self): """Gets the controller of this V1IngressClassSpec. # noqa: E501 Controller refe
rs to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501 :return: The controller of this V1IngressClassSpec. # noqa: E501 :rtype: str """ return self._controller @controller.setter def controller(self, controller): """Sets the controller of this V1IngressClassSpec. Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501 :param controller: The controller of this V1IngressClassSpec. # noqa: E501 :type: str """ self._controller = controller @property def parameters(self): """Gets the parameters of this V1IngressClassSpec. # noqa: E501 :return: The parameters of this V1IngressClassSpec. # noqa: E501 :rtype: V1IngressClassParametersReference """ return self._parameters @parameters.setter def parameters(self, parameters): """Sets the parameters of this V1IngressClassSpec. :param parameters: The parameters of this V1IngressClassSpec. # noqa: E501 :type: V1IngressClassParametersReference """ self._parameters = parameters def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1IngressClassSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1IngressClassSpec): return True return self.to_dict() != other.to_dict()
# -*- coding: utf-8 -*-
import datetime as dt

from tradenews.database import (
    Column,
    db,
    Model,
    SurrogatePK,
)


class NewsCluster(SurrogatePK, Model):

    __tablename__ = 'newscluster'
    # id = Column(db.Integer(), nullable=False, primary_key=True)
    date = Column(db.Text(), nullable=False, default=dt.datetime.utcnow)
    title = Column(db.Text(), nullable=True)
    text = Column(db.Text(), nullable=True)
    cluster = Column(db.Integer(), nullable=True)

    def __init__(self):
        db.Model.__init__(self)
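# An aside (not part of the model): `date` is declared as a Text column with a
# datetime default, so values end up stored as strings. If range queries on
# dates are needed, a typed column is the usual choice, assuming the db object
# exposes the standard SQLAlchemy types:
#
#     date = Column(db.DateTime(), nullable=False, default=dt.datetime.utcnow)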
# Copyright (c) 2007, Enthought, Inc. # License: BSD Style. #--(Interfaces)----------------------------------------------------------------- """ Interfaces ========== In Traits 3.0, the ability to define, implement and use *interfaces* has been added to the package. Defining Interfaces ------------------- Interfaces are defined by subclassing from the **Interface** class, as shown in the example below:: from traits.api import Interface class IName ( Interface ): def get_name ( self ): " Returns the name of an object. " This same code is shown in the **IName Interface** tab of the code. Interface classes are intended mainly as documentation of the methods and traits that the interface defines, and should not contain any actual implementation code, although no check is performed to enforce this currently. Implementing Interfaces ----------------------- A class declares that it implements one or more interfaces using the **implements** function, which has the form:: implements( interface [, interface2, ..., interfacen] ) The semantics of this function is that the class declares that it implements each of the *interfaces* specified as an argument to **implements**. Also, the call to **implements** must occur at class scope within the class definition, as shown in the following example:: from traits.api import HasTraits, implements class Person ( HasTraits ): implements( IName ) ... Only a single call to **implements** should occur within a class definition. Refer to the **Person Class** tab in the code for a complete example of using **implements**. Note that in the current version, traits does not check to ensure that the class containing the **implements** function actually implements the interfaces it says it does. Using Interfaces ---------------- Being able to define and implement interfaces would be of little use without the ability to *use* interfaces in your code. In traits, using an interface is accomplished using the **Instance** trait, as shown in the following example:: from traits.api import HasTraits, Instance class Apartment ( HasTraits ): renter = Instance( IName ) Using an interface class in an **Instance** trait definition declares that the trait only accepts values which are objects that either: - Implement the specified interface. - Can be adapted to an object that implements the specified interface. Additional information on what it means to *adapt* an object to implement an interface is presented in the next section of the tutorial. As before, the **Instance** trait can also be used with classes that are not interfaces, such as:: from traits.api import HasTraits, Instance class Apartment ( HasTraits ): renter = Instance( Person ) In this case, the value of the trait must be an object which is an instance of the specified class or one of its subclasses. """ #--<Imports>-------------------------------------------------------------------- from traits.api import * #--[IName Interface]------------------------------------------------------------ # Define the 'IName' interface: class IName ( Interface ): def get_name ( self ): """ Returns the name of an object. """ #--[Person Class]--------------------------------------------------------------- class Person ( HasTraits ): implements( IName ) first_name = Str( 'John' ) last_name = Str( 'Doe' ) # Implementation of the 'IName' interface: def get_name ( self ): """ Returns the name of an object. """ return ('%s %s' % ( self.first_name, self.last_name )) #--[Apartment Class]-------------------
-----------------------------------------
# Define a class using an object that implements the 'IName' interface: class Apartment ( HasTraits ): renter = Instance( IName ) #--[Example*]-------------------------------------------------------------------- # Create an object implementing the 'IName' interface: william = Person( first_name = 'William', last_name = 'Adams' ) # Create an apartment, and assign 'renter' an object implementing 'IName': apt = Apartment( renter = william ) # Verify that the object works correctly: print 'Renter is:', apt.renter.get_name()
e(0, 8, 2): elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(i, elem_on_1_value) elem_on_2_has_value, elem_on_2_value = sess.run( [elem_on_2_has_value_t, elem_on_2_t]) self.assertTrue(elem_on_2_has_value) self.assertEqual(i + 1, elem_on_2_value) elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(8, elem_on_1_value) self.assertFalse(self.evaluate(elem_on_1_has_value_t)) self.assertFalse(self.evaluate(elem_on_2_has_value_t)) with self.assertRaises(errors.InvalidAr
gumentError): self.evaluate(elem_on_1_t) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_
on_2_t) @combinations.generate(skip_v2_test_combinations()) def testUneven(self): dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1 = multi_device_iterator.get_next("/cpu:1") self.assertEqual(i, self.evaluate(elem_on_1)) for i in range(0, 10, 2): elem_on_2 = multi_device_iterator.get_next("/cpu:2") self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @combinations.generate(skip_v2_test_combinations()) def testMultipleInitializationsGraph(self): if context.executing_eagerly(): return with ops.device("/cpu:0"): epoch = array_ops.placeholder(dtypes.int64, shape=[]) dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000) dataset2 = dataset_ops.Dataset.range(1000) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4) elem_on_1, elem_on_2 = multi_device_iterator.get_next() init_op = multi_device_iterator.initializer config = config_pb2.ConfigProto(device_count={"CPU": 3}) pool = config.session_inter_op_thread_pool.add() pool.num_threads = 2 with session.Session(config=config) as sess: for i in range(1000): sess.run(init_op, feed_dict={epoch: i}) self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1, elem_on_2])) @combinations.generate(skip_v2_test_combinations()) def testMultipleInitializationsEager(self): if not context.executing_eagerly(): return with ops.device("/cpu:0"): dataset1 = dataset_ops.Dataset.range(1000) dataset2 = dataset_ops.Dataset.range(1000) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) for _ in range(5): multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4) elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2])) @combinations.generate(skip_v2_test_combinations()) def testBasicGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"]) config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @combinations.generate(skip_v2_test_combinations()) def testUnevenGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"], max_buffer_size=4) config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 
2): elem_on_1 = multi_device_iterator.get_next("/cpu:1") self.assertEqual(i, self.evaluate(elem_on_1)) for i in range(0, 10, 2): elem_on_2 = multi_device_iterator.get_next("/gpu:0") self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @combinations.generate(skip_v2_test_combinations()) def testGetNextAsOptionalGpu(self): if not test_util.is_gpu_available() or context.executing_eagerly(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(9) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"]) elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional() elem_on_1_has_value_t = elem_on_1.has_value() elem_on_1_t = elem_on_1.get_value() elem_on_2_has_value_t = elem_on_2.has_value() elem_on_2_t = elem_on_2.get_value() config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config) as sess: self.evaluate(multi_device_iterator.initializer) for i in range(0, 8, 2): elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(i, elem_on_1_value) elem_on_2_has_value, elem_on_2_value = sess.run( [elem_on_2_has_value_t, elem_on_2_t]) self.assertTrue(elem_on_2_has_value) self.assertEqual(i + 1, elem_on_2_value) elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(8, elem_on_1_value) self.assertFalse(self.evaluate(elem_on_1_has_value_t)) self.assertFalse(self.evaluate(elem_on_2_has_value_t)) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_1_t) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_2_t) @combinations.generate(skip_v2_test_combinations()) def testOptimization(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"])) dataset = dataset.skip(0) # this should be optimized away dataset = dataset.cache() options = dataset_ops.Options() options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next()
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper


class ProjectFuture(_ParserScraper):
    imageSearch = '//td[@class="tamid"]/img'
    prevSearch = '//a[./img[@alt="Previous"]]'

    def __init__(self, name, comic, first, last=None):
        if name == 'ProjectFuture':
            super(ProjectFuture, self).__init__(name)
        else:
            super(ProjectFuture, self).__init__('ProjectFuture/' + name)

        self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
        self.stripUrl = self.url + '?strip=%s'
        self.firstStripUrl = self.stripUrl % first

        if last:
            self.url = self.stripUrl
            self.endOfLife = True

    @classmethod
    def getmodules(cls):
        return (
            cls('AWalkInTheWoods', 'simeon', '1', last='12'),
            cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
            cls('BookOfTenets', 'tenets', '01', last='45'),
            cls('CriticalMass', 'criticalmass', 'cover', last='26'),
            cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
            cls('Emily', 'emily', '01-00'),
            cls('FishingTrip', 'fishing', '01-00'),
            cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
            cls('NiallsStory', 'niall', '00'),
            cls('ProjectFuture', 'strip', '0'),
            cls('RedValentine', 'redvalentine', '1', last='6'),
            cls('ShortStories', 'shorts', '01-00'),
            cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
            cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
            cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
            cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
            cls('TheEpsilonProject', 'epsilon', '00-01'),
            cls('TheHarvest', 'harvest', '01-00'),
            cls('TheSierraChronicles', 'sierra', '0', last='29'),
            cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
            cls('TurningANewPage', 'azrael', '1', last='54'),
        )
""" Management command to load language fixtures as tags """ from __future__ import unicode_literals import csv import os import re from django.contrib.auth.models import User from django.core.management.base import BaseCommand, CommandError from orb.models import Category, Tag def has_data(input): """Identify if the input contains any meaningful string content CSV input may include non-breaking space which is a Unicode character, however the csv module does not handle unicode. Args: input: string value Returns: bool """ input = input.replace("\xc2\xa0", " ") return bool(re.compile("\S").match(input)) class Command(BaseCommand): help = "Loads languages from CSV fixtures into tag database" def add_arguments(self, parser): parser.add_argument( "--file", dest="fixture", default="orb/fixtures/iso639.csv", help="CSV file path", ) parser.add_argument( "--image", dest="image", default="tag/language_default.png", help="Default image (static image path)", ) parser.add_argument( "--user", dest="user", type=int, default=1, help="Default user to mark as creating", ) parser.add_argument
( "--iso6392", action="store_true", dest="iso6
392", default=False, help="Flag for including all ISO 639.2 (only ISO 639.1 included by default)", ) def handle(self, *args, **options): try: user = User.objects.get(pk=options["user"]) except User.DoesNotExist: raise CommandError("No match user found for '{0}'".format(options["user"])) category, _ = Category.objects.get_or_create(name="Language", defaults={ 'top_level': True, }) if not os.path.exists(options["fixture"]): raise CommandError("Cannot find file '{0}'".format(options["fixture"])) with open(options["fixture"]) as csvfile: reader = csv.DictReader(csvfile) for row in reader: row = {k: v.decode('utf-8') for k, v in row.items()} if not options["iso6392"] and not has_data(row["iso639-1"]): continue tag, _ = Tag.objects.get_or_create(name=row["English"], defaults={ "create_user": user, "update_user": user, "category": category, "image": options["image"], })
class Solution:
    # @param {integer[]} nums
    # @param {integer} target
    # @return {integer[]}
    def searchRange(self, nums, target):
        res = []

        # leftmost index with nums[m] >= target
        l, r = 0, len(nums) - 1
        while l <= r:
            m = (l + r) // 2
            if nums[m] < target:
                l = m + 1
            else:
                r = m - 1
        res.append(l)

        # rightmost index with nums[m] <= target
        l, r = 0, len(nums) - 1
        while l <= r:
            m = (l + r) // 2
            if nums[m] <= target:
                l = m + 1
            else:
                r = m - 1
        res.append(r)

        res = [-1, -1] if res[0] > res[1] else res
        return res
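# The two hand-rolled binary searches above are exactly what the standard
# library's bisect module provides; a minimal equivalent sketch (not part of
# the original solution):
from bisect import bisect_left, bisect_right

def search_range(nums, target):
    # bisect_left finds the first index >= target, bisect_right the first > target.
    lo = bisect_left(nums, target)
    hi = bisect_right(nums, target) - 1
    return [lo, hi] if lo <= hi else [-1, -1]

# search_range([5, 7, 7, 8, 8, 10], 8) == [3, 4]
# search_range([5, 7, 7, 8, 8, 10], 6) == [-1, -1]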
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from kubernetes import client
from kfserving import (
    constants,
    KFServingClient,
    V1beta1InferenceService,
    V1beta1InferenceServiceSpec,
    V1beta1PredictorSpec,
    V1beta1TorchServeSpec,
)
from kubernetes.client import V1ResourceRequirements

from ..common.utils import predict
from ..common.utils import KFSERVING_TEST_NAMESPACE

KFServing = KFServingClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))


def test_torchserve_kfserving():
    service_name = "mnist"
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        pytorch=V1beta1TorchServeSpec(
            storage_uri="gs://kfserving-examples/models/torchserve/image_classifier",
            protocol_version="v1",
            resources=V1ResourceRequirements(
                requests={"cpu": "1", "memory": "4Gi"},
                limits={"cpu": "1", "memory": "4Gi"},
            ),
        ),
    )

    isvc = V1beta1InferenceService(
        api_version=constants.KFSERVING_V1BETA1,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name, namespace=KFSERVING_TEST_NAMESPACE
        ),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )

    KFServing.create(isvc)
    KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)

    res = predict(service_name, "./data/torchserve_input.json")
    assert res.get("predictions")[0] == 2

    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
#!/usr/bin/python # -*- coding: utf-8 -*- import os import time from creds import get_nova_obj from scit_config import * from scit_db import * #get authed nova obj nova = get_nova_obj() def create_nova_vm(logger, server_name, usr_dst): conf = getScitConfig() retry = int(conf["scit"]["scit_clean_retry"]) #check status and write into db ret = create_vm_min(logger, server_name, usr_dst) if not ret: while True: if retry <= 0: print "create vm " + server_name + " timeout" if logger: logger.error("create vm " + server_name + " timeout.") return False else: delete_nova_vm(logger, server_name, None) time.sleep(10) retry = retry - 1 ret = create_vm_min(logger, server_name, usr_dst) if ret: break #write into db addVm(ret["vm_name"], ret["vm_fixip"], "READY") return True #minimal create vm def create_vm_min(logger, server_name, usr_dst): ret = {} ret["vm_name"] = server_name try: f = open(usr_dst) user_data = f.read() f.close() except: if logger: logger.error("create vm failed, is there a init script?") return False #read the conf conf = getScitConfig() img = conf["instance"]["instance_img"] flvr = conf["instance"]["instance_flvr"] key_pair = conf["instance"]["instance_keypair"] network_id = conf["network"]["network_ext_netid"] #query whether the name is already exists. #try create if not nova.keypairs.findall(name=key_pair): with open(os.path.expanduser('/root/.ssh/id_rsa.pub')) as fpubkey: nova.keypairs.create(name=key_pair, public_key=fpubkey.read()) ta = time.time() try: image = nova.images.find(name=img) flavor = nova.flavors.find(name=flvr) network = nova.networks.find(id=network_id) instance = nova.servers.create(name=server_name, image=image, flavor=flavor, userdata=user_data, network=network, key_name=key_pair) except: if logger: logger.error("failed create nova vm, exception throw out.") print "expceton found when try creating nova vm." return False status = instance.status while status == 'BUILD': time.sleep(5) print "waiting vm active.." # Retrieve the instance again so the status field updates instance = nova.servers.get(instance.id) status = instance.status tb = time.time() t = int(tb-ta + (tb-ta - int(tb-ta))/1.0) print "Total: " + str(t) + " s." if logger: logger.info("create vm " + server_name + ", Total " + str(t) + " s.") #not active or network is not ok if status != 'ACTIVE': return False instance = nova.servers.get(instance.id) network_flag = False if instance.networks: for item in instance.networks: if instance.networks[item]: ret["vm_fixip"] = instance.networks[item][0] network_flag = True if not network_flag: print "vm network init failed." if logger: logger.error("vm: " + server_name + " network init failed.") return False print "successful create vm: " + server_name if logger: logger.info("vm: " + server_name + " created.") return ret #bind floatip to vm #check whether a clean server is ok to online def vm_extra_set(logger, server_name, floatip): try: instance = nova.servers.find(name = server_name) except: print "vm " + server_name + "not found." if logger: logger.error("vm " + server_name + "not found.") return False if instance.status == "ACTIVE": floating_ip = nova.floating_ips.find(ip=floatip) instance.add_floating_ip(floating_ip) #check whether server is ok #write into db updateFloatip(server_name, floatip) return True else: return False def vm_free_set(logger, server_name): instance = None try: instance = nova.servers.find(name = server_name) except: print "vm " + server_name + "not found." 
if logger: logger.error("vm " + server_name + "not found?!") return False floatip = "" for item in instance.networks: if len(instance.networks[item]) == 2: floatip = instance.networks[item][1] else: return False #free the floatip instance.remove_floating_ip(floatip) return floatip #delete the vm def delete_nova_vm(logger, server_name, float_ip): #clean the env #remove the knownlist info if not server_name: print "vm name illegal." if logger: logger.warn("vm name illegal, delete task stopped.") print "deleting vm " + server_name if logger: logger.info("try deleting vm " + server_name) if float_ip: os.popen("sed -i '/^.*" + float_ip + ".*/d' /root/.ssh/known_hosts") #os.popen("sed -i '/^.*" + float_ip + ".*/d' /etc/ansible/hosts") try: instance = nova.servers.find(name=server_name) except: print "vm: " + server_name + " not found." if logger: logger.warn("vm " + server_name + " not found.") return True instance.delete() #clear the db #runSQL("delete from scit_vm where vm_name = " + server_name + ";") delVm(server_name) #confirm that is delete ok conf = getScitConfig() retry = int(conf["scit"]["scit_clean_retry"]) while True: if retry <= 0: print "delete task timeout." if logger: logger.error("delete vm: " + server_name + " task timeout.") return False try: instance = nova.servers.find(name=server_name) retry = retry - 1 except: break #clear the vm def clear_nova_vm(logger): #clear the all nova vm instances = nova.servers.list() retry = 0 if instances: for server in instances: print "deleting the vm: " + server.name if logger: logger.info("deleting the vm: " + server.name) server.delete() else: return True #wait the clear ok while True: if retry > 10:
            # retry 10 times
            print "clear vm failed, timeout.."
            if logger:
                logger.error("clear vm retry timeout.")
            return False
        instances = nova.servers.list()
        if instances:
            retry = retry + 1
            time.sleep(10)
        else:
            print "all vm cleared.."
            if logger:
                logger.info("cleared the vms..")
            return True


# main func
def main():
    # create_nova_vm() takes (logger, server_name, usr_dst) and reads the
    # image, flavor, keypair and network from the scit config, so the old
    # extra keyword arguments are kept only as a note:
    #   img="CentOS 6.5 x86_64", flvr="m1.small", key_pair="dns_test",
    #   network_id="0e13d973-f3a7-4e65-aba0-7d0f392ce13b"
    create_nova_vm(None, server_name="test2",
                   usr_dst="/root/openstack/pys/scit-sys/scripts/init.sh")
    # delete_nova_vm(None, server_name="test2", float_ip="192.168.1.122")
    return 0


# code entry
if __name__ == '__main__':
    # main()
    vm_extra_set(None, "SCIT_VM00", "192.168.1.122")
    # clear_nova_vm(None)
# coding: utf8
# jmdict.py
# 2/14/2014 jichi

if __name__ == '__main__':
    import sys
    sys.path.append('..')


def get(dic):
    """
    @param  dic  str such as ipadic or unidic
    @return  bool
    """
    import rc
    return rc.runscript('getcabocha.py', (dic,))


if __name__ == "__main__":
    get('unidic')

# EOF
from django.db import models
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone

from qbase.time import timesince
from qbase import fields
from qbase.models import get_contenttype
from qevent.registry import check

import functools
from collections import defaultdict


def stream(f):
    """
    Turn a function that returns filter conditions into one that returns
    a queryset, with support for extra _offset/_limit keyword arguments.
    """
    @functools.wraps(f)
    def wrapped(manager, *args, **kwargs):
        offset, limit = kwargs.pop('_offset', None), kwargs.pop('_limit', None)
        qs = f(manager, *args, **kwargs)
        if isinstance(qs, dict):
            qs = manager.filter(**qs)
        elif isinstance(qs, (list, tuple)):
            qs = manager.filter(*qs)
        if offset or limit:
            qs = qs[offset:limit]
        return qs
    return wrapped


class ActionManager(models.Manager):

    # Only an explicit private=True shows all events; otherwise only
    # public events are returned.
    def public(self, *args, **kwargs):
        if 'private' not in kwargs:
            kwargs['private'] = False
        elif kwargs.get('private'):
            del kwargs['private']
        return self.filter(*args, **kwargs)

    @stream
    def actor(self, obj, **kwargs):
        """ All actions with the given object AS actor """
        check(obj)
        return obj.actor_actions.public(**kwargs)

    @stream
    def target(self, obj, **kwargs):
        """ All actions with the given object AS target """
        check(obj)
        return obj.target_actions.public(**kwargs)

    @stream
    def relative(self, obj, **kwargs):
        """ All actions with the given object AS relative """
        check(obj)
        return obj.relative_actions.public(**kwargs)

    def _object_actions(self, obj):
        check(obj)
        ct = get_contenttype(obj)
        return models.Q(
            actor_type_id=ct.pk,
            actor_object_id=obj.pk,
        ) | models.Q(
            target_type_id=ct.pk,
            target_object_id=obj.pk,
        ) | models.Q(
            relative_type_id=ct.pk,
            relative_object_id=obj.pk,
        )

    @stream
    def any(self, obj, **kwargs):
        """ All actions involving the given object in any role """
        return self.public(self._object_actions(obj), **kwargs)

    @stream
    def content(self, model, **kwargs):
        """ All actions involving the given model """
        check(model)
        ct = get_contenttype(model)
        return self.public(
            (models.Q(actor_type_id=ct.pk) |
             models.Q(target_type_id=ct.pk) |
             models.Q(relative_type_id=ct.pk)
             ), **kwargs)

    model_actions = content


Actions = ActionManager()


class Action(models.Model):
    actor_type = models.ForeignKey(ContentType, related_name='act_actor',
                                   db_index=True, null=True, blank=True,
                                   default=None)
    actor_object_id = fields.char_index_null(max_length=64)
    actor = GenericForeignKey('actor_type', 'actor_object_id')
    #
    verb = fields.char_index('verb', max_length=32)
    #
    target_type = models.ForeignKey(ContentType, related_name='act_target',
                                    db_index=True)
    target_object_id = fields.char_index(max_length=64)
    target = GenericForeignKey('target_type', 'target_object_id')
    #
    relative_type = models.ForeignKey(ContentType, related_name='act_relative',
                                      null=True, blank=True, default=None)
    relative_object_id = fields.char_null(max_length=64)
    relative = GenericForeignKey('relative_type', 'relative_object_id')
    #
    timestamp = fields.datetime_auto_add()
    description = fields.text('description')
    actor_only = fields.falsy('one-way')
    private = fields.falsy('private')

    class Meta:
        verbose_name = verbose_name_plural = 'event'
        ordering = ('-timestamp', )

    objects = Actions

    def __str__(self):
        return '{} {} {}'.format(str(self.actor), self.verb, str(self.target))

    def timesince(self, now=None):
        return timesince(self.timestamp, now)


def action_handler(verb, **kwargs):
    """ Handler for the qevent.signals.action signal """
    kwargs.pop('signal', None)
    actor = kwargs.pop('sender')

    # unwrap lazy translation proxies to get the underlying string
    if hasattr(verb, '_proxy____args'):
        verb = verb._proxy____args[0]

    event = Action(
        actor_type=get_contenttype(actor),
        actor_object_id=actor.pk if actor else None,
        verb=str(verb),
        timestamp=kwargs.pop('timestamp', timezone.now()),
        description=kwargs.pop('description', None),
        private=bool(kwargs.pop('private', False)),
    )
    for opt in ('target', 'relative'):
        obj = kwargs.pop(opt, None)
        if obj is not None:
            check(obj)
            setattr(event, opt + '_type', get_contenttype(obj))
            setattr(event, opt + '_object_id', obj.pk)
    if hasattr(event, 'data') and kwargs:
        event.data = kwargs
    event.save()
    return event
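# A hedged usage sketch (not from this module): assuming qevent.signals
# defines the `action` signal that action_handler above is connected to,
# recording and querying events would look like:
#
#     from qevent.signals import action  # hypothetical import path
#     action.send(sender=request.user, verb='commented', target=post)
#
#     Action.objects.actor(request.user, _limit=20)  # 20 newest public events
#     Action.objects.any(post, private=True)         # all events, private too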
#!/usr/bin/python3
"""
Given an unsorted array nums, reorder it such that
nums[0] < nums[1] > nums[2] < nums[3]....

Example 1:
Input: nums = [1, 5, 1, 1, 6, 4]
Output: One possible answer is [1, 4, 1, 5, 1, 6].

Example 2:
Input: nums = [1, 3, 2, 2, 3, 1]
Output: One possible answer is [2, 3, 1, 3, 1, 2].

Note:
You may assume all input has valid answer.

Follow Up:
Can you do it in O(n) time and/or in-place with O(1) extra space?
"""
from typing import List


class Solution:
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Median + 3-way partitioning
        """
        n = len(nums)
        # mid = self.find_kth(nums, 0, n, (n - 1) // 2)
        # median = nums[mid]
        median = sorted(nums)[n // 2]

        # three-way partition around the median: small values go to even
        # slots (filled from the back), large values to odd slots (from
        # the front), values equal to the median stay where they are
        odd = 1
        even = n - 1 if (n - 1) % 2 == 0 else n - 2
        i = 0
        while i < n:
            if nums[i] < median:
                if i >= even and i % 2 == 0:
                    i += 1
                    continue
                nums[i], nums[even] = nums[even], nums[i]
                even -= 2
            elif nums[i] > median:
                if i <= odd and i % 2 == 1:
                    i += 1
                    continue
                nums[i], nums[odd] = nums[odd], nums[i]
                odd += 2
            else:
                i += 1

    def find_kth(self, A, lo, hi, k):
        p = self.pivot(A, lo, hi)
        if k == p:
            return p
        elif k > p:
            return self.find_kth(A, p + 1, hi, k)
        else:
            return self.find_kth(A, lo, p, k)

    def pivot(self, A, lo, hi):
        # need 3-way pivot, otherwise TLE
        p = lo
        closed = lo
        for i in range(lo + 1, hi):
            if A[i] < A[p]:
                closed += 1
                A[closed], A[i] = A[i], A[closed]
        A[closed], A[p] = A[p], A[closed]
        return closed


if __name__ == "__main__":
    Solution().wiggleSort([1, 5, 1, 1, 6, 4])
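# For contrast with the median/3-way-partition approach above, a simpler
# O(n log n) sketch (O(n) extra space, not part of the original solution):
# sort, then fill even slots from the top of the lower half and odd slots
# from the top of the upper half, so duplicates near the median end up on
# opposite sides of each peak.
def wiggle_sort_simple(nums):
    half = (len(nums) + 1) // 2
    s = sorted(nums)
    nums[::2] = s[:half][::-1]   # reversed lower half -> even indices
    nums[1::2] = s[half:][::-1]  # reversed upper half -> odd indices

# e.g. wiggle_sort_simple on [1, 5, 1, 1, 6, 4] yields [1, 6, 1, 5, 1, 4]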
self._verify_xblock_info_state(xblock_info, 'visibility_state', expected_state, path, should_equal) def _verify_explicit_staff_lock_state(self, xblock_info, expected_state, path=None, should_equal=True): """ Verify the explicit staff lock state of an item in the xblock_info. """ self._verify_xblock_info_state(xblock_info, 'has_explicit_staff_lock', expected_state, path, should_equal) def test_empty_chapter(self): empty_chapter = self._create_child(self.course, 'chapter', "Empty Chapter") xblock_info = self._get_xblock_info(empty_chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.unscheduled) @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split) def test_chapter_self_paced_default_start_date(self, store_type): course = CourseFactory.create(default_store=store_type) course.self_paced = True self.store.update_item(course, self.user.id) chapter = self._create_child(course, 'chapter', "Test Chapter") sequential = self._create_child(chapter, 'sequential', "Test Sequential") self._create_child(sequential, 'vertical', "Published Unit", publish_item=True) self._set_release_date(chapter.location, DEFAULT_START_DATE) xblock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.live) def test_empty_sequential(self): chapter = self._create_child(self.course, 'chapter', "Test Chapter") self._create_child(chapter, 'sequential', "Empty Sequential") xblock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.unscheduled) self._verify_visibility_state(xblock_info, VisibilityState.unscheduled, path=self.FIRST_SUBSECTION_PATH) def test_published_unit(self): """ Tests the visibility state of a published unit with release date in the future. """ chapter = self._create_child(self.course, 'chapter', "Test Chapter") sequential = self._create_child(chapter, 'sequential', "Test Sequential") self._create_child(sequential, 'vertical', "Published Unit", publish_item=True) self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True) self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1)) xbl
ock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.ready) self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_SUBSECTION_PATH) self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_UNIT_PATH) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECON
D_UNIT_PATH) def test_released_unit(self): """ Tests the visibility state of a published unit with release date in the past. """ chapter = self._create_child(self.course, 'chapter', "Test Chapter") sequential = self._create_child(chapter, 'sequential', "Test Sequential") self._create_child(sequential, 'vertical', "Published Unit", publish_item=True) self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True) self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1)) xblock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.live) self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH) self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH) def test_unpublished_changes(self): """ Tests the visibility state of a published unit with draft (unpublished) changes. """ chapter = self._create_child(self.course, 'chapter', "Test Chapter") sequential = self._create_child(chapter, 'sequential', "Test Sequential") unit = self._create_child(sequential, 'vertical', "Published Unit", publish_item=True) self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True) # Setting the display name creates a draft version of unit. self._set_display_name(unit.location, 'Updated Unit') xblock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.needs_attention) self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_SUBSECTION_PATH) self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_UNIT_PATH) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH) def test_partially_released_section(self): chapter = self._create_child(self.course, 'chapter', "Test Chapter") released_sequential = self._create_child(chapter, 'sequential', "Released Sequential") self._create_child(released_sequential, 'vertical', "Released Unit", publish_item=True) self._create_child(released_sequential, 'vertical', "Staff Only Unit", staff_only=True) self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1)) published_sequential = self._create_child(chapter, 'sequential', "Published Sequential") self._create_child(published_sequential, 'vertical', "Published Unit", publish_item=True) self._create_child(published_sequential, 'vertical', "Staff Only Unit", staff_only=True) self._set_release_date(published_sequential.location, datetime.now(UTC) + timedelta(days=1)) xblock_info = self._get_xblock_info(chapter.location) # Verify the state of the released sequential self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0]) self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0, 0]) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0, 1]) # Verify the state of the published sequential self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1]) self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1, 0]) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1, 1]) # Finally verify the state of the chapter self._verify_visibility_state(xblock_info, VisibilityState.ready) def test_staff_only_section(self): """ Tests that an explicitly staff-locked section and all of its 
children are visible to staff only. """ chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True) sequential = self._create_child(chapter, 'sequential', "Test Sequential") vertical = self._create_child(sequential, 'vertical', "Unit") xblock_info = self._get_xblock_info(chapter.location) self._verify_visibility_state(xblock_info, VisibilityState.staff_only) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH) self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH) self._verify_explicit_staff_lock_state(xblock_info, True) self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH) self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH) vertical_info = self._get_xblock_info(vertical.location) add_container_page_publishing_info(vertical, vertical_info) self.assertEqual(_xblock_type_and_display_name(chapter), vertical_info["staff_lock_from"]) def test_no_staff_only_section(self): """ Tests that a section with a staff-locked subsection and a visible subsection is not staff locked itself. """
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.modeling.fitting.component Contains the FittingComponent class. # ----------------------------------------------------------------- # Ensure Python 3 compatibility from __future__ import absolute_import, division, print_function # Import standard modules from abc import ABCMeta # Import astronomical modules from astropy.table import Table # Import the relevant PTS classes and modules from ..component.component import ModelingComponent from .tables import RunsTable from .run import FittingRun from .context import FittingContext # ----------------------------------------------------------------- class FittingComponent(ModelingComponent): """ This class... """ __metaclass__ = ABCMeta # ----------------------------------------------------------------- def __init__(self, *args, **kwargs): """ The constructor ... :param args: :param kwargs: :return: """ # Call the constructor of the base class super(FittingComponent, self).__init__(*args, **kwargs) # -- Attributes -- self.context = None # ----------------------------------------------------------------- def setup(self, **kwargs): """ This function ... :return: """ # Call the setup function of the base class super(FittingComponent, self).setup(**kwargs) # Load the fitting context self.context = FittingContext(self.fit_path) # ----------------------------------------------------------------- @property def runs_table_path(self): """ This function ... :return: """ return self.context.runs_table_path # ----------------------------------------------------------------- @property def database_path(self): """ This function ... :return: """ return self.context.database_path # ----------------------------------------------------------------- @property def statistics_path(self): """ This function ... :return: """ return self.context.statistics_path # ----------------------------------------------------------------- @property def populations_path(self): """ This function ... :return: """ return self.context.populations_path # ----------------------------------------------------------------- @property def earth_instrument_name(self): """ This function ... :return: """ return self.context.earth_instrument_name # ----------------------------------------------------------------- def load_fitting_run(self, name): """ This function ... :param name: :return: """ model_name = self.model_for_run(name) return FittingRun(self.config.path, name, model_name) # ----------------------------------------------------------------- @property def runs_table(self): """ This function ... :return: """ return RunsTable.from_file(self.runs_table_path) # ----------------------------------------------------------------- @property def run_names(self): """ This function ... :return: """ return self.runs_table.run_names # ----------------------------------------------------------------- def model_for_run(self, run_name): """ This function ... :param run_name: :return: """ return self.runs_table.model_for_run(run_n
ame) # ----------------------------------------------------------------- @property def statistics(self): """ This function ... :return: """ return Table.read(self.statistics_path) # ----------------------------------------------------------------
-
#!/usr/bin/env python3

# ScatterBackup - A chaotic backup solution
# Copyright (C) 2015 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import unittest

from scatterbackup.fileinfo import FileInfo


class FileInfoTestCase(unittest.TestCase):

    def test_from_file(self):
        fileinfo = FileInfo.from_file("tests/data/test.txt")
        self.assertEqual(11, fileinfo.size)
        self.assertEqual("6df4d50a41a5d20bc4faad8a6f09aa8f", fileinfo.blob.md5)
        self.assertEqual("bc9faaae1e35d52f3dea9651da12cd36627b8403", fileinfo.blob.sha1)

    # def test_json(self):
    #     fileinfo = FileInfo.from_file("tests/test.txt")
    #     jstxt = fileinfo.json()
    #     fileinfo2 = FileInfo.from_json(jstxt)
    #     self.assertEqual(fileinfo, fileinfo2)


if __name__ == '__main__':
    unittest.main()

# EOF #
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import base
from misc import GetPageInfo
from models import PageIdentifier
from category import GetSubcategoryInfos
from revisions import GetCurrentContent, GetPageRevisionInfos
from meta import GetSourceInfo


def test_unicode_title():
    get_beyonce = GetCurrentContent("Beyoncé Knowles")
    assert get_beyonce()


def test_coercion_basic():
    pid = PageIdentifier(title='Africa', page_id=123, ns=4, source='enwp')
    get_subcats = GetSubcategoryInfos(pid)
    assert get_subcats.input_param == 'Category:Africa'


def test_web_request():
    url = 'http://upload.wikimedia.org/wikipedia/commons/d/d2/Mcgregor.jpg'
    get_photo = base.WebRequestOperation(url)
    res = get_photo()
    text = res[0]
    assert len(text) == 16408


def test_get_html():
    get_africa_html = base.GetPageHTML('Africa')
    res = get_africa_html()
    text = res[0]
    assert len(text) > 350000


def test_missing_revisions():
    get_revs = GetPageRevisionInfos('Coffee_lololololol')
    rev_list = get_revs()
    # Should return 'missing' and negative pageid
    assert len(rev_list) == 0


def test_get_meta():
    get_source_info = GetSourceInfo()
    meta = get_source_info()
    assert meta


def test_client_passed_to_subops():
    # This tests whether the client object given to the initial operation
    # is passed to its sub-operations.
    # Use just enough titles to force multiplexing so that we can get
    # sub ops to test.
    titles = ['a'] * (base.DEFAULT_QUERY_LIMIT.get_limit() + 1)
    client = base.MockClient()
    op = GetPageInfo(titles, client=client)
    assert id(op.subop_queues[0].peek().client) == id(client)
#!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder

ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo

shellcode template used : http://shell-storm.org/shellcode/files/shellcode-57.php
'''
from core import stack
from core import template


def run(dirname):
    command = 'mkdir %s' % (str(dirname))
    return template.sys(stack.generate(command.replace('[space]', ' '), '%ecx', 'string'))
        fsdbRoot = os.path.expanduser(fsdbRoot)    # replace ~
        fsdbRoot = os.path.expandvars(fsdbRoot)    # replace vars
        fsdbRoot = os.path.normpath(fsdbRoot)      # replace /../ and so on
        fsdbRoot = os.path.realpath(fsdbRoot)      # resolve links

        # check if path is absolute
        if not os.path.isabs(fsdbRoot):
            raise Exception("fsdb can not operate on relative path")

        # on different platforms the same unicode string could have different representations
        if isinstance(fsdbRoot, unicode):
            fsdbRoot = unicodedata.normalize("NFC", fsdbRoot)

        configPath = os.path.join(fsdbRoot, Fsdb.CONFIG_FILE)

        if Fsdb.configExists(fsdbRoot):
            # warn user about config ignoring and load config from file
            self.logger.debug("Fsdb config file found. Runtime parameters will be ignored. ["+configPath+"]")
            conf = config.loadConf(configPath)
            self._conf = conf
        else:
            conf = dict()
            if mode is not None:
                conf['mode'] = mode
            if deep is not None:
                conf['deep'] = deep
            if hash_alg is not None:
                conf['hash_alg'] = hash_alg
            conf = config.normalizeConf(conf)
            self._conf = conf

            # make all parent directories if they do not exist
            self._makedirs(fsdbRoot)

            # write config file
            config.writeConf(configPath, conf)
            oldmask = os.umask(0)
            os.chmod(configPath, self._conf['mode'])
            os.umask(oldmask)

        # fsdbRoot is an existing regular folder and we have read and write permission
        self.fsdbRoot = fsdbRoot

        self.logger.debug("Fsdb initialized successfully: "+self.__str__())

    def add(self, filePath):
        """Add an existing file to fsdb.

        File under @filePath will be copied under fsdb directory tree

        Args:
            filePath -- path of the file to be add

        Returns:
            String representing the digest of the file
        """
        if not os.path.isfile(filePath):
            raise Exception("fsdb can not add: not regular file received")

        digest = Fsdb.fileDigest(filePath, algorithm=self._conf['hash_alg'])

        if self.exists(digest):
            self.logger.debug('Added File: ['+digest+'] ( Already exists. Skipping transfer)')
            return digest

        absPath = self.getFilePath(digest)
        absFolderPath = os.path.dirname(absPath)

        # make all parent directories if they do not exist
        self._makedirs(absFolderPath)

        # copy file and set permission
        oldmask = os.umask(0)
        shutil.copyfile(filePath, absPath)
        os.chmod(absPath, self._conf['mode'])
        os.umask(oldmask)

        self.logger.debug('Added file: "'+filePath+'" -> "'+absPath+'" [ '+digest+' ]')

        return digest

    def remove(self, digest):
        """Remove an existing file from fsdb.

        File with the given digest will be removed from fsdb
        and the directory tree will be cleaned (remove empty folders)

        Args:
            digest -- digest of the file to remove
        """
        # remove file
        absPath = self.getFilePath(digest)
        os.remove(absPath)

        # clean directory tree
        tmpPath = os.path.dirname(absPath)
        while tmpPath != self.fsdbRoot:
            if os.path.islink(tmpPath):
                raise Exception('fsdb found a link in db tree: "'+tmpPath+'"')
            if len(os.listdir(tmpPath)) > 0:
                break
            os.rmdir(tmpPath)
            tmpPath = os.path.dirname(tmpPath)

        self.logger.debug('Removed file: "'+absPath+'" [ '+digest+' ]')

    def exists(self, digest):
        """Check file existence in fsdb

        Returns:
            True if file exists under this instance of fsdb, false otherwise
        """
        return os.path.isfile(self.getFilePath(digest))

    def getFilePath(self, digest):
        """Retrieve the path to the file with the given digest

        Args:
            digest -- digest of the file

        Returns:
            String representing the absolute path of the file
        """
        relPath = Fsdb.generateDirTreePath(digest, self._conf['deep'])
        return os.path.join(self.fsdbRoot, relPath)

    def _makedirs(self, path):
        """Make folders recursively for the given path and
        check read and write permission on the path

        Args:
            path -- path to the leaf folder
        """
        try:
            oldmask = os.umask(0)
            os.makedirs(path, self._conf['mode'])
            os.umask(oldmask)
        except OSError, e:
            if(e.errno == errno.EACCES):
                raise Exception("not sufficient permissions to write on fsdb folder: \""+path+'\"')
            elif(e.errno == errno.EEXIST):
                fstat = os.stat(path)
                if not stat.S_ISDIR(fstat.st_mode):
                    raise Exception("fsdb folder already exists but it is not a regular folder: \""+path+'\"')
                # the access flags have to be OR-ed together; the previous
                # "os.R_OK and os.W_OK" expression evaluated to just os.W_OK
                elif not os.access(path, os.R_OK | os.W_OK):
                    raise Exception("not sufficient permissions to write on fsdb folder: \""+path+'\"')
            else:
                raise e

    def __str__(self):
        return "{root: "+self.fsdbRoot+", mode: "+str(oct(self._conf['mode']))+", deep: "+str(self._conf['deep'])+", hash_alg: "+self._conf['hash_alg']+"}"

    @staticmethod
    def fileDigest(filepath, algorithm="sha1", block_size=2**20):
        """Calculate the digest of the file at @filepath

        The file is read in chunks of @block_size bytes, so arbitrarily
        large files can be hashed.

        Args:
            filepath -- path of the file to hash
            algorithm -- name of the hash algorithm to use
            block_size -- number of bytes read per iteration

        Returns:
            String representing the hex digest of the file
        """
        if(algorithm == "md5"):
            algFunct = hashlib.md5
        elif(algorithm == "sha1" or algorithm == "sha"):
            algFunct = hashlib.sha1
        elif(algorithm == "sha224"):
            algFunct = hashlib.sha224
        elif(algorithm == "sha256"):
            algFunct = hashlib.sha256
        elif(algorithm == "sha384"):
            algFunct = hashlib.sha384
        elif(algorithm == "sha512" or algorithm == "sha2"):
            algFunct = hashlib.sha512
        else:
            raise ValueError('"' + algorithm + '" is not a supported algorithm function')

        hashM = algFunct()
        with open(filepath, 'rb') as f:
            # hash the whole file block by block, not just the first block
            data = f.read(block_size)
            while data:
                hashM.update(data)
                data = f.read(block_size)
        return hashM.hexdigest()

    @staticmethod
    def generateDirTreePath(fileDigest, deep):
        """Generate a relative path from the given fileDigest

        relative path has a number of directory levels according to @deep

        Args:
            fileDigest -- digest for which the relative path will be generate
            deep -- number of levels to use in relative path generation

        Returns:
            relative path for the given digest
        """
        if(deep < 0):
            raise Exception("deep level can not be negative")
        if(os.path.split(fileDigest)[1] != fileDigest):
            raise Exception("fileDigest cannot contain path separator")

        # calculate min length for the given deep (2^1 + 2^2 + ... + 2^deep + 1)
        min = (2**(deep+1))-1
        if(len(fileDigest) < min):
            raise Exception("fileDigest too short for the given deep")

        path = ""
        index = 0
        for p in range(1, deep+1):
            jump = 2**p
            path = os.path.join(path, fileDigest[index:index+jump])
            index += jump
        path = os.path.join(path, fileDigest[index:])

        return path

    @staticmethod
    def configExists(fsdbRoot):
        path = os.path.join(fsdbRoot, Fsdb.CONFIG_FILE)
        try:
            os.stat(path)
        except OSError, e:
            if(e.errno == errno.EACCES):
                raise Exception("not sufficient permissions to stat fsdb config file: \""+path+'\"')
            elif(e.errno == errno.ENOENT):
                return False
            else:
                raise e
        return True
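# A hedged usage sketch (not part of fsdb itself); the constructor call is
# assumed from the __init__ body above, which accepts a root path plus
# optional mode/deep/hash_alg parameters:
#
#     db = Fsdb('/var/fsdb')
#     digest = db.add('/tmp/report.pdf')   # copies the file into the tree
#     db.exists(digest)                    # True
#     db.getFilePath(digest)               # absolute path inside the tree
#     db.remove(digest)                    # deletes it and prunes empty dirs
#
# generateDirTreePath splits the digest into chunks of 2, 4, 8, ... chars:
# with deep=2, a digest "abcdef123..." maps to the path "ab/cdef/123...".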