from enum import Enum


class EnumVMStatus(Enum):
    deploying = "deploying"
    running = "running"
    halted = "halted"
    paused = "paused"
    halting = "halting"
    migrating = "migrating"
    starting = "starting"
    error = "error"
    networkKilled = "networkKilled"
'''
New integration test for creating a KVM VM and checking the time spent on each stage.

@author: Glody
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
import random
import string
import time

test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()


def key_gen(key_len):
    keylist = [random.choice(string.letters + string.digits) for i in range(key_len)]
    return "".join(keylist)


def test():
    test_util.test_dsc('Create test vm and check the time spend on each stage')
    test_util.test_skip('Time cases need further polish, skip test right now')
    vm_name = 'vm_' + key_gen(7)
    begin_time = int(time.time() * 1000)
    vm = test_stub.create_named_vm(vm_name)
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vr = test_lib.lib_find_vr_by_vm(vm.vm)[0]
    if vr.applianceVmType != "VirtualRouter":
        test_util.test_skip("This test only for VirtualRouter network")
    vm.check()

    [select_bs_time, allocate_host_time, allocate_ps_time,
     local_storage_allocate_capacity_time, allocate_volume_time,
     allocate_nic_time, instantiate_res_pre_time, create_on_hypervisor_time,
     instantiate_res_post_time] = test_stub.get_stage_time(vm_name, begin_time)

    test_util.test_dsc("select_bs_time is " + str(select_bs_time))
    test_util.test_dsc("allocate_host_time is " + str(allocate_host_time))
    test_util.test_dsc("allocate_ps_time is " + str(allocate_ps_time))
    test_util.test_dsc("local_storage_allocate_capacity_time is " + str(local_storage_allocate_capacity_time))
    test_util.test_dsc("allocate_volume_time is " + str(allocate_volume_time))
    test_util.test_dsc("allocate_nic_time is " + str(allocate_nic_time))
    test_util.test_dsc("instantiate_res_pre_time is " + str(instantiate_res_pre_time))
    test_util.test_dsc("create_on_hypervisor_time is " + str(create_on_hypervisor_time))
    test_util.test_dsc("instantiate_res_post_time is " + str(instantiate_res_post_time))

    if select_bs_time > 10:
        test_util.test_fail('select_bs_time is bigger than 10 milliseconds')
    if allocate_host_time > 190:
        test_util.test_fail('allocate_host_time is bigger than 190 milliseconds')
    if allocate_ps_time > 70:
        test_util.test_fail('allocate_ps_time is bigger than 70 milliseconds')
    if local_storage_allocate_capacity_time > 70:
        test_util.test_fail('local_storage_allocate_capacity_time is bigger than 70 milliseconds')
    if allocate_volume_time > 90:
        test_util.test_fail('allocate_volume_time is bigger than 90 milliseconds')
    if allocate_nic_time > 70:
        test_util.test_fail('allocate_nic_time is bigger than 70 milliseconds')
    if instantiate_res_pre_time > 1300:
        test_util.test_fail('instantiate_res_pre_time is bigger than 1300 milliseconds')
    if create_on_hypervisor_time > 2500:
        test_util.test_fail('create_on_hypervisor_time is bigger than 2500 milliseconds')
    if instantiate_res_post_time > 30:
        test_util.test_fail('instantiate_res_post_time is bigger than 30 milliseconds')

    vm.destroy()
    test_util.test_pass('Create VM and Check time for Each Stage Test Success')


def error_cleanup():
    test_lib.lib_error_cleanup(test_obj_dict)
import pytest

from share.regulate import Regulator
from tests.share.normalize.factories import (
    Agent,
    AgentIdentifier,
    Contributor,
    CreativeWork,
    Creator,
    Funder,
    Host,
    Institution,
    IsPartOf,
    Organization,
    Person,
    Publisher,
    Tag,
    WorkIdentifier,
)


class TestModelNormalization:

    # test each tag resolves to lowercased, tokenized name
    @pytest.mark.parametrize('input, output', [(i, o) for input, o in [
        ([
            Tag(name=''),
            Tag(name=' '),
            Tag(name='\n\n\n'),
        ], []),
        ([
            Tag(name='foo'),
            Tag(name='foO'),
            Tag(name='Foo'),
            Tag(name='FOO'),
            Tag(name=' FOO'),
            Tag(name=' foo\n\n\n'),
        ], [Tag(name='foo')]),
        ([
            Tag(name='Rocket League'),
            Tag(name='rocket league'),
            Tag(name='ROCKET LEAGUE'),
            Tag(name='Rocket League'),
            Tag(name='\nRocket \n League\t'),
            Tag(name='rocket\nleague'),
        ], [Tag(name='rocket league')]),
        ([
            Tag(name='Crash; Bandicoot'),
            Tag(name='Crash; Bandicoot'),
            Tag(name='\nCrash; Bandicoot'),
            Tag(name='crash, bandicoot'),
            Tag(name='Crash ,Bandicoot '),
        ], [Tag(name='bandicoot'), Tag(name='crash')]),
    ] for i in input])
    def test_normalize_tag(self, input, output, Graph, ExpectedGraph):
        graph = Graph(CreativeWork(tags=[input]))
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(CreativeWork(tags=output))

    # test tags with the same name are merged on a work
    @pytest.mark.parametrize('input, output', [
        ([
            Tag(name=''),
            Tag(name=' '),
            Tag(name='\n\n\n'),
        ], []),
        ([
            Tag(name='foo'),
            Tag(name='foO'),
            Tag(name='Foo'),
            Tag(name='FOO'),
            Tag(name=' FOO'),
            Tag(name=' foo\n\n\n'),
        ], [Tag(name='foo')]),
        ([
            Tag(name='Rocket League'),
            Tag(name='rocket league'),
            Tag(name='ROCKET LEAGUE'),
            Tag(name='Rocket League'),
            Tag(name='\nRocket \n League\t'),
            Tag(name='rocket\nleague'),
        ], [Tag(name='rocket league')]),
        ([
            Tag(name='Crash; Bandicoot'),
            Tag(name='Crash; Bandicoot'),
            Tag(name='\nCrash; Bandicoot'),
            Tag(name='crash, bandicoot'),
            Tag(name='Crash ,Bandicoot '),
        ], [Tag(name='bandicoot'), Tag(name='crash')]),
    ])
    @pytest.mark.skip
    def test_normalize_tags_on_work(self, input, output, Graph, ExpectedGraph):
        graph = Graph(CreativeWork(tags=input))
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(CreativeWork(tags=output))

    @pytest.mark.parametrize('input, output', [(i, o) for input, o in [
        ([
            Person(name='Smith, J'),
            Person(name='Smith, J'),
        ], Person(name='Smith, J')),
        ([
            Person(name='J Smith '),
        ], Person(name='J Smith')),
        ([
            Person(given_name='J', family_name='Smith'),
            Person(given_name=' J', family_name='\n\nSmith'),
        ], Person(name='J Smith', family_name='Smith', given_name='J')),
        ([
            Person(name='Johnathan James Doe'),
        ], Person(name='Johnathan James Doe')),
        ([
            Person(name='johnathan james doe'),
        ], Person(name='johnathan james doe')),
        ([
            Person(name='johnathan james doe JR'),
        ], Person(name='johnathan james doe JR')),
        ([
            Person(name='none'),
            Person(name=''),
            Person(name='NULL'),
            Person(name='None'),
            Person(name=' '),
            Person(name=' None '),
        ], None)
    ] for i in input])
    def test_normalize_person(self, input, output, Graph, ExpectedGraph):
        graph = Graph(input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(output or [])

    # test two people with the same identifier are merged
    # sort by length and then alphabetize name field
    @pytest.mark.parametrize('input, output', [
        # same name, same identifier
        ([
            Person(0, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(1, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(2, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
        ], [Person(2, name='Barb Dylan', identifiers=[AgentIdentifier(1)])]),
        ([
            Person(0, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(1, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(2, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(3, name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
        ], [Person(3, name='Barb Dylan', identifiers=[AgentIdentifier(1)])]),
        # same name, different identifiers
        ([
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(2)])
        ], [
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(2)])
        ]),
        # no name - name, same identifier
        ([
            Person(name='', identifiers=[AgentIdentifier(1)]),
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)])
        ], [Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)])]),
        # two names, same identifier, take longer name
        ([
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='Barbra Dylan', identifiers=[AgentIdentifier(1)])
        ], [Person(name='Barbra Dylan', identifiers=[AgentIdentifier(1)])]),
        # two names, same length, same identifier, alphabetize and take first
        ([
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='Aarb Dylan', identifiers=[AgentIdentifier(1)])
        ], [Person(name='Aarb Dylan', identifiers=[AgentIdentifier(1)])]),
        # 3 different names, take longest of each name field
        ([
            # Below case WILL FAIL. Haven't seen just a last name... yet
            # Person(name='Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='Dylan, B', identifiers=[AgentIdentifier(1)]),
            Person(name='Barb Dylan', identifiers=[AgentIdentifier(1)]),
            Person(name='B. D. Dylan', identifiers=[AgentIdentifier(1)])
        ], [Person(name='B. D. Dylan', identifiers=[AgentIdentifier(1)])]),
    ])
    @pytest.mark.skip
    def test_normalize_person_relation(self, input, output, Graph, ExpectedGraph):
        graph = Graph(*input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(*output)

    @pytest.mark.parametrize('input, output', [
        (Agent(name='none'), None),
        (Agent(name=''), None),
        (Agent(name='NULL'), None),
        (Agent(name='None'), None),
        (Agent(name=' '), None),
        (Agent(name=' None '), None),
        (Agent(name=' Empty Foundation '), Organization(name='Empty Foundation')),
        (Agent(name='University \n of Arizona '), Institution(name='University of Arizona')),
        (Agent(name='NMRC, University College, Cork, Ireland'), Institution(name='NMRC, University College', location='Cork, Ireland')),
        (Agent(name='Ioffe Physico-Technical Institute'), Institution(name='Ioffe Physico-Technical Institute')),
        (Agent(name='DPTA'), Organization(name='DPTA')),
        (Agent(name='B. Verkin Institute for Low Temperatures Physics & Engineering, Kharkov, Ukraine'), Institution(name='B. Verkin Institute for Low Temperatures Physics & Engineering', location='Kharkov, Ukraine', type='institution')),
        (Agent(name='Physikalisches Institut, University Wuerzburg, Germany'), Agent(name='Physikalisches Institut', location='University Wuerzburg, Germany', type='institution')),
        (Agent(name='Centro de Biotecnologia e Departamento de Biofísica; UFRGS; Av Bento Goncalves 9500, Predio 43431 sala 213 91501-970 Porto Alegre Rio Grande do Sul Brazi'), Agent(name='UFRGS - Centro de Biotecnologia e Departamento de Biofísica', location='Av Bento Goncalves 9500, Predio 43431 sala 213 91501-970 Porto Alegre Rio Grande do Sul Brazi')),
        (Agent(name='Department of Chemistry; ZheJiang University; HangZhou ZheJiang CHINA'), Institution(name='ZheJiang University - Department of Chemistry', location='HangZhou ZheJiang CHINA')),
        (Agent(name='Marine Evolution and Conservation; Groningen Institute for Evolutionary Life Sciences; University of Groningen; Nijenborgh 7, 9747 AG Groningen The Netherlands'), Institution(name='University of Groningen - Marine Evolution and Conservation; Groningen Institute for Evolutionary Life Sciences', location='Nijenborgh 7, 9747 AG Groningen The Netherlands')),
        (Agent(name='Institute of Marine Research; PO Box 1870 Nordnes, 5817 Bergen Norway'), Institution(name='Institute of Marine Research', location='PO Box 1870 Nordnes, 5817 Bergen Norway')),
        (Agent(name=' PeerJ Inc. '), Organization(name='PeerJ Inc.')),
        (Agent(name=' Clinton Foundation\n '), Organization(name='Clinton Foundation')),
    ])
    def test_normalize_agent(self, input, output, Graph, ExpectedGraph):
        graph = Graph(input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(output or [])

    # test two organizations/institutions with the same name are merged
    # sort by length and then alphabetize name field
    @pytest.mark.parametrize('input, output', [
        # same name, same identifiers
        ([
            Organization(name='American Heart Association', identifiers=[AgentIdentifier(1)]),
            Organization(name='American Heart Association', identifiers=[AgentIdentifier(1)])
        ], [Organization(name='American Heart Association', identifiers=[AgentIdentifier(1)])]),
        # same name, different identifiers
        ([
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)]),
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(2)])
        ], [
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)]),
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(2)]),
        ]),
        # same name, different identifiers, different capitalization
        ([
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)]),
            Organization(name='MONEY FOUNDATION', identifiers=[AgentIdentifier(2)])
        ], [
            Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)]),
            Organization(name='MONEY FOUNDATION', identifiers=[AgentIdentifier(2)])
        ]),
        # same identifier, different type, accept more specific type
        ([
            Institution(name='University of Virginia', identifiers=[AgentIdentifier(1)]),
            Organization(name='University of Virginia', identifiers=[AgentIdentifier(1)]),
        ], [
            Institution(name='University of Virginia', identifiers=[AgentIdentifier(1)])
        ]),
        # same identifier, same name, same length, different capitalization, alphabetize
        ([
            Organization(name='Share', identifiers=[AgentIdentifier(1)]),
            Organization(name='SHARE', identifiers=[AgentIdentifier(1)])
        ], [Organization(name='SHARE', identifiers=[AgentIdentifier(1)])]),
        # same name, one identifier, add identifier
        ([
            Organization(name='Timetables Inc.'),
            Organization(name='Timetables Inc.', identifiers=[AgentIdentifier(1)])
        ], [
            Organization(name='Timetables Inc.'),
            Organization(name='Timetables Inc.', identifiers=[AgentIdentifier(1)])
        ]),
        # same identifier, different name, accept longest, alphabetize
        ([
            Institution(name='Cooking Institute', identifiers=[AgentIdentifier(1)]),
            Institution(name='Cooking Instituze', identifiers=[AgentIdentifier(1)]),
            Institution(name='Cook Institute', identifiers=[AgentIdentifier(1)])
        ], [Institution(name='Cooking Institute', identifiers=[AgentIdentifier(1)])]),
    ])
    @pytest.mark.skip
    def test_normalize_organization_institution_name(self, input, output, Graph, ExpectedGraph):
        graph = Graph(*input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(*output)

    # test different types of agent work relations
    # Funder, Publisher, Host
    @pytest.mark.parametrize('input, output', [
        # same name, same identifiers
        ([
            Host(cited_as='American Heart Association', agent=Organization(1, name='American Heart Association', identifiers=[AgentIdentifier(1)])),
            Funder(cited_as='American Heart Association', agent=Organization(1, id=1, name='American Heart Association', identifiers=[AgentIdentifier(1, id=0)])),
        ], [
            Host(cited_as='American Heart Association', agent=Organization(1, id=1, name='American Heart Association', identifiers=[AgentIdentifier(1, id=0)])),
            Funder(cited_as='American Heart Association', agent=Organization(id=1)),
        ]),
        # same name, different identifiers
        ([
            Host(cited_as='Money Foundation', agent=Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)])),
            Funder(cited_as='Money Foundation', agent=Organization(id=1, name='Money Foundation', identifiers=[AgentIdentifier(2)])),
        ], [
            Host(cited_as='Money Foundation', agent=Organization(name='Money Foundation', identifiers=[AgentIdentifier(1)])),
            Funder(cited_as='Money Foundation', agent=Organization(id=1, name='Money Foundation', identifiers=[AgentIdentifier(2)])),
        ]),
        # same identifier, different type
        ([
            Publisher(cited_as='University of Virginia', agent=Institution(name='University of Virginia', identifiers=[AgentIdentifier(1)])),
            Funder(cited_as='University of Virginia', agent=Institution(id=1, name='University of Virginia', identifiers=[AgentIdentifier(1, id=0)])),
        ], [
            Publisher(cited_as='University of Virginia', agent=Institution(id=1, name='University of Virginia', identifiers=[AgentIdentifier(1, id=0)])),
            Funder(cited_as='University of Virginia', agent=Institution(id=1)),
        ]),
        # same identifier, same name, same length, different capitalization, alphabetize
        ([
            Publisher(cited_as='Share', agent=Organization(id=0, name='Share', identifiers=[AgentIdentifier(1, id=2)])),
            Host(cited_as='SHARE', agent=Organization(id=1, name='SHARE', identifiers=[AgentIdentifier(1, id=3)]))
        ], [
            Publisher(cited_as='Share', agent=Organization(id=0, name='SHARE', identifiers=[AgentIdentifier(1, id=3)])),
            Host(cited_as='SHARE', agent=Organization(id=0))
        ]),
        # same name, one identifier, add identifier
        ([
            Funder(cited_as='Timetables Inc.', agent=Organization(id=1, name='Timetables Inc.')),
            Publisher(cited_as='Timetables Inc.', agent=Organization(id=2, name='Timetables Inc.', identifiers=[AgentIdentifier(1)]))
        ], [
            Funder(cited_as='Timetables Inc.', agent=Organization(id=1, name='Timetables Inc.')),
            Publisher(cited_as='Timetables Inc.', agent=Organization(id=2, name='Timetables Inc.', identifiers=[AgentIdentifier(1)]))
        ]),
        # same identifier, different name, accept longest, alphabetize
        ([
            Funder(cited_as='Cooking Institute', agent=Organization(id=1, name='Cooking Notaninstitute', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='Cooking Instituze', agent=Organization(id=2, name='Cooking Notaninstituze', identifiers=[AgentIdentifier(1)])),
            Host(cited_as='Cook Institute', agent=Organization(id=3, name='Cook Notaninstitute', identifiers=[AgentIdentifier(1)]))
        ], [
            Funder(cited_as='Cooking Institute', agent=Organization(id=1, name='Cooking Notaninstitute', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='Cooking Instituze', agent=Organization(id=1)),
            Host(cited_as='Cook Institute', agent=Organization(id=1))
        ]),
        # same identifier, different name, different type, accept longest, alphabetize, more specific
        ([
            Funder(cited_as='Cooking Institute', agent=Institution(id=1, name='Cooking Notaninstitute', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='Cooking Instituze', agent=Organization(id=2, name='Cooking Notaninstituze', identifiers=[AgentIdentifier(1)])),
            Host(cited_as='Cook Institute', agent=Institution(id=3, name='Cook Notaninstitute', identifiers=[AgentIdentifier(1)]))
        ], [
            Funder(cited_as='Cooking Institute', agent=Institution(id=1, name='Cooking Notaninstitute', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='Cooking Instituze', agent=Institution(id=1)),
            Host(cited_as='Cook Institute', agent=Institution(id=1))
        ]),
    ])
    @pytest.mark.skip
    def test_normalize_mixed_agent_relation(self, input, output, Graph, ExpectedGraph):
        graph = Graph(CreativeWork(agent_relations=input))
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(CreativeWork(agent_relations=output))

    # test different types of agent work relations
    # Contributor, Creator
    @pytest.mark.parametrize('input, output', [
        # same name, same identifiers, different type, same type tree, organization
        ([
            Creator(cited_as='American Heart Association', agent=Organization(id=0, name='American Heart Association', identifiers=[AgentIdentifier(1, id=1)])),
            Contributor(cited_as='American Heart Association', agent=Organization(id=1, name='American Heart Association', identifiers=[AgentIdentifier(1, id=2)]))
        ], [
            Creator(cited_as='American Heart Association', agent=Organization(id=1, name='American Heart Association', identifiers=[AgentIdentifier(1, id=2)])),
            Contributor(cited_as='American Heart Association', agent=Organization(id=1)),
        ]),
        # same name, different identifiers, different type, same type tree
        ([
            Creator(cited_as='Money Foundation', agent=Organization(id=1, name='Money Foundation', identifiers=[AgentIdentifier()])),
            Contributor(cited_as='Money Foundation', agent=Organization(id=2, name='Money Foundation', identifiers=[AgentIdentifier()])),
        ], [
            Creator(cited_as='Money Foundation', agent=Organization(id=1, name='Money Foundation', identifiers=[AgentIdentifier()])),
            Contributor(cited_as='Money Foundation', agent=Organization(id=2, name='Money Foundation', identifiers=[AgentIdentifier()])),
        ]),
        # same identifier, same name, different type
        ([
            Contributor(cited_as='University of Virginia', agent=Institution(id=0, name='University of Virginia', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='University of Virginia', agent=Institution(id=1, name='University of Virginia', identifiers=[AgentIdentifier(1)]))
        ], [
            Contributor(cited_as='University of Virginia', agent=Institution(id=1, name='University of Virginia', identifiers=[AgentIdentifier(1)])),
            Publisher(cited_as='University of Virginia', agent=Institution(id=1))
        ]),
        # same identifier, same name, different type, same type tree, person
        ([
            Creator(cited_as='Bob Dylan', agent=Person(id=0, name='Bob Dylan', identifiers=[AgentIdentifier(1, id=0)])),
            Contributor(cited_as='Bob Dylan', agent=Person(id=1, name='Bob Dylan', identifiers=[AgentIdentifier(1, id=1)])),
        ], [
            Creator(cited_as='Bob Dylan', agent=Person(id=0, name='Bob Dylan', identifiers=[AgentIdentifier(1, id=0)])),
            Contributor(cited_as='Bob Dylan', agent=Person(id=0)),
        ]),
        # same identifier, different name, different type
        ([
            Creator(cited_as='B. Dylan', agent=Person(id=0, name='B. Dylan', identifiers=[AgentIdentifier(1, id=0)])),
            Contributor(cited_as='Bob Dylan', agent=Person(id=1, name='Bob Dylan', identifiers=[AgentIdentifier(1, id=1)])),
        ], [
            Creator(cited_as='B. Dylan', agent=Person(id=0, name='Bob Dylan', identifiers=[AgentIdentifier(1, id=0)])),
            Contributor(cited_as='Bob Dylan', agent=Person(id=0)),
        ]),
        # same name, one identifier, add identifier
        ([
            Creator(1, id=0, order_cited=4, cited_as='Timetables Inc.', agent=Organization(id=0, name='Timetables Inc.')),
            Creator(1, id=1, order_cited=20, cited_as='Timetables Inc.', agent=Organization(id=1, name='Timetables Inc.', identifiers=[AgentIdentifier()]))
        ], [
            Creator(1, id=0, order_cited=4, cited_as='Timetables Inc.', agent=Organization(id=0, name='Timetables Inc.')),
            Creator(1, id=1, order_cited=20, cited_as='Timetables Inc.', agent=Organization(id=1, name='Timetables Inc.', identifiers=[AgentIdentifier()]))
        ]),
        # same identifier, different name, accept longest, alphabetize
        ([
            Creator(cited_as='Cooking Institute', agent=Organization(id=1, name='Cooking Institute', identifiers=[AgentIdentifier(1, id=1)])),
            Contributor(cited_as='Cooking Instituze', agent=Organization(id=2, name='Cooking Instituze', identifiers=[AgentIdentifier(1, id=2)])),
            Funder(cited_as='Cook Institute', agent=Organization(id=3, name='Cook Institute', identifiers=[AgentIdentifier(1, id=3)]))
        ], [
            Creator(cited_as='Cooking Institute', agent=Institution(id=1, name='Cooking Institute', identifiers=[AgentIdentifier(1, id=3)])),
            Contributor(cited_as='Cooking Instituze', agent=Organization(id=1)),
            Funder(cited_as='Cook Institute', agent=Institution(id=1))
        ]),
        # same identifier, different name, different type, accept longest, alphabetize, more specific
        ([
            Creator(cited_as='Cooking Institute', order_cited=10, agent=Institution(id=0, name='Cooking Institute', identifiers=[AgentIdentifier(1, id=1)])),
            Contributor(cited_as='Cooking Instituze', agent=Organization(id=1, name='Cooking Instituze', identifiers=[AgentIdentifier(1, id=2)])),
            Funder(cited_as='Cook Institute', agent=Institution(id=2, name='Cook Institute', identifiers=[AgentIdentifier(1, id=3)]))
        ], [
            Creator(cited_as='Cooking Institute', order_cited=10, agent=Institution(id=0, name='Cooking Institute', identifiers=[AgentIdentifier(1, id=3)])),
            Contributor(cited_as='Cooking Instituze', agent=Institution(id=0)),
            Funder(cited_as='Cook Institute', agent=Institution(id=0))
        ]),
        # Related agent removed
        ([
            Creator(cited_as='', agent=Person(id=0, name='None', identifiers=[AgentIdentifier(1, id=1)])),
        ], [
        ]),
        # Nameless agent with cited_as
        ([
            Creator(cited_as='Magpie', agent=Person(id=0, name='', identifiers=[AgentIdentifier(1, id=1)])),
        ], [
            Creator(cited_as='Magpie', agent=Person(id=0, name='Magpie', identifiers=[AgentIdentifier(1, id=1)])),
        ]),
    ])
    @pytest.mark.skip
    def test_normalize_contributor_creator_relation(self, input, output, Graph, ExpectedGraph):
        graph = Graph(CreativeWork(agent_relations=input))
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(CreativeWork(agent_relations=output))

    # test work with related work
    @pytest.mark.parametrize('input, output', [
        # different identifiers
        (
            CreativeWork(1, identifiers=[WorkIdentifier(1)], related_works=[
                CreativeWork(2, identifiers=[WorkIdentifier(2)]),
            ]),
            CreativeWork(1, identifiers=[WorkIdentifier(1)], related_works=[
                CreativeWork(2, identifiers=[WorkIdentifier(2)]),
            ]),
        ),
        # same and different identifiers
        (
            CreativeWork(1, identifiers=[WorkIdentifier(1)], outgoing_creative_work_relations=[
                IsPartOf(1, related=CreativeWork(1, identifiers=[WorkIdentifier(1)])),
                IsPartOf(2, related=CreativeWork(2, identifiers=[WorkIdentifier(2)])),
            ]),
            CreativeWork(1, identifiers=[WorkIdentifier(1)], outgoing_creative_work_relations=[
                IsPartOf(2, related=CreativeWork(2, identifiers=[WorkIdentifier(2)])),
            ]),
        ),
        # circular relation
        (
            CreativeWork(1, id=1, related_works=[CreativeWork(id=1)]),
            CreativeWork(1, id=1),
        ),
    ])
    @pytest.mark.skip
    def test_normalize_related_work(self, input, output, Graph, ExpectedGraph):
        graph = Graph(input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(output)

    @pytest.mark.parametrize('input, output', [
        ({'title': '', 'description': ''}, {'title': '', 'description': ''}),
        ({'title': ' ', 'description': ' '}, {'title': '', 'description': ''}),
        ({'title': 'Title\nLine'}, {'title': 'Title Line'}),
        ({'description': 'Line\nAfter\nLine\nAfter\nLine'}, {'description': 'Line After Line After Line'}),
        ({'description': 'null'}, {'description': ''}),
    ])
    def test_normalize_creativework(self, input, output, Graph, ExpectedGraph):
        graph = Graph(CreativeWork(**input))
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(CreativeWork(**output))

    @pytest.mark.parametrize('input, output', [
        (input, Creator(cited_as='James Bond', agent=Person(name='James Bond')),)
        for input in [
            Creator(cited_as=' \t James\n Bond \t ', agent=Person(name='James Bond')),
            Creator(cited_as='', agent=Person(name='James Bond')),
            Creator(cited_as='', agent=Person(name='James Bond')),
        ]
    ] + [
        (input, Contributor(cited_as='James Bond', agent=Person(name='James Bond')),)
        for input in [
            Contributor(cited_as=' \t James\n Bond \t ', agent=Person(name='James Bond')),
            Contributor(cited_as='', agent=Person(name='James Bond')),
        ]
    ] + [
        (
            Creator(cited_as='', agent=Person(given_name='James', family_name='Bond')),
            Creator(cited_as='James Bond', agent=Person(name='James Bond', given_name='James', family_name='Bond')),
        ),
    ])
    def test_normalize_agentworkrelation(self, input, output, Graph, ExpectedGraph):
        graph = Graph(input)
        Regulator(validate=False).regulate(graph)
        assert graph == ExpectedGraph(output)
import functools
import os
import tempfile
import base64
from tempfile import mkstemp

from OpenSSL import crypto
import M2Crypto
from M2Crypto import X509

from extensions.sfa.util.faults import CertExpired, CertMissingParent, CertNotSignedByParent
from extensions.sfa.util.sfalogging import logger

glo_passphrase_callback = None


def set_passphrase_callback(callback_func):
    global glo_passphrase_callback
    glo_passphrase_callback = callback_func


def set_passphrase(passphrase):
    set_passphrase_callback(lambda k, s, x: passphrase)


def test_passphrase(string, passphrase):
    try:
        crypto.load_privatekey(crypto.FILETYPE_PEM, string, (lambda x: passphrase))
        return True
    except:
        return False


def convert_public_key(key):
    keyconvert_path = "/usr/bin/keyconvert.py"
    if not os.path.isfile(keyconvert_path):
        raise IOError("Could not find keyconvert in %s" % keyconvert_path)

    # we can only convert rsa keys
    if "ssh-dss" in key:
        raise Exception("keyconvert: dss keys are not supported")

    (ssh_f, ssh_fn) = tempfile.mkstemp()
    ssl_fn = tempfile.mktemp()
    os.write(ssh_f, key)
    os.close(ssh_f)

    cmd = keyconvert_path + " " + ssh_fn + " " + ssl_fn
    os.system(cmd)

    # this check leaves the temporary file containing the public key so
    # that it can be inspected to see why it failed.
    # TODO: for production, cleanup the temporary files
    if not os.path.exists(ssl_fn):
        raise Exception("keyconvert: generated certificate not found. keyconvert may have failed.")

    k = Keypair()
    try:
        k.load_pubkey_from_file(ssl_fn)
        return k
    finally:
        # remove the temporary files
        if os.path.exists(ssh_fn):
            os.remove(ssh_fn)
        if os.path.exists(ssl_fn):
            os.remove(ssl_fn)


class Keypair:
    key = None    # public/private keypair
    m2key = None  # public key (m2crypto format)

    ##
    # Creates a Keypair object
    # @param create If create==True, creates a new public/private key and
    #     stores it in the object
    # @param string If string!=None, load the keypair from the string (PEM)
    # @param filename If filename!=None, load the keypair from the file

    def __init__(self, create=False, string=None, filename=None):
        if create:
            self.create()
        if string:
            self.load_from_string(string)
        if filename:
            self.load_from_file(filename)

    ##
    # Create an RSA public/private key pair and store it inside the keypair object

    def create(self):
        self.key = crypto.PKey()
        self.key.generate_key(crypto.TYPE_RSA, 1024)

    ##
    # Save the private key to a file
    # @param filename name of file to store the keypair in

    def save_to_file(self, filename):
        open(filename, 'w').write(self.as_pem())
        self.filename = filename

    ##
    # Load the private key from a file. Implicitly the private key includes the public key.

    def load_from_file(self, filename):
        self.filename = filename
        buffer = open(filename, 'r').read()
        self.load_from_string(buffer)

    ##
    # Load the private key from a string. Implicitly the private key includes the public key.

    def load_from_string(self, string):
        if glo_passphrase_callback:
            self.key = crypto.load_privatekey(
                crypto.FILETYPE_PEM, string,
                functools.partial(glo_passphrase_callback, self, string))
            self.m2key = M2Crypto.EVP.load_key_string(
                string, functools.partial(glo_passphrase_callback, self, string))
        else:
            self.key = crypto.load_privatekey(crypto.FILETYPE_PEM, string)
            self.m2key = M2Crypto.EVP.load_key_string(string)

    ##
    # Load the public key from a file. No private key is loaded.

    def load_pubkey_from_file(self, filename):
        # load the m2 public key
        m2rsakey = M2Crypto.RSA.load_pub_key(filename)
        self.m2key = M2Crypto.EVP.PKey()
        self.m2key.assign_rsa(m2rsakey)

        # create an m2 x509 cert
        m2name = M2Crypto.X509.X509_Name()
        m2name.add_entry_by_txt(field="CN", type=0x1001, entry="junk", len=-1, loc=-1, set=0)
        m2x509 = M2Crypto.X509.X509()
        m2x509.set_pubkey(self.m2key)
        m2x509.set_serial_number(0)
        m2x509.set_issuer_name(m2name)
        m2x509.set_subject_name(m2name)
        ASN1 = M2Crypto.ASN1.ASN1_UTCTIME()
        ASN1.set_time(500)
        m2x509.set_not_before(ASN1)
        m2x509.set_not_after(ASN1)
        # x509v3 so it can have extensions
        # prob not necc since this cert itself is junk but still...
        m2x509.set_version(2)
        junk_key = Keypair(create=True)
        m2x509.sign(pkey=junk_key.get_m2_pkey(), md="sha1")

        # convert the m2 x509 cert to a pyopenssl x509
        m2pem = m2x509.as_pem()
        pyx509 = crypto.load_certificate(crypto.FILETYPE_PEM, m2pem)

        # get the pyopenssl pkey from the pyopenssl x509
        self.key = pyx509.get_pubkey()
        self.filename = filename

    ##
    # Load the public key from a string. No private key is loaded.

    def load_pubkey_from_string(self, string):
        (f, fn) = tempfile.mkstemp()
        os.write(f, string)
        os.close(f)
        self.load_pubkey_from_file(fn)
        os.remove(fn)

    ##
    # Return the private key in PEM format.

    def as_pem(self):
        return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.key)

    ##
    # Return an M2Crypto key object

    def get_m2_pkey(self):
        if not self.m2key:
            self.m2key = M2Crypto.EVP.load_key_string(self.as_pem())
        return self.m2key

    ##
    # Returns a string containing the public key represented by this object.

    def get_pubkey_string(self):
        m2pkey = self.get_m2_pkey()
        return base64.b64encode(m2pkey.as_der())

    ##
    # Return an OpenSSL pkey object

    def get_openssl_pkey(self):
        return self.key

    ##
    # Given another Keypair object, return TRUE if the two keys are the same.

    def is_same(self, pkey):
        return self.as_pem() == pkey.as_pem()

    def sign_string(self, data):
        k = self.get_m2_pkey()
        k.sign_init()
        k.sign_update(data)
        return base64.b64encode(k.sign_final())

    def verify_string(self, data, sig):
        k = self.get_m2_pkey()
        k.verify_init()
        k.verify_update(data)
        return M2Crypto.m2.verify_final(k.ctx, base64.b64decode(sig), k.pkey)

    def compute_hash(self, value):
        return self.sign_string(str(value))

    # only informative
    def get_filename(self):
        return getattr(self, 'filename', None)

    def dump(self, *args, **kwargs):
        print(self.dump_string(*args, **kwargs))

    def dump_string(self):
        result = ""
        result += "KEYPAIR: pubkey=%40s..." % self.get_pubkey_string()
        filename = self.get_filename()
        if filename:
            result += "Filename %s\n" % filename
        return result


class Certificate:
    digest = "md5"

    cert = None
    issuerKey = None
    issuerSubject = None
    parent = None
    isCA = None  # will be a boolean once set

    separator = "-----parent-----"

    ##
    # Create a certificate object.
    #
    # @param lifeDays life of cert in days - default is 1825==5 years
    # @param create If create==True, then also create a blank X509 certificate.
    # @param subject If subject!=None, then create a blank certificate and set
    #     its subject name.
    # @param string If string!=None, load the certificate from the string.
    # @param filename If filename!=None, load the certificate from the file.
    # @param isCA If isCA!=None, set whether this cert is for a CA

    def __init__(self, lifeDays=1825, create=False, subject=None, string=None, filename=None, isCA=None):
        self.data = {}
        if create or subject:
            self.create(lifeDays)
        if subject:
            self.set_subject(subject)
        if string:
            self.load_from_string(string)
        if filename:
            self.load_from_file(filename)

        # Set the CA bit if a value was supplied
        if isCA is not None:
            self.set_is_ca(isCA)

    # Create a blank X509 certificate and store it in this object.

    def create(self, lifeDays=1825):
        self.cert = crypto.X509()
        # FIXME: Use different serial #s
        self.cert.set_serial_number(3)
        self.cert.gmtime_adj_notBefore(0)  # 0 means now
        self.cert.gmtime_adj_notAfter(lifeDays * 60 * 60 * 24)  # five years is default
        self.cert.set_version(2)  # x509v3 so it can have extensions

    ##
    # Given a pyOpenSSL X509 object, store that object inside of this
    # certificate object.

    def load_from_pyopenssl_x509(self, x509):
        self.cert = x509

    ##
    # Load the certificate from a string

    def load_from_string(self, string):
        # if it is a chain of multiple certs, then split off the first one and
        # load it (support for the ---parent--- tag as well as normal chained certs)

        string = string.strip()

        # If it's not in proper PEM format, wrap it
        if string.count('-----BEGIN CERTIFICATE') == 0:
            string = '-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----' % string

        # If there is a PEM cert in there, but there is some other text first
        # such as the text of the certificate, skip the text
        beg = string.find('-----BEGIN CERTIFICATE')
        if beg > 0:
            # skipping over non cert beginning
            string = string[beg:]

        parts = []

        if string.count('-----BEGIN CERTIFICATE-----') > 1 and \
                string.count(Certificate.separator) == 0:
            parts = string.split('-----END CERTIFICATE-----', 1)
            parts[0] += '-----END CERTIFICATE-----'
        else:
            parts = string.split(Certificate.separator, 1)

        self.cert = crypto.load_certificate(crypto.FILETYPE_PEM, parts[0])

        # if there are more certs, then create a parent and let the parent load
        # itself from the remainder of the string
        if len(parts) > 1 and parts[1] != '':
            self.parent = self.__class__()
            self.parent.load_from_string(parts[1])

    ##
    # Load the certificate from a file

    def load_from_file(self, filename):
        file = open(filename)
        string = file.read()
        self.load_from_string(string)
        self.filename = filename

    ##
    # Save the certificate to a string.
    #
    # @param save_parents If save_parents==True, then also save the parent certificates.

    def save_to_string(self, save_parents=True):
        string = crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)
        if save_parents and self.parent:
            string = string + self.parent.save_to_string(save_parents)
        return string

    ##
    # Save the certificate to a file.
    # @param save_parents If save_parents==True, then also save the parent certificates.

    def save_to_file(self, filename, save_parents=True, filep=None):
        string = self.save_to_string(save_parents=save_parents)
        if filep:
            f = filep
        else:
            f = open(filename, 'w')
        f.write(string)
        f.close()
        self.filename = filename

    ##
    # Save the certificate to a random file in /tmp/
    # @param save_parents If save_parents==True, then also save the parent certificates.

    def save_to_random_tmp_file(self, save_parents=True):
        fp, filename = mkstemp(suffix='cert', text=True)
        fp = os.fdopen(fp, "w")
        self.save_to_file(filename, save_parents=True, filep=fp)
        return filename

    ##
    # Sets the issuer private key and name
    # @param key Keypair object containing the private key of the issuer
    # @param subject String containing the name of the issuer
    # @param cert (optional) Certificate object containing the name of the issuer

    def set_issuer(self, key, subject=None, cert=None):
        self.issuerKey = key
        if subject:
            # it's a mistake to use subject and cert params at the same time
            assert(not cert)
            if isinstance(subject, dict) or isinstance(subject, str):
                req = crypto.X509Req()
                reqSubject = req.get_subject()
                if isinstance(subject, dict):
                    for attr in subject.keys():
                        setattr(reqSubject, attr, subject[attr])
                else:
                    setattr(reqSubject, "CN", subject)
                subject = reqSubject
                # subject is not valid once req is out of scope, so save req
                self.issuerReq = req
        if cert:
            # if a cert was supplied, then get the subject from the cert
            subject = cert.cert.get_subject()
        assert(subject)
        self.issuerSubject = subject

    ##
    # Get the issuer name

    def get_issuer(self, which="CN"):
        x = self.cert.get_issuer()
        return getattr(x, which)

    ##
    # Set the subject name of the certificate

    def set_subject(self, name):
        req = crypto.X509Req()
        subj = req.get_subject()
        if isinstance(name, dict):
            for key in name.keys():
                setattr(subj, key, name[key])
        else:
            setattr(subj, "CN", name)
        self.cert.set_subject(subj)

    ##
    # Get the subject name of the certificate

    def get_subject(self, which="CN"):
        x = self.cert.get_subject()
        return getattr(x, which)

    def get_extended_subject(self):
        x = self.cert.get_subject()
        return dict(x.get_components())

    ##
    # Get a pretty-print subject name of the certificate

    def get_printable_subject(self):
        x = self.cert.get_subject()
        return "[ OU: %s, CN: %s, SubjectAltName: %s ]" % (getattr(x, "OU"), getattr(x, "CN"), self.get_data())

    ##
    # Set the public key of the certificate.
    #
    # @param key Keypair object containing the public key

    def set_pubkey(self, key):
        assert(isinstance(key, Keypair))
        self.cert.set_pubkey(key.get_openssl_pkey())

    ##
    # Get the public key of the certificate.
    # It is returned in the form of a Keypair object.

    def get_pubkey(self):
        m2x509 = X509.load_cert_string(self.save_to_string())
        pkey = Keypair()
        pkey.key = self.cert.get_pubkey()
        pkey.m2key = m2x509.get_pubkey()
        return pkey

    def set_intermediate_ca(self, val):
        return self.set_is_ca(val)

    # Set whether this cert is for a CA. All signers and only signers should be CAs.
    # The local member starts unset, letting us check that you only set it once.
    # @param val Boolean indicating whether this cert is for a CA

    def set_is_ca(self, val):
        if val is None:
            return

        if self.isCA is not None:
            # Can't double set properties
            raise Exception(
                "Cannot set basicConstraints CA:?? more than once. "
                "Was %s, trying to set as %s" % (self.isCA, val))

        self.isCA = val
        if val:
            self.add_extension('basicConstraints', 1, 'CA:TRUE')
        else:
            self.add_extension('basicConstraints', 1, 'CA:FALSE')

    ##
    # Add an X509 extension to the certificate. Add_extension can only be called
    # once for a particular extension name, due to limitations in the underlying
    # library.
    #
    # @param name string containing name of extension
    # @param critical the critical flag for the extension
    # @param value string containing value of the extension

    def add_extension(self, name, critical, value):
        oldExtVal = None
        try:
            oldExtVal = self.get_extension(name)
        except:
            # M2Crypto LookupError when the extension isn't there (yet)
            pass

        # This code stops you from adding the extension with the same value.
        # The method comment says you shouldn't do this with the same name,
        # but actually it (m2crypto) appears to allow you to do this.
        if oldExtVal and oldExtVal == value:
            # don't add this extension again; just do nothing
            return
        # FIXME: What if they are trying to set with a different value?
        # Is this ever OK? Or should we raise an exception?

        ext = crypto.X509Extension(name, critical, value)
        self.cert.add_extensions([ext])

    ##
    # Get an X509 extension from the certificate

    def get_extension(self, name):
        # pyOpenSSL does not have a way to get extensions
        m2x509 = X509.load_cert_string(self.save_to_string())
        value = m2x509.get_ext(name).get_value()
        return value

    ##
    # Set_data is a wrapper around add_extension. It stores the parameter str in
    # the X509 subject_alt_name extension. Set_data can only be called once, due
    # to limitations in the underlying library.

    def set_data(self, str, field='subjectAltName'):
        # pyOpenSSL only allows us to add extensions, so if we try to set the
        # same extension more than once, it will not work
        if field in self.data:
            raise Exception("Cannot set %s more than once" % field)
        self.data[field] = str
        self.add_extension(field, 0, str)

    ##
    # Return the data string that was previously set with set_data

    def get_data(self, field='subjectAltName'):
        if field in self.data:
            return self.data[field]

        try:
            uri = self.get_extension(field)
            self.data[field] = uri
        except LookupError:
            return None

        return self.data[field]

    ##
    # Sign the certificate using the issuer private key and issuer subject previously set with set_issuer().

    def sign(self):
        assert self.cert is not None
        assert self.issuerSubject is not None
        assert self.issuerKey is not None
        self.cert.set_issuer(self.issuerSubject)
        self.cert.sign(self.issuerKey.get_openssl_pkey(), self.digest)

    ##
    # Verify the authenticity of a certificate.
    # @param pkey is a Keypair object representing a public key. If pkey
    #     did not sign the certificate, then an exception will be thrown.

    def verify(self, pkey):
        # pyOpenSSL does not have a way to verify signatures
        m2x509 = X509.load_cert_string(self.save_to_string())
        m2pkey = pkey.get_m2_pkey()
        # verify it
        return m2x509.verify(m2pkey)

        # XXX alternatively, if openssl has been patched, do the much simpler:
        # try:
        #     self.cert.verify(pkey.get_openssl_key())
        #     return 1
        # except:
        #     return 0

    ##
    # Return True if pkey is identical to the public key that is contained in the certificate.
    # @param pkey Keypair object

    def is_pubkey(self, pkey):
        return self.get_pubkey().is_same(pkey)

    ##
    # Given a certificate cert, verify that this certificate was signed by the
    # public key contained in cert. Throw an exception otherwise.
    #
    # @param cert certificate object

    def is_signed_by_cert(self, cert):
        k = cert.get_pubkey()
        result = self.verify(k)
        return result

    ##
    # Set the parent certificate.
    #
    # @param p certificate object.

    def set_parent(self, p):
        self.parent = p

    ##
    # Return the certificate object of the parent of this certificate.

    def get_parent(self):
        return self.parent

    ##
    # Verification examines a chain of certificates to ensure that each parent
    # signs the child, and that some certificate in the chain is signed by a
    # trusted certificate.
    #
    # Verification is a basic recursion: <pre>
    #     if this_certificate was signed by trusted_certs:
    #         return
    #     else
    #         return verify_chain(parent, trusted_certs)
    # </pre>
    #
    # At each recursion, the parent is tested to ensure that it did sign the
    # child. If a parent did not sign a child, then an exception is thrown. If
    # the bottom of the recursion is reached and the certificate does not match
    # a trusted root, then an exception is thrown.
    # Also require that parents are CAs.
    #
    # @param trusted_certs is a list of certificates that are trusted.

    def verify_chain(self, trusted_certs=None):
        # Verify a chain of certificates. Each certificate must be signed by
        # the public key contained in its parent. The chain is recursed
        # until a certificate is found that is signed by a trusted root.

        # verify expiration time
        if self.cert.has_expired():
            raise CertExpired(self.get_printable_subject(), "client cert")

        # if this cert is signed by a trusted_cert, then we are set
        for trusted_cert in trusted_certs:
            if self.is_signed_by_cert(trusted_cert):
                # verify expiration of trusted_cert ?
                if not trusted_cert.cert.has_expired():
                    return trusted_cert
                else:
                    raise CertExpired(self.get_printable_subject(),
                                      " signer trusted_cert %s" % trusted_cert.get_printable_subject())

        # if there is no parent, then no way to verify the chain
        if not self.parent:
            raise CertMissingParent(self.get_printable_subject() +
                                    ": Issuer %s is not one of the %d trusted roots, and cert has no parent." %
                                    (self.get_issuer(), len(trusted_certs)))

        # if it wasn't signed by the parent...
        if not self.is_signed_by_cert(self.parent):
            raise CertNotSignedByParent("%s: Parent %s, issuer %s"
                                        % (self.get_printable_subject(),
                                           self.parent.get_printable_subject(),
                                           self.get_issuer()))

        # Confirm that the parent is a CA. Only CAs can be trusted as signers.
        # Note that trusted roots are not parents, so don't need to be CAs.
        # Ugly - cert objects aren't parsed so we need to read the
        # extension and hope there are no other basicConstraints
        if not self.parent.isCA and not (self.parent.get_extension('basicConstraints') == 'CA:TRUE'):
            raise CertNotSignedByParent("%s: Parent %s not a CA"
                                        % (self.get_printable_subject(), self.parent.get_printable_subject()))

        # if the parent isn't verified...
        self.parent.verify_chain(trusted_certs)

        return

    ### more introspection

    def get_extensions(self):
        # pyOpenSSL does not have a way to get extensions
        triples = []
        m2x509 = X509.load_cert_string(self.save_to_string())
        nb_extensions = m2x509.get_ext_count()
        for i in range(nb_extensions):
            ext = m2x509.get_ext_at(i)
            triples.append((ext.get_name(), ext.get_value(), ext.get_critical(),))
        return triples

    def get_data_names(self):
        return self.data.keys()

    def get_all_datas(self):
        triples = self.get_extensions()
        for name in self.get_data_names():
            triples.append((name, self.get_data(name), 'data',))
        return triples

    # only informative
    def get_filename(self):
        return getattr(self, 'filename', None)

    def dump(self, *args, **kwargs):
        print(self.dump_string(*args, **kwargs))

    def dump_string(self, show_extensions=False):
        result = ""
        result += "CERTIFICATE for %s\n" % self.get_printable_subject()
        result += "Issued by %s\n" % self.get_issuer()
        filename = self.get_filename()
        if filename:
            result += "Filename %s\n" % filename
        if show_extensions:
            all_datas = self.get_all_datas()
            result += " has %d extensions/data attached" % len(all_datas)
            for (n, v, c) in all_datas:
                if c == 'data':
                    result += "   data: %s=%s\n" % (n, v)
                else:
                    result += "   ext: %s (crit=%s)=<<<%s>>>\n" % (n, c, v)
        return result
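# A minimal usage sketch (not part of the original module) of the signing flow
# implemented above: create a CA keypair and certificate, sign a child
# certificate with it, then walk the chain with verify_chain(). The names are
# illustrative.
def _example_certificate_chain():
    ca_key = Keypair(create=True)
    ca_cert = Certificate(subject="example-ca")
    ca_cert.set_pubkey(ca_key)
    ca_cert.set_is_ca(True)
    ca_cert.set_issuer(ca_key, subject="example-ca")  # self-signed root
    ca_cert.sign()

    child_key = Keypair(create=True)
    child_cert = Certificate(subject="example-child")
    child_cert.set_pubkey(child_key)
    child_cert.set_issuer(ca_key, cert=ca_cert)
    child_cert.sign()
    child_cert.set_parent(ca_cert)

    # Raises CertNotSignedByParent/CertMissingParent/CertExpired on failure;
    # succeeds here because the root is both the parent and a trusted cert.
    return child_cert.verify_chain(trusted_certs=[ca_cert])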
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
                        print_function, unicode_literals)

import os
import re

from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.config import Config
from pants.base.exceptions import TaskError
from pants.base.target import Target
from pants.process.xargs import Xargs


class Scalastyle(NailgunTask, JvmToolTaskMixin):
  """Checks scala source files to ensure they're stylish.

  Scalastyle only checks against scala sources in non-synthetic targets.

  Scalastyle is configured via the 'scalastyle' pants.ini section.

  * ``config`` - Required path of the scalastyle configuration file. If the file doesn't exist,
    the task will throw.
  * ``excludes`` - Optional path of an excludes file that contains lines of regular expressions
    used to exclude matching files from style checks. File names matched against these regular
    expressions are relative to the repository root (e.g.: com/twitter/mybird/MyBird.scala). If
    not specified, all scala sources in the targets will be checked. If the file doesn't exist,
    the task will throw.
  """

  _CONFIG_SECTION = 'scalastyle'
  _CONFIG_SECTION_CONFIG_OPTION = 'config'
  _CONFIG_SECTION_EXCLUDES_OPTION = 'excludes'
  _SCALA_SOURCE_EXTENSION = '.scala'

  _MAIN = 'org.scalastyle.Main'

  _scalastyle_config = None
  _scalastyle_excludes = None

  @classmethod
  def register_options(cls, register):
    super(Scalastyle, cls).register_options(register)
    register('--skip', action='store_true', help='Skip scalastyle.')

  def __init__(self, *args, **kwargs):
    super(Scalastyle, self).__init__(*args, **kwargs)
    self._initialize_config()
    self._scalastyle_bootstrap_key = 'scalastyle'
    self.register_jvm_tool(self._scalastyle_bootstrap_key, ['//:scalastyle'])

  @property
  def config_section(self):
    return self._CONFIG_SECTION

  def _initialize_config(self):
    scalastyle_config = self.context.config.get(
      self._CONFIG_SECTION, self._CONFIG_SECTION_CONFIG_OPTION)

    # The Scalastyle task isn't wired up in pants by default, but if it is installed
    # via plugin, then the config file setting is required.
    if not scalastyle_config:
      raise Config.ConfigError(
        'Scalastyle config is missing from section[{section}] option[{setting}] in '
        'pants.ini.'.format(
          section=self._CONFIG_SECTION,
          setting=self._CONFIG_SECTION_CONFIG_OPTION))

    # And the config setting value must be a valid file.
    if not os.path.exists(scalastyle_config):
      raise Config.ConfigError(
        'Scalastyle config file specified in section[{section}] option[{setting}] in pants.ini '
        'does not exist: {file}'.format(
          section=self._CONFIG_SECTION,
          setting=self._CONFIG_SECTION_CONFIG_OPTION,
          file=scalastyle_config))

    excludes_file = self.context.config.get(
      self._CONFIG_SECTION, self._CONFIG_SECTION_EXCLUDES_OPTION)

    scalastyle_excludes = set()
    if excludes_file:
      # The excludes setting is optional, but if specified, must be a valid file.
      if not os.path.exists(excludes_file):
        raise Config.ConfigError(
          'Scalastyle excludes file specified in section[{section}] option[{setting}] in '
          'pants.ini does not exist: {file}'.format(
            section=self._CONFIG_SECTION,
            setting=self._CONFIG_SECTION_EXCLUDES_OPTION,
            file=excludes_file))
      with open(excludes_file) as fh:
        for pattern in fh.readlines():
          scalastyle_excludes.add(re.compile(pattern.strip()))
          self.context.log.debug(
            'Scalastyle file exclude pattern: {pattern}'.format(pattern=pattern))
    else:
      # excludes setting is optional.
      self.context.log.debug(
        'Unable to get section[{section}] option[{setting}] value in pants.ini. '
        'All scala sources will be checked.'.format(
          section=self._CONFIG_SECTION,
          setting=self._CONFIG_SECTION_EXCLUDES_OPTION))

    # Only transfer the local variables to the instance state at the end, to
    # minimize side effects.
    self._scalastyle_config = scalastyle_config or None
    self._scalastyle_excludes = scalastyle_excludes or None

  @property
  def _should_skip(self):
    return self.get_options().skip

  def _get_non_synthetic_scala_targets(self, targets):
    return filter(
      lambda target: isinstance(target, Target)
                     and target.has_sources(self._SCALA_SOURCE_EXTENSION)
                     and (not target.is_synthetic),
      targets)

  def _should_include_source(self, source_filename):
    if not self._scalastyle_excludes:
      return True
    for exclude in self._scalastyle_excludes:
      if exclude.match(source_filename):
        return False
    return True

  def _get_non_excluded_scala_sources(self, scala_targets):
    # Get all the sources from the targets with the path relative to build root.
    scala_sources = list()
    for target in scala_targets:
      scala_sources.extend(target.sources_relative_to_buildroot())

    # make sure only the sources with scala extension stay.
    scala_sources = filter(
      lambda filename: filename.endswith(self._SCALA_SOURCE_EXTENSION),
      scala_sources)

    # filter out all sources matching exclude patterns, if specified in config.
    scala_sources = filter(self._should_include_source, scala_sources)

    return scala_sources

  def execute(self):
    if self._should_skip:
      self.context.log.info('Skipping scalastyle.')
      return

    targets = self._get_non_synthetic_scala_targets(self.context.targets())
    self.context.log.debug('Non synthetic scala targets to be checked:')
    for target in targets:
      self.context.log.debug('  {address_spec}'.format(address_spec=target.address.spec))

    scala_sources = self._get_non_excluded_scala_sources(targets)
    self.context.log.debug('Non excluded scala sources to be checked:')
    for source in scala_sources:
      self.context.log.debug('  {source}'.format(source=source))

    if scala_sources:
      def call(srcs):
        cp = self.tool_classpath(self._scalastyle_bootstrap_key)
        return self.runjava(classpath=cp,
                            main=self._MAIN,
                            args=['-c', self._scalastyle_config] + srcs)
      result = Xargs(call).execute(scala_sources)
      if result != 0:
        raise TaskError('java {entry} ... exited non-zero ({exit_code})'.format(
          entry=Scalastyle._MAIN, exit_code=result))
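# A hedged sketch of the pants.ini wiring the Scalastyle docstring above
# describes. The section and option names come from the class constants
# (_CONFIG_SECTION, 'config', 'excludes'); the two file paths are hypothetical:
#
#   [scalastyle]
#   config: build-support/scalastyle/scalastyle-config.xml
#   excludes: build-support/scalastyle/scalastyle-excludes.txt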
""" iframe-eventsource transport """ import asyncio from aiohttp import web, hdrs from sockjs.protocol import ENCODING from .base import StreamingTransport from .utils import session_cookie class EventsourceTransport(StreamingTransport): def send(self, text): blob = ''.join(('data: ', text, '\r\n\r\n')).encode(ENCODING) self.response.write(blob) self.size += len(blob) if self.size > self.maxsize: return True else: return False @asyncio.coroutine def process(self): headers = list( ((hdrs.CONTENT_TYPE, 'text/event-stream; charset=UTF-8'), (hdrs.CACHE_CONTROL, 'no-store, no-cache, must-revalidate, max-age=0')) + session_cookie(self.request)) # open sequence (sockjs protocol) resp = self.response = web.StreamResponse(headers=headers) resp.start(self.request) resp.write(b'\r\n') # handle session yield from self.handle_session() return resp
import pecan
from pecan import rest
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan

from payload.cache import models
from payload.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class QueueCaller(object):
    """API representation of a queue caller."""

    created_at = wtypes.text
    member_uuid = wtypes.text
    name = wtypes.text
    number = wtypes.text
    position = int
    queue_id = wtypes.text
    status = wtypes.text
    status_at = wtypes.text
    uuid = wtypes.text

    def __init__(self, **kwargs):
        self.fields = vars(models.QueueCaller)
        for k in self.fields:
            setattr(self, k, kwargs.get(k))


class QueueCallersController(rest.RestController):
    """REST Controller for queue callers."""

    @wsme_pecan.wsexpose([QueueCaller], wtypes.text)
    def get_all(self, queue_id):
        """List callers from the specified queue.

        .. http:get:: /queues/:queue_uuid/callers

        **Example request**:

        .. sourcecode:: http

           GET /queues/cc096e0b-0c96-4b8b-b812-ef456f361ee3/callers

        **Example response**:

        .. sourcecode:: http

           [
             {
               "created_at": "2014-12-12T02:05:14Z",
               "name": "Paul Belanger",
               "number": "6135551234",
               "position": 0,
               "queue_id": "cc096e0b-0c96-4b8b-b812-ef456f361ee3",
               "uuid": "e5814fee-6e8a-4771-8edd-ea413eff57f1"
             },
             {
               "created_at": "2014-12-12T02:07:05Z",
               "name": "Leif Madsen",
               "number": "9055555678",
               "position": 1,
               "queue_id": "cc096e0b-0c96-4b8b-b812-ef456f361ee3",
               "uuid": "4b4fa110-be14-45b7-a998-2219ab8bee6f"
             }
           ]
        """
        res = pecan.request.cache_api.list_queue_callers(queue_id=queue_id)

        return res

    @wsme_pecan.wsexpose(QueueCaller, wtypes.text, wtypes.text)
    def get_one(self, queue_id, uuid):
        """Get a single caller from the specified queue.

        .. http:get:: /queues/:queue_uuid/callers/:caller_uuid

        **Example request**:

        .. sourcecode:: http

           GET /queues/cc096e0b-0c96-4b8b-b812-ef456f361ee3/callers/\
e5814fee-6e8a-4771-8edd-ea413eff57f1

        **Example response**:

        .. sourcecode:: http

           {
             "created_at": "2014-12-12T02:05:14Z",
             "name": "Paul Belanger",
             "number": "6135551234",
             "position": 0,
             "queue_id": "cc096e0b-0c96-4b8b-b812-ef456f361ee3",
             "uuid": "e5814fee-6e8a-4771-8edd-ea413eff57f1"
           }
        """
        result = pecan.request.cache_api.get_queue_caller(queue_id=queue_id, uuid=uuid)

        return result
""" DOCS for dataenc as a module When run it should go through a few basic tests - see the function test() This module provides low-level functions to interleave two bits of data into each other and separate them. It will also encode this binary data to and from ascii - for inclusion in HTML, cookies or email transmission. It also provides high level functions to use these functions for time stamping passwords and password hashes, and also to check that a 'time-stamped hash' is both valid and unexpired. The check_pass function is interesting. Given an encoded and timestamped hash it compares it with the hash (using SD5) of a password. If it matches *and* is unexpired (you set the time limit) it returns a new encoded time stamp of the hash with the current time. I use this for secure, time limited, logins over CGI. (Could be stored in a cookie as well). (On the first login you will need to compare the password with the stored hash and use that to generate a time stamped hash to include in the page returned. Thereafter you can just use the check_pass function and include the time-stamped hash in a hidden form field for every action.) The binary data is interleaved on a 'bitwise' basis - every byte is mangled. -- CONSTANTS The main constant defined in dataenc.py is : TABLE = '_-0123456789' + \ 'abcdefghijklmnopqrstuvwxyz'+ \ 'NOPQRSTUVWXYZABCDEFGHIJKLM' TABLE should be exactly 64 printable characters long... or we'll all die horribly Obviously the same TABLE should be used for decoding as for encoding.... note - changing the order of the TABLE here can be used to change the mapping. Versions 1.1.2+ of TABLE uses only characters that are safe to pass in URLs (e.g. using the GET method for passing FORM data) OLD_TABLE is the previous encoding map used for versions of dataenc.py previous to 1.1.2 See the table_dec function for how to decode data encoded with that map. PSYCOIN = 1 This decides if we attempt to import psyco or not (the specialising compiler). Set to 0 to not import. If we attempt but fail to import psyco then this value will be set to 0. DATEIN = 1 As above but for the dateutils and time module. We need to import dateutils for the expired and pass_enc functions (amongst others) to work fully. FUNCTIONS Following are the docstrings extracted from the public functions : pass_enc(instring, indict = {}, **keywargs) Returns an ascii version of an SHA hash or a string, with the date/time stamped into it. e.g. For ascii safe storing of password hashes. It also accepts the following keyword args (or a dictionary conatining the following keys). (Keywords shown - with default values). lower = False, sha_hash = False, daynumber = None, timestamp = None, endleave = False Setting lower to True makes instring lowercase before hashing/encoding. If sha_hash is set to True then instead of the actual string passed in being encoded, it's SHA hash is encoded. (In either case the string can contain any binary data). If a daynumber is passed in then the daynumber will be encoded into the returned string. (daynumber is an integer representing the 'Julian day number' of a date - see the dateutils module). This can be used as a 'datestamp' for the generated code and you can detect anyone reusing old codes this way. If 'daynumber' is set to True then today's daynumber will automatically be used. (dateutils module required - otherwise it will be ignored). 
    Max allowed value for daynumber is 16777215 (9th May 41222)
    (so daynumber can be any integer from 1 to 16777215 that you want to
    'watermark' the hash with - could be used as a session ID for a CGI
    for example).
    If a timestamp is passed in it should either be timestamp = True
    meaning use 'now'. Or it should be a tuple (HOUR, MINUTES).
    HOUR should be an integer 0-23
    MINUTES should be an integer 0-59
    The time and date stamp is *binary* interleaved, before encoding, into
    the data.
    If endleave is set to True then the timestamp is interleaved more
    securely. Shouldn't be necessary in practise because the stamp is so
    short and we subsequently encode using table_enc. If the string is long
    this will slow down the process - because we interleave twice.

pass_dec(incode)
    Given a string encoded by pass_enc - it returns it decoded.
    It also extracts the datestamp and returns that.
    The return is : (instring, daynumber, timestamp)

expired(daynumber, timestamp, validity)
    Given the length of time a password is valid for, it checks if a
    daynumber/timestamp tuple is still valid.
    validity should be an integer tuple (DAYS, HOURS, MINUTES).
    Returns True for valid or False for invalid.
    Needs the dateutils module to get the current daynumber.
    unexpired is an alias for expired - because it makes for better tests.
    (The return results from the expired function are logically the wrong
    way round, expired returns True if the timestamp is *not* expired..)

check_pass(inhash, pswdhash, EXPIRE)
    Given the hash (possibly from a webpage or cookie) it checks that it is
    still valid and matches the password it is supposed to have.
    If so it returns a new hash - with the current time stamped into it.
    EXPIRE is a validity tuple to test for (see expired function)
    e.g. (0, 1, 0) means the supplied hash should be no older than 1 hour
    If the hash is expired it returns -1.
    If the pass is invalid or doesn't match the supplied pswdhash it
    returns False.
    This is a high level function that can do all your password checking
    and 'time-stamped hash' generation after initial login.

makestamp(daynumber, timestamp)
    Receives a Julian daynumber (integer 1 to 16777215) and an
    (HOUR, MINUTES) tuple timestamp.
    Returns a 5 digit string of binary characters that represent that
    date/time.
    Can receive None for either or both of these arguments.
    The function 'daycount' in dateutils will turn a date into a daynumber.

dec_datestamp(datestamp)
    Given a 5 character datestamp made by makestamp, it returns it as the
    tuple : (daynumber, timestamp).
    daynumber and timestamp can either be None *or*
    daynumber is an integer between 1 and 16777215
    timestamp is (HOUR, MINUTES)
    The function 'counttodate' in dateutils will turn a daynumber back
    into a date.

sixbit(invalue)
    Given a value, it returns a list representing the base 64 version of
    that number. Each value in the list is an integer from 0-63...
    The first member of the list is the most significant figure... down to
    the remainder.
    Should only be used for positive values.

sixtoeight(intuple)
    Given four base 64 (6-bit) digits... it returns three 8 bit digits
    that represent the same value.
    If length of intuple != 4, or any digits are > 63, it returns None.
    **NOTE** Not quite the reverse of the sixbit function.

table_enc(instring, table=TABLE)
    The actual function that performs TABLE encoding.
    It takes instring in three character chunks (three 8 bit values) and
    turns it into four 6 bit characters. Each of these 6 bit characters
    maps to a character in TABLE.
    If the length of instring is not divisible by three it is padded with
    Null bytes. The number of Null bytes to remove is then encoded as a
    semi-random character at the start of the string.
    You can pass in an alternative 64 character string to do the encoding
    with if you want.

table_dec(instring, table=TABLE)
    The function that performs TABLE decoding.
    Given a TABLE encoded string it returns the original binary data - as
    a string.
    If the data it's given is invalid (not data encoded by table_enc) it
    returns None (definition of invalid : not consisting of characters in
    the TABLE or length not len(instring) % 4 = 1).
    You can pass in an alternative 64 character string to do the decoding
    with if you want.

return_now()
    Returns the time now. As (HOUR, MINUTES).

binleave(data1, data2, endleave = False)
    Given two strings of binary data it interleaves data1 into data2 on a
    bitwise basis and returns a single string combining both. (not just
    the bytes interleaved).
    The returned string will be 4 bytes or so longer than the two strings
    passed in. Use binunleave to return the two strings again.
    Even if both strings passed in are ascii - the result will contain
    non-ascii characters. To keep ascii-safe you must subsequently encode
    with table_enc.
    Max length for the smallest data string (one string can be of
    unlimited size) is about 16meg (increasing this would be easy if
    anyone needed it - but would be very slow anyway). If either string is
    empty (or the smallest string greater than 16meg) - we return None.
    The first 4 characters of the string returned 'define' the interleave.
    (actually the size of the watermark) For added safety you could remove
    this and send separately.
    Version 1.0.0 used a bf (bitfield) object from the python cookbook.
    Version 1.1.0 uses the binary and & and or | operations and is about
    2.5 times faster. On my AMD 3000, leaving and unleaving two 20k files
    took 1.8 seconds. (instead of 4.5 previously - with Psyco enabled this
    improved to 0.4 seconds.....)
    Interleaving a file with a watermark of pretty much any size makes it
    unreadable - this is because *every* byte is changed. (Except perhaps
    a few at the end - see the endleave keyword). However it shouldn't be
    relied on if you need a really secure method of encryption. For many
    purposes it will be sufficient however.
    In practise any file not an exact multiple of the size of the
    watermark will have a chunk at the end that is untouched. To get round
    this you can set endleave = True.. which then releaves the end data
    back into itself. (and therefore takes twice as long - it shouldn't be
    necessary where you have a short watermark.)
    data2 ought to be the smaller string - or they will be swapped round
    internally. This could cause you to get them back in an unexpected
    order from binunleave.

binunleave(data)
    Given a chunk of data woven by binleave - it returns the two separate
    pieces of data.
    For the binary operations of binleave and binunleave, version 1.0.0
    used a bf (bitfield) object from the python cookbook.

class bf(object)
    the bf(object) from activestate python cookbook - by Sebastien Keim -
    Many Thanks
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/113799
    Version 1.1.0 replaced these with specific binary AND & and OR |
    operations that are about 2.5 times faster. They are 'inline' in the
    functions for speed (avoiding function calls) but are available
    separately as well.

def bittest(value, bitindex)
    This function returns the setting of any bit from a value.
    bitindex starts at 0.
def bitset(value, bitindex, bit)
    Sets a bit, specified by bitindex, in 'value' to 'bit'.
    bit should be 1 or 0

There are also the 'private functions' which actually contain the
substance of binleave and binunleave. You are welcome to 'browse' them -
but you shouldn't need to use them directly.

Any comments, suggestions and bug reports welcome.

Regards,

Fuzzy

michael AT foord DOT me DOT uk
"""

import hashlib
from random import random

DATEIN = 1
if DATEIN:
    try:
        # try to import the dateutils and time module
        from time import strftime
        from dateutils import daycount, returndate     # ,counttodate
        # counttodate turns a daynumber back into a date
    except:
        DATEIN = 0

PSYCOON = 1
if PSYCOON:
    try:
        import psyco
        psyco.full()
        from psyco.classes import *
        try:
            # psyco hinders rather than helps regular expression compilation
            psyco.cannotcompile(re.compile)
        except NameError:
            pass
    except:
        PSYCOON = 0

TABLE = '_-0123456789' + \
        'abcdefghijklmnopqrstuvwxyz'+ \
        'NOPQRSTUVWXYZABCDEFGHIJKLM'

OLD_TABLE = '!$%^&*()_-+=' + \
            'abcdefghijklmnopqrstuvwxyz'+ \
            'NOPQRSTUVWXYZABCDEFGHIJKLM'


def pass_enc(instring, indict=None, **keywargs):
    """Returns an ascii version of an SHA hash or a string, with the
    date/time stamped into it. e.g. For ascii safe storing of password
    hashes.
    It also accepts the following keyword args (or a dictionary containing
    the following keys). (Keywords shown - with default values).
    lower = False, sha_hash = False, daynumber = None, timestamp = None,
    endleave = False
    Setting lower to True makes instring lowercase before hashing/encoding.
    If sha_hash is set to True then instead of the actual string passed in
    being encoded, its SHA hash is encoded. (In either case the string can
    contain any binary data).
    If a daynumber is passed in then the daynumber will be encoded into the
    returned string. (daynumber is an integer representing the 'Julian day
    number' of a date - see the dateutils module). This can be used as a
    'datestamp' for the generated code and you can detect anyone reusing
    old codes this way. If 'daynumber' is set to True then today's
    daynumber will automatically be used. (dateutils module required -
    otherwise it will be ignored).
    Max allowed value for daynumber is 16777215 (9th May 41222)
    (so daynumber can be any integer from 1 to 16777215 that you want to
    'watermark' the hash with - could be used as a session ID for a CGI
    for example).
    If a timestamp is passed in it should either be timestamp = True
    meaning use 'now'. Or it should be a tuple (HOUR, MINUTES).
    HOUR should be an integer 0-23
    MINUTES should be an integer 0-59
    The time and date stamp is *binary* interleaved, before encoding, into
    the data.
    If endleave is set to True then the timestamp is interleaved more
    securely. Shouldn't be necessary in practise because the stamp is so
    short and we subsequently encode using table_enc. If the string is
    long this will slow down the process - because we interleave twice.
""" if indict == None: indict = {} arglist = {'lower' : False, 'sha_hash' : False, 'daynumber' : None, 'timestamp' : None, 'endleave' : False} if not indict and keywargs: # if keyword passed in instead of a dictionary - we use that indict = keywargs for keyword in arglist: # any keywords not specified we use the default if not indict.has_key(keyword): indict[keyword] = arglist[keyword] if indict['lower']: # keyword lower :-) instring = instring.lower() if indict['sha_hash']: instring = hashlib.sha1(instring).digest() if indict['daynumber'] == True: if not DATEIN: indict['daynumber'] = None else: a,b,c = returndate() indict['daynumber'] = daycount(a,b,c) # set the daycount to today if indict['timestamp']== True: if not DATEIN: indict['timestamp'] = None else: indict['timestamp'] = return_now() # set the time to now. datestamp = makestamp(indict['daynumber'], indict['timestamp']) if len(instring) == len(datestamp): instring = instring + '&mjf-end;' # otherwise we can't tell which is which when we unleave them later :-) outdata = binleave(instring, datestamp, indict['endleave']) return table_enc(outdata) # do the encoding of the actual string def pass_dec(incode): """Given a string encoded by pass_enc - it returns it decoded. It also extracts the datestamp and returns that. The return is : (instring, daynumber, timestamp) """ binary = table_dec(incode) out1, out2 = binunleave(binary) if len(out1) == 5: datestamp = out1 if out2.endswith('&mjf-end;'): out2 = out2[:-9] instring = out2 else: datestamp = out2 if out1.endswith('&mjf-end;'): out1 = out1[:-9] instring = out1 daynumber, timestamp = dec_datestamp(datestamp) return instring, daynumber, timestamp def expired(daynumber, timestamp, validity): """Given the length of time a password is valid for, it checks if a daynumber/timestamp tuple is still valid. validity should be an integer tuple (DAYS, HOURS, MINUTES). Returns True for valid or False for invalid. Needs the dateutils module to get the current daynumber. >>> a, b, c = returndate() >>> today = daycount(a, b, c) >>> h, m = return_now() >>> expired(today, (h, m-2), (0,0,1)) False >>> expired(today, (h, m-2), (0,0,10)) True >>> expired(today, (h-2, m-2), (0,1,10)) False >>> expired(today-1, (h-2, m-2), (1,1,10)) False >>> expired(today-1, (h-2, m-2), (2,1,10)) True >>> """ if not DATEIN: raise ImportError("Need the dateutils module to use the 'expired' function.") h1, m1 = timestamp d2, h2, m2 = validity a, b, c = returndate() today = daycount(a, b, c) h, m = return_now() h1 = h1 + h2 m1 = m1 + m2 daynumber = daynumber + d2 while m1 > 59: h1 += 1 m1 -= 60 while h1 > 23: daynumber += 1 h1 -= 24 daynumber += d2 if today > daynumber: return False if today < daynumber: return True if h > h1: # same day return False if h < h1: return True if m > m1: # same hour return False else: return True unexpired = expired # Technically unexpired is a better name since this function returns True if the timestamp is unexpired. def makestamp(daynumber, timestamp): """Receives a Julian daynumber (integer 1 to 16777215) and an (HOUR, MINUTES) tuple timestamp. Returns a 5 digit string of binary characters that represent that date/time. Can receive None for either or both of these arguments. The function 'daycount' in dateutils will turn a date into a daynumber. 
""" if not daynumber: datestamp = chr(0)*3 else: day1 = daynumber//65536 daynumber = daynumber % 65536 day2 = daynumber//256 daynumber = daynumber%256 datestamp = chr(day1) + chr(day2) + chr(daynumber) if not timestamp: datestamp = datestamp + chr(255)*2 else: datestamp = datestamp + chr(timestamp[0]) + chr(timestamp[1]) return datestamp def dec_datestamp(datestamp): """Given a 5 character datestamp made by makestamp, it returns it as the tuple : (daynumber, timestamp). daynumber and timestamp can either be None *or* daynumber is an integer between 1 and 16777215 timestamp is (HOUR, MINUTES) The function 'counttodate' in dateutils will turn a daynumber back into a date.""" daynumber = datestamp[:3] timechars = datestamp[3:] daynumber = ord(daynumber[0])*65536 + ord(daynumber[1])*256 + ord(daynumber[2]) if daynumber == 0: daynumber = None if ord(timechars[0]) == 255: timestamp = None else: timestamp = (ord(timechars[0]), ord(timechars[1])) return daynumber, timestamp def sixbit(invalue): """Given a value in it returns a list representing the base 64 version of that number. Each value in the list is an integer from 0-63... The first member of the list is the most significant figure... down to the remainder. Should only be used for positive values. """ if invalue < 1: # special case ! return [0] power = -1 outlist = [] test = 0 while test <= invalue: power += 1 test = pow(64,power) while power: power -= 1 outlist.append(int(invalue//pow(64,power))) invalue = invalue % pow(64,power) return outlist def sixtoeight(intuple): """Given four base 64 (6-bit) digits... it returns three 8 bit digits that represent the same value. If length of intuple != 4, or any digits are > 63, it returns None. **NOTE** Not quite the reverse of the sixbit function.""" if len(intuple) != 4: return None for entry in intuple: if entry > 63: return None value = intuple[3] + intuple[2]*64 + intuple[1]*4096 + intuple[0]*262144 val1 = value//65536 value = value % 65536 val2 = value//256 value = value % 256 return val1, val2, value def table_enc(instring, table=None): """The actual function that performs TABLE encoding. It takes instring in three character chunks (three 8 bit values) and turns it into 4 6 bit characters. Each of these 6 bit characters maps to a character in TABLE. If the length of instring is not divisible by three it is padded with Null bytes. The number of Null bytes to remove is then encoded as a semi-random character at the start of the string. You can pass in an alternative 64 character string to do the encoding with if you want. """ if table == None: table = TABLE out = [] test = len(instring) % 3 if test: instring = instring + chr(0)*(3-test) # make sure the length of instring is divisible by 3 while instring: chunk = instring[:3] instring = instring[3:] value = 65536 * ord(chunk[0]) + 256 * ord(chunk[1]) + ord(chunk[2]) newdat = sixbit(value) while len(newdat) < 4: newdat.insert(0, 0) for char in newdat: out.append(table[char]) if not test: out.insert(0, table[int(random()*21)]) # if we added 0 extra characters we add a character from 0 to 20 elif test == 1: out.insert(0, table[int(random()*21)+21]) # if we added 1 extra characters we add a character from 21 to 41 elif test == 2: out.insert(0, table[int(random()*22)+42]) # if we added 1 extra characters we add a character from 42 to 63 return ''.join(out) def table_dec(instring, table=None): """The function that performs TABLE decoding. Given a TABLE encoded string it returns the original binary data - as a string. 
def table_dec(instring, table=None):
    """The function that performs TABLE decoding.
    Given a TABLE encoded string it returns the original binary data - as
    a string.
    If the data it's given is invalid (not data encoded by table_enc) it
    returns None (definition of invalid : not consisting of characters in
    the TABLE or length not len(instring) % 4 = 1).
    You can pass in an alternative 64 character string to do the decoding
    with if you want.
    """
    if table is None:
        table = TABLE
    out = []
    # the first character encodes how many padding bytes to remove
    rem_test = table.find(instring[0])
    if rem_test == -1:
        return None
    instring = instring[1:]
    if len(instring) % 4 != 0:
        # check the length is now divisible by 4
        return None
    while instring:
        chunk = instring[:4]
        instring = instring[4:]
        newchunk = []
        for char in chunk:
            test = table.find(char)
            if test == -1:
                return None
            newchunk.append(test)
        newchars = sixtoeight(newchunk)
        if not newchars:
            return None
        for char in newchars:
            out.append(chr(char))
    if rem_test > 41:
        out = out[:-1]
    elif rem_test > 20:
        out = out[:-2]
    return ''.join(out)


def return_now():
    """Returns the time now. As (HOUR, MINUTES)."""
    return int(strftime('%H')), int(strftime('%M'))


def check_pass(inhash, pswdhash, EXPIRE):
    """Given the hash (possibly from a webpage) it checks that it is still
    valid and matches the password it is supposed to have. If so it
    returns a new hash. If expired it returns -1. If the pass is invalid
    it returns False."""
    try:
        # of course a fake or mangled password will cause an exception here
        instring, daynumber, timestamp = pass_dec(inhash)
        if not table_dec(pswdhash) == instring:
            # is the supplied pswdhash the same as the hash encoded in the page ?
            return False
        if not unexpired(daynumber, timestamp, EXPIRE):
            # this tests if the hash is still valid
            return -1
        else:
            # generate a new hash, with the current time
            return pass_enc(instring, daynumber=True, timestamp=True)
    except:
        return False
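# Sketch of the CGI login flow the module docs describe (illustrative only;
# 'stored_hash' stands in for a password hash you keep server-side):
#
#     token = pass_enc(stored_hash, daynumber=True, timestamp=True)
#     # ...embed token in a hidden form field; on the next request:
#     result = check_pass(token, table_enc(stored_hash), (0, 1, 0))
#     if result == -1:
#         pass        # token expired - force a fresh login
#     elif not result:
#         pass        # token invalid or tampered with
#     else:
#         token = result      # fresh token, valid for another hour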
def binleave(data1, data2, endleave=False):
    """Given two strings of binary data it interleaves data1 into data2 on
    a bitwise basis and returns a single string combining both. (bits
    interleaved, not just the bytes).
    The returned string will be 4 bytes or so longer than the two strings
    passed in. Use binunleave to return the two strings again.
    Even if both strings passed in are ascii - the result will contain
    non-ascii characters. To keep ascii-safe you must subsequently encode
    with table_enc.
    Max length for the smallest data string (one string can be of
    unlimited size) is about 16meg (increasing this would be easy if
    anyone needed it - but would be very slow anyway). If either string is
    empty (or the smallest string greater than 16meg) - we return None.
    The first 4 characters of the string returned 'define' the interleave.
    (actually the size of the watermark) For added safety you could remove
    this and send separately.
    Version 1.0.0 used a bf (bitfield) object from the python cookbook.
    Version 1.1.0 uses the binary and & and or | operations and is about
    2.5 times faster. On my AMD 3000, leaving and unleaving two 20k files
    took 1.8 seconds. (instead of 4.5 previously - with Psyco enabled this
    improved to 0.4 seconds.....)
    Interleaving a file with a watermark of pretty much any size makes it
    unreadable - this is because *every* byte is changed. (Except perhaps
    a few at the end - see the endleave keyword). However it shouldn't be
    relied on if you need a really secure method of encryption. For many
    purposes it will be sufficient however.
    In practise any file not an exact multiple of the size of the
    watermark will have a chunk at the end that is untouched. To get round
    this you can set endleave = True.. which then releaves the end data
    back into itself. (and therefore takes twice as long - it shouldn't be
    necessary where you have a short watermark.)
    data2 ought to be the smaller string - or they will be swapped round
    internally. This could cause you to get them back in an unexpected
    order from binunleave.
    """
    header, out, data1 = internalfunc(data1, data2)
    # making it a 4 byte header
    header = chr(int(random()*128)) + header
    if endleave and data1 and len(data1) < 65536:
        header, out, data1 = internalfunc(header + out, data1)
        header = chr(int(random()*128) + 128) + header
    return header + out + data1


def binunleave(data):
    """Given a chunk of data woven by binleave - it returns the two
    separate pieces of data."""
    header = data[0]
    data = data[1:]
    data1, data2 = internalfunc2(data)
    if ord(header) > 127:
        data = data2 + data1
        data = data[1:]
        data1, data2 = internalfunc2(data)
    return data1, data2


class bf(object):
    """the bf(object) from activestate python cookbook - by Sebastien Keim
    - Many Thanks
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/113799"""

    def __init__(self, value=0):
        self._d = value

    def __getitem__(self, index):
        return (self._d >> index) & 1

    def __setitem__(self, index, value):
        value = (value&1L)<<index
        mask = (1L)<<index
        self._d = (self._d & ~mask) | value

    def __getslice__(self, start, end):
        mask = 2L**(end - start) - 1
        return (self._d >> start) & mask

    def __setslice__(self, start, end, value):
        mask = 2L**(end - start) - 1
        value = (value & mask) << start
        mask = mask << start
        self._d = (self._d & ~mask) | value
        return (self._d >> start) & mask

    def __int__(self):
        return self._d


def bittest(value, bitindex):
    """This function returns the setting of any bit from a value.
    bitindex starts at 0.
    """
    return (value&(1<<bitindex))>>bitindex


def bitset(value, bitindex, bit):
    """Sets a bit, specified by bitindex, in 'value' to 'bit'.
    bit should be 1 or 0
    bitindex starts at 0.
    """
    bit = (bit&1L)<<bitindex
    mask = (1L)<<bitindex
    # set that bit of value to 0 with an & operation
    # and then or it with the 'bit'
    return (value & ~mask) | bit
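# Quick illustration of the bit helpers (a sketch, not part of the original
# source); 5 is 0b101, so bits 0 and 2 are set:
#
#     bittest(5, 0)    ->  1
#     bittest(5, 1)    ->  0
#     bitset(5, 1, 1)  ->  7      (0b101 with bit 1 set is 0b111)
#     bitset(5, 0, 0)  ->  4      (0b101 with bit 0 cleared is 0b100)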
def internalfunc(data1, data2):
    """Used by binleave.
    This function interleaves data2 into data1 a little chunk at a time."""
    if len(data2) > len(data1):
        # make sure that data1 always has the longer string,
        # making data2 the watermark
        data1, data2 = data2, data1
    if not data2 or not data1:
        # check for empty data
        return None
    length = len(data2)
    if length >= pow(2, 24):
        # if the strings are oversized
        return None
    # this is how often we should interleave bits
    multiple = len(data1) // length
    if multiple > 65535:
        # in practise we'll set to max 65535
        multiple = 65535
    header1 = length // 65536
    header3 = length % 65536
    header2 = header3 // 256
    header3 = header3 % 256
    # these are the 3 bytes we will put at the start of the string
    header = chr(header1) + chr(header2) + chr(header3)
    data1 = [ord(char) for char in list(data1)]
    startpos = 0
    data2 = [ord(char) for char in list(data2)]
    BINLIST = [1, 2, 4, 8, 16, 32, 64, 128]
    out = []
    # the total number of bits we'll have
    bitlen = multiple*8 + 8
    while data2:
        chunklist = data1[startpos:startpos + multiple]
        startpos = startpos + multiple
        heapobj = 0
        mainobj = data2.pop(0)
        charobj = chunklist.pop(0)
        bitindex = 0
        mainindex = 0
        heapindex = 0
        charindex = 0
        while mainindex < bitlen:
            # print mainindex, heapindex, charindex, bitindex
            if heapindex == 8:
                # if we've got all 8 bits
                out.append(chr(heapobj))
                heapobj = 0
                heapindex = 0
            if not mainindex % (multiple+1):
                # we've got to a point where we should nick another bit
                # from the watermark byte
                if mainobj & BINLIST[bitindex]:
                    # if the bit at bitindex is set, set the bit at heapindex
                    heapobj = heapobj | BINLIST[heapindex]
                heapindex += 1
                bitindex += 1
                mainindex += 1
                continue
            if charindex == 7 and chunklist:
                # we've used up the current character from the chunk
                if charobj & BINLIST[charindex]:
                    heapobj = heapobj | BINLIST[heapindex]
                charobj = chunklist.pop(0)
                charindex = 0
                heapindex += 1
                mainindex += 1
                continue
            if charobj & BINLIST[charindex]:
                heapobj = heapobj | BINLIST[heapindex]
            heapindex += 1
            charindex += 1
            mainindex += 1
        if heapindex == 8:
            # if we've got all 8 bits.. but the loop has ended...
            out.append(chr(heapobj))
    return header, ''.join(out), \
        ''.join([chr(char) for char in data1[startpos:]])
def internalfunc2(data):
    """Used by binunleave.
    This function extracts data that has been interleaved using binleave."""
    # extract the length of the watermark
    lenstr = data[:3]
    data = list(data[3:])
    # length of watermark
    length2 = ord(lenstr[0])*65536 + ord(lenstr[1])*256 + ord(lenstr[2])
    # overall length
    length1 = len(data) - length2
    multiple = length1 // length2 + 1
    if multiple > 65536:
        # in practise we'll set to max 65535 + 1
        multiple = 65536
    bitlen = multiple*8
    out1 = []
    out = []
    index = 0
    BINLIST = [1, 2, 4, 8, 16, 32, 64, 128]
    while index < length2:
        index += 1
        chunk = data[:multiple]
        data = data[multiple:]
        # turn chunk into a list of its values
        chunklist = [ord(char) for char in chunk]
        heapobj = 0
        outbyte = 0
        charobj = chunklist.pop(0)
        bitindex = 0
        mainindex = 0
        heapindex = 0
        charindex = 0
        while mainindex < bitlen:
            # print mainindex, heapindex, charindex, bitindex
            if heapindex == 8:
                # if we've got all 8 bits
                out.append(chr(heapobj))
                heapobj = 0
                heapindex = 0
            if not mainindex % multiple:
                # we've got to a point where we should add another bit
                # to the watermark byte
                if charobj & BINLIST[charindex]:
                    outbyte = outbyte | BINLIST[bitindex]
                if not charindex == 7:
                    charindex += 1
                else:
                    charobj = chunklist.pop(0)
                    charindex = 0
                bitindex += 1
                mainindex += 1
                continue
            if charindex == 7 and chunklist:
                # we've used up the current character from the chunk
                if charobj & BINLIST[charindex]:
                    heapobj = heapobj | BINLIST[heapindex]
                charobj = chunklist.pop(0)
                charindex = 0
                heapindex += 1
                mainindex += 1
                continue
            if charobj & BINLIST[charindex]:
                heapobj = heapobj | BINLIST[heapindex]
            heapindex += 1
            charindex += 1
            mainindex += 1
        if heapindex == 8:
            # if we've got all 8 bits.. but the loop has ended...
            out.append(chr(heapobj))
        out1.append(chr(outbyte))
    return ''.join(out1), ''.join(out + data)


def test():
    # the test suite
    from time import clock
    from os.path import exists
    print 'Printing the TABLE : '
    index = 0
    while index < len(TABLE):
        print TABLE[index], TABLE.find(TABLE[index])
        index += 1
    print '\nEnter test password to encode using table_enc :\n(Hit enter to continue past this)\n'
    while True:
        dummy = raw_input('>>...')
        if not dummy:
            break
        test = table_enc(dummy)
        test2 = table_dec(test)
        print test
        print 'length : ', len(test), ' modulo 4 of length - 1 : ', (len(test)-1) % 4
        print 'Decoded : ', test2
        print 'Length dec : ', len(test2)
    print '\nEnter password - to timestamp and then encode :\n(Hit enter to continue past this)\n'
    while True:
        instring = raw_input('>>...')
        if not instring:
            break
        code = pass_enc(instring, sha_hash=False, daynumber=True, timestamp=True)
        print code
        print pass_dec(code)
    print '\n\nTesting interleaving a 1000 byte random string with a 1500 byte random string :'
    print
    print 'Overall length of combined string : ',
    a = 0
    b = ''
    c = ''
    while a < 1000:
        a += 1
        b = b + chr(int(random()*256))
        c = c + chr(int(random()*256))
    while a < 1500:
        a += 1
        c = c + chr(int(random()*256))
    d = clock()
    test = binleave(c, b, True)
    print len(test)
    a1, a2 = binunleave(test)
    print 'Time taken (including print statements ;-) ', str(clock()-d)[:6], ' seconds'
    print 'Test for equality of extracted data against original :'
    print a1 == b
    print a2 == c
    if exists('test1.zip') and exists('test2.zip'):
        print
        print "Reading 'test1.zip' and 'test2.zip'"
        print "Interleaving them together and writing the combined file out as 'test3.zip'"
        print "Then unleaving them and writing 'test1.zip' back out as 'test4.zip'",
        print " to confirm it is unchanged by the process"
        a = file('test1.zip', 'rb')
        b = a.read()
        a.close()
        a = file('test2.zip', 'rb')
        c = a.read()
        a.close()
        d = clock()
        test = binleave(c, b, True)
        print len(test)
        a = file('test3.zip', 'wb')
        a.write(test)
        a.close()
        a1, a2 = binunleave(test)
        print str(clock()-d)[:6]
        a = file('test4.zip', 'wb')
        a.write(a1)
        a.close()
    else:
        print
        print 'Unable to perform final test.'
        print "We need two files to use for the test : 'test1.zip' and 'test2.zip'"
        print "We then interleave them together, and write the combined file out as 'test3.zip'"
        print "Then we unleave them again, and write 'test1.zip' back out as 'test4.zip'",
        print "(So we can confirm that it's unchanged by the process.)"

if __name__ == '__main__':
    test()

"""
BUGS
No more known bugs... yet. I'm sure they'll surface.

ISSUES
binleave and binunleave are still quite slow. For stamping small password
hashes with a date stamp it's fast enough - for weaving larger files
together it's *too slow*.
Also for weaving similar sized files together we may be better with a
pattern of 2 bits of watermark per 3 bits of string. (or a 3 to 4 or 5 to
7 etc..) Currently it will only work with 1 bit of watermark per 1 or 2 or
3 or 4 etc bits of main string. (Exact multiples)
Again, for small watermarks this works fine - and as that is all I'm using
it for I'm not inclined to change it. The logic would be simple - just
fiddly.

TODO :
Might make it a simple application - so it can be used from the command
line for encoding, decoding, timestamping and combining files.....
Could replace use of the BINLIST and the if test with a single inline
statement with more << >> in binleave and binunleave
Could move binleave and binunleave into C

CHANGELOG
13-09-04        Version 1.1.5
Increased speed in table_enc and table_dec.

30-08-04        Version 1.1.4
Slight docs improvement.
Slight speed improvement in binleave and binunleave.

22-08-04        Version 1.1.3
Added the unexpired alias and the check_pass function.
Changed license text.
Minor preemptive bugfix in some default values.

11-04-04        Version 1.1.2
Added the expired function for testing validity of timestamps.
Changed the TABLE to be URL safe for passing in forms using the 'GET'
method. Added OLD_TABLE with the old encoding, and gave table_dec and
table_enc the ability to receive an explicit TABLE.

07-04-04        Version 1.1.1
Improved the tests a bit.
Corrected a bug that affected large files or large files with small
watermarks.

05-04-04        Version 1.1.0
Replaced the bf object with much faster bitwise logical operations. It is
now about 2.5 times faster. With Psyco enabled it becomes 11 times faster
than the first version....
Added the bit setting and testing operations as functions.

03-04-04        Version 1.0.0
Initial testing is a success.
"""
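# End-to-end sketch for the dataenc module above (illustrative only, not
# part of the original source; assumes dataenc.py and its dateutils
# dependency are importable, under the same Python 2 the module targets).
from dataenc import pass_enc, pass_dec, binleave, binunleave

# Stamp a password hash with today's date and the current time.
token = pass_enc('my password', sha_hash=True, daynumber=True,
                 timestamp=True)
instring, daynumber, timestamp = pass_dec(token)
# instring is the SHA-1 digest of 'my password'; daynumber/timestamp
# record when the token was minted.

# Weave a short watermark through a longer string and recover both.
woven = binleave('a reasonably long piece of data', 'mark')
first, second = binunleave(woven)
# Both original strings come back, though possibly swapped round - see
# the binleave docstring.
assert sorted([first, second]) == \
    sorted(['a reasonably long piece of data', 'mark'])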
import _thread as thread
import logging
import operator
import sys
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Semaphore
from threading import Thread

from docker.errors import APIError
from docker.errors import ImageNotFound

from compose.cli.colors import AnsiMode
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import CompletedUnsuccessfully
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError

log = logging.getLogger(__name__)

STOP = object()


class GlobalLimit:
    """Simple class to hold a global semaphore limiter for a project. This
    class should be treated as a singleton that is instantiated when the
    project is.
    """
    global_limiter = Semaphore(PARALLEL_LIMIT)

    @classmethod
    def set_global_limit(cls, value):
        if value is None:
            value = PARALLEL_LIMIT
        cls.global_limiter = Semaphore(value)


def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
    """
    Watch events from a parallel execution, update status and fill errors
    and results. Returns the exception to re-raise, if any.
    """
    error_to_reraise = None

    for obj, result, exception in events:
        if exception is None:
            if fail_check is not None and fail_check(obj):
                writer.write(msg, get_name(obj), 'failed', red)
            else:
                writer.write(msg, get_name(obj), 'done', green)
            results.append(result)
        elif isinstance(exception, ImageNotFound):
            # This is to bubble up ImageNotFound exceptions to the client so we
            # can prompt the user if they want to rebuild.
            errors[get_name(obj)] = exception.explanation
            writer.write(msg, get_name(obj), 'error', red)
            error_to_reraise = exception
        elif isinstance(exception, APIError):
            errors[get_name(obj)] = exception.explanation
            writer.write(msg, get_name(obj), 'error', red)
        elif isinstance(exception, (OperationFailedError, HealthCheckFailed,
                                    NoHealthCheckConfigured, CompletedUnsuccessfully)):
            errors[get_name(obj)] = exception.msg
            writer.write(msg, get_name(obj), 'error', red)
        elif isinstance(exception, UpstreamError):
            writer.write(msg, get_name(obj), 'error', red)
        else:
            errors[get_name(obj)] = exception
            error_to_reraise = exception

    return error_to_reraise


def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
    """Runs func on objects in parallel while ensuring that func is run on
    an object only after it has run on all of its dependencies.

    get_deps called on object must return a collection with its dependencies.
    get_name called on object must return its name.
    fail_check is an additional failure check for cases that should display
    as a failure in the CLI logs, but don't raise an exception (such as
    attempting to start 0 containers).
    """
    objects = list(objects)
    stream = sys.stderr
    writer = ParallelStreamWriter.get_or_assign_instance(ParallelStreamWriter(stream))

    for obj in objects:
        writer.add_object(msg, get_name(obj))
    for obj in objects:
        writer.write_initial(msg, get_name(obj))

    events = parallel_execute_iter(objects, func, get_deps, limit)

    errors = {}
    results = []
    error_to_reraise = parallel_execute_watch(
        events, writer, errors, results, msg, get_name, fail_check
    )

    for obj_name, error in errors.items():
        stream.write("\nERROR: for {} {}\n".format(obj_name, error))

    if error_to_reraise:
        raise error_to_reraise

    return results, errors


def _no_deps(x):
    return []


class State:
    """
    Holds the state of a partially-complete parallel operation.

    state.started:  objects being processed
    state.finished: objects which have been processed
    state.failed:   objects which either failed or whose dependencies failed
    """
    def __init__(self, objects):
        self.objects = objects

        self.started = set()
        self.finished = set()
        self.failed = set()

    def is_done(self):
        return len(self.finished) + len(self.failed) >= len(self.objects)

    def pending(self):
        return set(self.objects) - self.started - self.finished - self.failed


class NoLimit:
    def __enter__(self):
        pass

    def __exit__(self, *ex):
        pass


def parallel_execute_iter(objects, func, get_deps, limit):
    """
    Runs func on objects in parallel while ensuring that func is run on an
    object only after it has run on all of its dependencies.

    Returns an iterator of tuples which look like:

    # if func returned normally when run on object
    (object, result, None)

    # if func raised an exception when run on object
    (object, None, exception)

    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps

    if limit is None:
        limiter = NoLimit()
    else:
        limiter = Semaphore(limit)

    results = Queue()
    state = State(objects)

    while True:
        feed_queue(objects, func, get_deps, results, state, limiter)

        try:
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if event is STOP:
            break

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event


def producer(obj, func, results, limiter):
    """
    The entry point for a producer thread which runs func on a single object.
    Places a tuple on the results queue once func has either returned or
    raised.
    """
    with limiter, GlobalLimit.global_limiter:
        try:
            result = func(obj)
            results.put((obj, result, None))
        except Exception as e:
            results.put((obj, None, e))
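# Each producer thread acquires both the per-operation 'limiter' and the
# project-wide GlobalLimit semaphore, so overall concurrency is capped even
# across operations. A minimal sketch (the value 4 is arbitrary, chosen for
# illustration):
#
#     GlobalLimit.set_global_limit(4)   # at most 4 producer threads overall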
""" pending = state.pending() log.debug('Pending: {}'.format(pending)) for obj in pending: deps = get_deps(obj) try: if any(dep[0] in state.failed for dep in deps): log.debug('{} has upstream errors - not processing'.format(obj)) results.put((obj, None, UpstreamError())) state.failed.add(obj) elif all( dep not in objects or ( dep in state.finished and (not ready_check or ready_check(dep)) ) for dep, ready_check in deps ): log.debug('Starting producer thread for {}'.format(obj)) t = Thread(target=producer, args=(obj, func, results, limiter)) t.daemon = True t.start() state.started.add(obj) except (HealthCheckFailed, NoHealthCheckConfigured) as e: log.debug( 'Healthcheck for service(s) upstream of {} failed - ' 'not processing'.format(obj) ) results.put((obj, None, e)) except CompletedUnsuccessfully as e: log.debug( 'Service(s) upstream of {} did not completed successfully - ' 'not processing'.format(obj) ) results.put((obj, None, e)) if state.is_done(): results.put(STOP) class UpstreamError(Exception): pass class ParallelStreamWriter: """Write out messages for operations happening in parallel. Each operation has its own line, and ANSI code characters are used to jump to the correct line, and write over the line. """ default_ansi_mode = AnsiMode.AUTO write_lock = Lock() instance = None instance_lock = Lock() @classmethod def get_instance(cls): return cls.instance @classmethod def get_or_assign_instance(cls, writer): cls.instance_lock.acquire() try: if cls.instance is None: cls.instance = writer return cls.instance finally: cls.instance_lock.release() @classmethod def set_default_ansi_mode(cls, ansi_mode): cls.default_ansi_mode = ansi_mode def __init__(self, stream, ansi_mode=None): if ansi_mode is None: ansi_mode = self.default_ansi_mode self.stream = stream self.use_ansi_codes = ansi_mode.use_ansi_codes(stream) self.lines = [] self.width = 0 def add_object(self, msg, obj_index): if msg is None: return self.lines.append(msg + obj_index) self.width = max(self.width, len(msg + ' ' + obj_index)) def write_initial(self, msg, obj_index): if msg is None: return return self._write_noansi(msg, obj_index, '') def _write_ansi(self, msg, obj_index, status): self.write_lock.acquire() position = self.lines.index(msg + obj_index) diff = len(self.lines) - position # move up self.stream.write("%c[%dA" % (27, diff)) # erase self.stream.write("%c[2K\r" % 27) self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index, status, width=self.width)) # move back down self.stream.write("%c[%dB" % (27, diff)) self.stream.flush() self.write_lock.release() def _write_noansi(self, msg, obj_index, status): self.stream.write( "{:<{width}} ... 
{}\r\n".format( msg + ' ' + obj_index, status, width=self.width ) ) self.stream.flush() def write(self, msg, obj_index, status, color_func): if msg is None: return if self.use_ansi_codes: self._write_ansi(msg, obj_index, color_func(status)) else: self._write_noansi(msg, obj_index, status) def parallel_operation(containers, operation, options, message): parallel_execute( containers, operator.methodcaller(operation, **options), operator.attrgetter('name'), message, ) def parallel_remove(containers, options): stopped_containers = [c for c in containers if not c.is_running] parallel_operation(stopped_containers, 'remove', options, 'Removing') def parallel_pause(containers, options): parallel_operation(containers, 'pause', options, 'Pausing') def parallel_unpause(containers, options): parallel_operation(containers, 'unpause', options, 'Unpausing') def parallel_kill(containers, options): parallel_operation(containers, 'kill', options, 'Killing')
"""Tests for the storage media tool object.""" import argparse import io import os import unittest try: import win32console except ImportError: win32console = None from dfvfs.lib import definitions as dfvfs_definitions from dfvfs.lib import errors as dfvfs_errors from dfvfs.helpers import source_scanner from dfvfs.path import factory as path_spec_factory from dfvfs.resolver import resolver from dfvfs.volume import apfs_volume_system from dfvfs.volume import lvm_volume_system from dfvfs.volume import tsk_volume_system from dfvfs.volume import vshadow_volume_system from plaso.cli import storage_media_tool from plaso.cli import tools from plaso.lib import errors from tests.cli import test_lib class StorageMediaToolMediatorTest(test_lib.CLIToolTestCase): """Tests for the storage media tool mediator.""" # pylint: disable=protected-access def testFormatHumanReadableSize(self): """Tests the _FormatHumanReadableSize function.""" test_mediator = storage_media_tool.StorageMediaToolMediator() expected_size_string = '1000 B' size_string = test_mediator._FormatHumanReadableSize(1000) self.assertEqual(size_string, expected_size_string) expected_size_string = '1.0KiB / 1.0kB (1024 B)' size_string = test_mediator._FormatHumanReadableSize(1024) self.assertEqual(size_string, expected_size_string) expected_size_string = '976.6KiB / 1.0MB (1000000 B)' size_string = test_mediator._FormatHumanReadableSize(1000000) self.assertEqual(size_string, expected_size_string) expected_size_string = '1.0MiB / 1.0MB (1048576 B)' size_string = test_mediator._FormatHumanReadableSize(1048576) self.assertEqual(size_string, expected_size_string) def testPrintAPFSVolumeIdentifiersOverview(self): """Tests the _PrintAPFSVolumeIdentifiersOverview function.""" test_file_path = self._GetTestFilePath(['apfs.dmg']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.PREFERRED_GPT_BACK_END, location='/p1', parent=test_raw_path_spec) test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/', parent=test_tsk_partition_path_spec) volume_system = apfs_volume_system.APFSVolumeSystem() volume_system.Open(test_apfs_container_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) test_mediator._PrintAPFSVolumeIdentifiersOverview(volume_system, ['apfs1']) file_object.seek(0, os.SEEK_SET) output_data = file_object.read() expected_output_data = [ b'The following Apple File System (APFS) volumes were found:', b'', b'Identifier Name', b'apfs1 SingleVolume', b'', b''] if not win32console: # Using join here since Python 3 does not support format of bytes. 
expected_output_data[2] = b''.join([ b'\x1b[1m', expected_output_data[2], b'\x1b[0m']) self.assertEqual(output_data.split(b'\n'), expected_output_data) def testPrintLVMVolumeIdentifiersOverview(self): """Tests the _PrintLVMVolumeIdentifiersOverview function.""" test_file_path = self._GetTestFilePath(['lvm.raw']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_lvm_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec) volume_system = lvm_volume_system.LVMVolumeSystem() volume_system.Open(test_lvm_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) test_mediator._PrintLVMVolumeIdentifiersOverview( volume_system, ['lvm1', 'lvm2']) file_object.seek(0, os.SEEK_SET) output_data = file_object.read() expected_output_data = [ b'The following Logical Volume Manager (LVM) volumes were found:', b'', b'Identifier', b'lvm1', b'lvm2', b'', b''] if not win32console: # Using join here since Python 3 does not support format of bytes. expected_output_data[2] = b''.join([ b'\x1b[1m', expected_output_data[2], b'\x1b[0m']) self.assertEqual(output_data.split(b'\n'), expected_output_data) def testPrintTSKPartitionIdentifiersOverview(self): """Tests the _PrintTSKPartitionIdentifiersOverview function.""" test_file_path = self._GetTestFilePath(['tsk_volume_system.raw']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, parent=test_raw_path_spec) volume_system = tsk_volume_system.TSKVolumeSystem() volume_system.Open(test_tsk_partition_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) test_mediator._PrintTSKPartitionIdentifiersOverview( volume_system, ['p1', 'p2']) file_object.seek(0, os.SEEK_SET) output_data = file_object.read() expected_output_data = [ b'The following partitions were found:', b'', b'Identifier Offset (in bytes) Size (in bytes)', (b'p1 512 (0x00000200) 175.0KiB / 179.2kB ' b'(179200 B)'), b'p2 180224 (0x0002c000) 1.2MiB / 1.3MB (1294336 B)', b'', b''] if not win32console: # Using join here since Python 3 does not support format of bytes. 
expected_output_data[2] = b''.join([ b'\x1b[1m', expected_output_data[2], b'\x1b[0m']) self.assertEqual(output_data.split(b'\n'), expected_output_data) def testPrintVSSStoreIdentifiersOverview(self): """Tests the _PrintVSSStoreIdentifiersOverview function.""" test_file_path = self._GetTestFilePath(['vsstest.qcow2']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_qcow_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=test_os_path_spec) test_vss_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_VSHADOW, parent=test_qcow_path_spec) volume_system = vshadow_volume_system.VShadowVolumeSystem() volume_system.Open(test_vss_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) test_mediator._PrintVSSStoreIdentifiersOverview( volume_system, ['vss1', 'vss2']) file_object.seek(0, os.SEEK_SET) output_data = file_object.read() expected_output_data = [ b'The following Volume Shadow Snapshots (VSS) were found:', b'', b'Identifier Creation Time', b'vss1 2013-12-03 06:35:09.7363787', b'vss2 2013-12-03 06:37:48.9190583', b'', b''] if not win32console: # Using join here since Python 3 does not support format of bytes. expected_output_data[2] = b''.join([ b'\x1b[1m', expected_output_data[2], b'\x1b[0m']) self.assertEqual(output_data.split(b'\n'), expected_output_data) # TODO: add tests for _ReadSelectedVolumes def testParseVolumeIdentifiersString(self): """Tests the ParseVolumeIdentifiersString function.""" test_mediator = storage_media_tool.StorageMediaToolMediator() volume_identifiers = test_mediator.ParseVolumeIdentifiersString('') self.assertEqual(volume_identifiers, []) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('all') self.assertEqual(volume_identifiers, ['all']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('v1') self.assertEqual(volume_identifiers, ['v1']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('1') self.assertEqual(volume_identifiers, ['v1']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('1,3') self.assertEqual(volume_identifiers, ['v1', 'v3']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('1..3') self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('v1..v3') self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3']) volume_identifiers = test_mediator.ParseVolumeIdentifiersString('1..3,5') self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3', 'v5']) with self.assertRaises(ValueError): test_mediator.ParseVolumeIdentifiersString('bogus') with self.assertRaises(ValueError): test_mediator.ParseVolumeIdentifiersString('1..bogus') def testGetAPFSVolumeIdentifiers(self): """Tests the GetAPFSVolumeIdentifiers function.""" test_file_path = self._GetTestFilePath(['apfs.dmg']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.PREFERRED_GPT_BACK_END, location='/p1', parent=test_raw_path_spec) 
test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/', parent=test_tsk_partition_path_spec) volume_system = apfs_volume_system.APFSVolumeSystem() volume_system.Open(test_apfs_container_path_spec) # Test selection of single volume. input_file_object = io.BytesIO(b'1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers( volume_system, ['apfs1']) self.assertEqual(volume_identifiers, ['apfs1']) # Test selection of single volume. input_file_object = io.BytesIO(b'apfs1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers( volume_system, ['apfs1']) self.assertEqual(volume_identifiers, ['apfs1']) # Test selection of single volume with invalid input on first attempt. input_file_object = io.BytesIO(b'bogus\napfs1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers( volume_system, ['apfs1']) self.assertEqual(volume_identifiers, ['apfs1']) # Test selection of all volumes. input_file_object = io.BytesIO(b'all\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers( volume_system, ['apfs1']) self.assertEqual(volume_identifiers, ['apfs1']) # Test selection of no volumes. input_file_object = io.BytesIO(b'\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers( volume_system, ['apfs1']) self.assertEqual(volume_identifiers, []) def testGetLVMVolumeIdentifiers(self): """Tests the GetLVMVolumeIdentifiers function.""" test_file_path = self._GetTestFilePath(['lvm.raw']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_lvm_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec) volume_system = lvm_volume_system.LVMVolumeSystem() volume_system.Open(test_lvm_path_spec) # Test selection of single volume. 
input_file_object = io.BytesIO(b'1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetLVMVolumeIdentifiers( volume_system, ['lvm1']) self.assertEqual(volume_identifiers, ['lvm1']) # Test selection of single volume. input_file_object = io.BytesIO(b'lvm1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetLVMVolumeIdentifiers( volume_system, ['lvm1']) self.assertEqual(volume_identifiers, ['lvm1']) # Test selection of single volume with invalid input on first attempt. input_file_object = io.BytesIO(b'bogus\nlvm1\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetLVMVolumeIdentifiers( volume_system, ['lvm1']) self.assertEqual(volume_identifiers, ['lvm1']) # Test selection of all volumes. input_file_object = io.BytesIO(b'all\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetLVMVolumeIdentifiers( volume_system, ['lvm1', 'lvm2']) self.assertEqual(volume_identifiers, ['lvm1', 'lvm2']) # Test selection of no volumes. input_file_object = io.BytesIO(b'\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetLVMVolumeIdentifiers( volume_system, ['lvm1']) self.assertEqual(volume_identifiers, []) def testGetPartitionIdentifiers(self): """Tests the GetPartitionIdentifiers function.""" test_file_path = self._GetTestFilePath(['tsk_volume_system.raw']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_raw_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec) test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, parent=test_raw_path_spec) volume_system = tsk_volume_system.TSKVolumeSystem() volume_system.Open(test_tsk_partition_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) # Test selection of single partition. 
input_file_object = io.BytesIO(b'2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetPartitionIdentifiers( volume_system, ['p1', 'p2']) self.assertEqual(volume_identifiers, ['p2']) # Test selection of single partition. input_file_object = io.BytesIO(b'p2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetPartitionIdentifiers( volume_system, ['p1', 'p2']) self.assertEqual(volume_identifiers, ['p2']) # Test selection of single partition with invalid input on first attempt. input_file_object = io.BytesIO(b'bogus\np2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetPartitionIdentifiers( volume_system, ['p1', 'p2']) self.assertEqual(volume_identifiers, ['p2']) # Test selection of all partitions. input_file_object = io.BytesIO(b'all\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetPartitionIdentifiers( volume_system, ['p1', 'p2']) self.assertEqual(volume_identifiers, ['p1', 'p2']) # TODO: test selection of no partitions. # TODO: add test for PromptUserForVSSCurrentVolume. def testGetVSSStoreIdentifiers(self): """Tests the GetVSSStoreIdentifiers function.""" test_file_path = self._GetTestFilePath(['vsstest.qcow2']) self._SkipIfPathNotExists(test_file_path) test_os_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path) test_qcow_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=test_os_path_spec) test_vss_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_VSHADOW, parent=test_qcow_path_spec) volume_system = vshadow_volume_system.VShadowVolumeSystem() volume_system.Open(test_vss_path_spec) file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( output_writer=test_output_writer) # Test selection of single store. input_file_object = io.BytesIO(b'2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetVSSStoreIdentifiers( volume_system, ['vss1', 'vss2']) self.assertEqual(volume_identifiers, ['vss2']) # Test selection of single store. 
input_file_object = io.BytesIO(b'vss2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetVSSStoreIdentifiers( volume_system, ['vss1', 'vss2']) self.assertEqual(volume_identifiers, ['vss2']) # Test selection of single store with invalid input on first attempt. input_file_object = io.BytesIO(b'bogus\nvss2\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetVSSStoreIdentifiers( volume_system, ['vss1', 'vss2']) self.assertEqual(volume_identifiers, ['vss2']) # Test selection of all stores. input_file_object = io.BytesIO(b'all\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetVSSStoreIdentifiers( volume_system, ['vss1', 'vss2']) self.assertEqual(volume_identifiers, ['vss1', 'vss2']) # Test selection of no stores. input_file_object = io.BytesIO(b'\n') test_input_reader = tools.FileObjectInputReader(input_file_object) output_file_object = io.BytesIO() test_output_writer = tools.FileObjectOutputWriter(output_file_object) test_mediator = storage_media_tool.StorageMediaToolMediator( input_reader=test_input_reader, output_writer=test_output_writer) volume_identifiers = test_mediator.GetVSSStoreIdentifiers( volume_system, ['vss1', 'vss2']) self.assertEqual(volume_identifiers, []) class StorageMediaToolVolumeScannerTest(test_lib.CLIToolTestCase): """Tests for the storage media volume scanner.""" # pylint: disable=protected-access _APFS_PASSWORD = 'apfs-TEST' _BDE_PASSWORD = 'bde-TEST' def _GetTestScanNode(self, scan_context): """Retrieves the scan node for testing. Retrieves the first scan node, from the root upwards, with more or less than 1 sub node. Args: scan_context (dfvfs.ScanContext): scan context. Returns: dfvfs.SourceScanNode: scan node. """ scan_node = scan_context.GetRootScanNode() while len(scan_node.sub_nodes) == 1: scan_node = scan_node.sub_nodes[0] return scan_node def _TestScanSourceAPFSImage(self, source_path): """Tests the ScanSource function on an APFS image. Args: source_path (str): path of the source device, directory or file. 
""" test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.credentials = [('password', '{0:s}'.format(self._APFS_PASSWORD))] options.scan_mode = options.SCAN_MODE_ALL options.partitions = ['all'] options.volumes = ['all'] base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = scan_context.GetRootScanNode() scan_node = scan_node.sub_nodes[0].sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_GPT_BACK_END) if dfvfs_definitions.PREFERRED_GPT_BACK_END == ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): expected_number_of_sub_nodes = 6 else: expected_number_of_sub_nodes = 1 self.assertEqual(len(scan_node.sub_nodes), expected_number_of_sub_nodes) for scan_node in scan_node.sub_nodes: if getattr(scan_node.path_spec, 'location', None) == '/p1': break self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_GPT_BACK_END) self.assertEqual(len(scan_node.sub_nodes), 1) path_spec = scan_node.path_spec if dfvfs_definitions.PREFERRED_GPT_BACK_END == ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): self.assertEqual(path_spec.start_offset, 20480) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER) self.assertEqual(len(scan_node.sub_nodes), 1) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_APFS_CONTAINER) self.assertEqual(len(scan_node.sub_nodes), 1) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_APFS) def _TestScanSourceDirectory(self, source_path): """Tests the ScanSource function on a directory. Args: source_path (str): path of the source device, directory or file. """ test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = scan_context.GetRootScanNode() self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_OS) path_spec = scan_node.path_spec self.assertEqual(path_spec.location, os.path.abspath(source_path)) def _TestScanSourceImage(self, source_path): """Tests the ScanSource function on an image containing a single partition. Args: source_path (str): path of the source device, directory or file. """ test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = self._GetTestScanNode(scan_context) self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_EXT_BACK_END) def _TestScanSourceLVMImage(self, source_path): """Tests the ScanSource function on a LVM image. Args: source_path (str): path of the source device, directory or file. 
""" test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.partitions = ['all'] options.volumes = ['all'] base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = self._GetTestScanNode(scan_context) self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_LVM) self.assertEqual(len(scan_node.sub_nodes), 2) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_LVM) self.assertEqual(len(scan_node.sub_nodes), 1) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_EXT_BACK_END) def _TestScanSourcePartitionedImage(self, source_path): """Tests the ScanSource function on an image containing multiple partitions. Args: source_path (str): path of the source device, directory or file. """ test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.partitions = ['all'] base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = self._GetTestScanNode(scan_context) self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION) self.assertEqual(len(scan_node.sub_nodes), 7) for scan_node in scan_node.sub_nodes: if getattr(scan_node.path_spec, 'location', None) == '/p2': break self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION) self.assertEqual(len(scan_node.sub_nodes), 1) path_spec = scan_node.path_spec self.assertEqual(path_spec.start_offset, 180224) scan_node = scan_node.sub_nodes[0] self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_EXT_BACK_END) def _TestScanSourceVSSImage(self, source_path): """Tests the ScanSource function on a VSS storage media image. Args: source_path (str): path of the source device, directory or file. """ test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.partitions = ['all'] options.snapshots = ['all'] options.volumes = ['all'] base_path_specs = [] scan_context = test_scanner.ScanSource( source_path, options, base_path_specs) self.assertIsNotNone(scan_context) scan_node = self._GetTestScanNode(scan_context) self.assertIsNotNone(scan_node) self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_QCOW) self.assertEqual(len(scan_node.sub_nodes), 2) volume_scan_node = scan_node scan_node = volume_scan_node.sub_nodes[0] self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_VSHADOW) self.assertEqual(len(scan_node.sub_nodes), 2) scan_node = scan_node.sub_nodes[0] self.assertEqual( scan_node.type_indicator, dfvfs_definitions.TYPE_INDICATOR_VSHADOW) # By default the file system inside a VSS volume is not scanned. 
self.assertEqual(len(scan_node.sub_nodes), 0) scan_node = volume_scan_node.sub_nodes[1] self.assertEqual( scan_node.type_indicator, dfvfs_definitions.PREFERRED_NTFS_BACK_END) def testScanEncryptedVolumeOnBDE(self): """Tests the _ScanEncryptedVolume function on a BDE image.""" test_file_path = self._GetTestFilePath(['bdetogo.raw']) self._SkipIfPathNotExists(test_file_path) resolver.Resolver.key_chain.Empty() test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.credentials = [('password', self._BDE_PASSWORD)] scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) bde_scan_node = scan_node.sub_nodes[0] test_scanner._ScanEncryptedVolume(scan_context, bde_scan_node, options) def testScanVolumeSystemRoot(self): """Tests the _ScanVolumeSystemRoot function.""" test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL scan_context = source_scanner.SourceScannerContext() # Test error conditions. with self.assertRaises(dfvfs_errors.ScannerError): test_scanner._ScanVolumeSystemRoot(scan_context, None, options, []) scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(dfvfs_errors.ScannerError): test_scanner._ScanVolumeSystemRoot(scan_context, scan_node, options, []) def testScanVolumeSystemRootOnAPFS(self): """Tests the _ScanVolumeSystemRoot function on an APFS image.""" test_file_path = self._GetTestFilePath(['apfs.dmg']) self._SkipIfPathNotExists(test_file_path) resolver.Resolver.key_chain.Empty() test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.volumes = ['all'] scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) scan_node = scan_context.GetRootScanNode() scan_node = scan_node.sub_nodes[0].sub_nodes[0] if dfvfs_definitions.PREFERRED_GPT_BACK_END == ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): apfs_container_scan_node = scan_node.sub_nodes[4].sub_nodes[0] else: apfs_container_scan_node = scan_node.sub_nodes[0].sub_nodes[0] base_path_specs = [] test_scanner._ScanVolumeSystemRoot( scan_context, apfs_container_scan_node, options, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. 
with self.assertRaises(dfvfs_errors.ScannerError): test_scanner._ScanVolumeSystemRoot( scan_context, apfs_container_scan_node.sub_nodes[0], options, base_path_specs) def testScanVolumeSystemRootOnLVM(self): """Tests the _ScanVolumeSystemRoot function on a LVM image.""" test_file_path = self._GetTestFilePath(['lvm.raw']) self._SkipIfPathNotExists(test_file_path) resolver.Resolver.key_chain.Empty() test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.volumes = ['all'] scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) lvm_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolumeSystemRoot( scan_context, lvm_scan_node, options, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. with self.assertRaises(dfvfs_errors.ScannerError): test_scanner._ScanVolumeSystemRoot( scan_context, lvm_scan_node.sub_nodes[0], options, base_path_specs) def testScanVolumeSystemRootOnPartitionedImage(self): """Tests the _ScanVolumeSystemRoot function on a partitioned image.""" test_file_path = self._GetTestFilePath(['tsk_volume_system.raw']) self._SkipIfPathNotExists(test_file_path) test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) # Test error conditions. with self.assertRaises(dfvfs_errors.ScannerError): test_scanner._ScanVolumeSystemRoot(scan_context, scan_node, options, []) def testScanVolumeSystemRootOnVSS(self): """Tests the _ScanVolumeSystemRoot function on VSS.""" test_file_path = self._GetTestFilePath(['vsstest.qcow2']) self._SkipIfPathNotExists(test_file_path) test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_SNAPSHOTS_ONLY options.snapshots = ['all'] scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) vss_scan_node = scan_node.sub_nodes[0] base_path_specs = [] test_scanner._ScanVolumeSystemRoot( scan_context, vss_scan_node, options, base_path_specs) self.assertEqual(len(base_path_specs), 2) def testScanVolumeSystemRootOnVSSDisabled(self): """Tests the _ScanVolumeSystemRoot function on VSS with VSS turned off.""" test_file_path = self._GetTestFilePath(['vsstest.qcow2']) self._SkipIfPathNotExists(test_file_path) test_scanner = storage_media_tool.StorageMediaToolVolumeScanner() options = storage_media_tool.StorageMediaToolVolumeScannerOptions() options.scan_mode = options.SCAN_MODE_ALL options.snapshots = ['none'] scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file_path) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) vss_scan_node = scan_node.sub_nodes[0] base_path_specs = [] test_scanner._ScanVolumeSystemRoot( scan_context, vss_scan_node, options, base_path_specs) self.assertEqual(len(base_path_specs), 0) def testScanSourceAPFS(self): """Tests the ScanSource 
function on an APFS image."""
    source_path = self._GetTestFilePath(['apfs.dmg'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceAPFSImage(source_path)

  def testScanSourceEncryptedAPFS(self):
    """Tests the ScanSource function on an encrypted APFS image."""
    resolver.Resolver.key_chain.Empty()
    source_path = self._GetTestFilePath(['apfs_encrypted.dmg'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceAPFSImage(source_path)

  def testScanSourcePartitionedImage(self):
    """Tests the ScanSource function on a partitioned image."""
    source_path = self._GetTestFilePath(['tsk_volume_system.raw'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourcePartitionedImage(source_path)

  def testScanSourceSplitEWF(self):
    """Tests the ScanSource function on a split EWF image."""
    source_path = self._GetTestFilePath(['image-split.E01'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourcePartitionedImage(source_path)

  def testScanSourceEWF(self):
    """Tests the ScanSource function on an EWF image."""
    source_path = self._GetTestFilePath(['image.E01'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceImage(source_path)

  def testScanSourceLVM(self):
    """Tests the ScanSource function on a LVM image."""
    source_path = self._GetTestFilePath(['lvm.raw'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceLVMImage(source_path)

  def testScanSourceNonExistingFile(self):
    """Tests the ScanSource function on a non-existing file."""
    with self.assertRaises(dfvfs_errors.ScannerError):
      source_path = self._GetTestFilePath(['nosuchfile.raw'])
      self._TestScanSourceImage(source_path)

  def testScanSourceQCOW(self):
    """Tests the ScanSource function on a QCOW image."""
    source_path = self._GetTestFilePath(['image.qcow2'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceImage(source_path)

  def testScanSourceTextDirectory(self):
    """Tests the ScanSource function on a directory."""
    source_path = self._GetTestFilePath(['text_parser'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceDirectory(source_path)

  def testScanSourceVHDI(self):
    """Tests the ScanSource function on a VHD image."""
    source_path = self._GetTestFilePath(['image.vhd'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceImage(source_path)

  def testScanSourceVMDK(self):
    """Tests the ScanSource function on a VMDK image."""
    source_path = self._GetTestFilePath(['image.vmdk'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceImage(source_path)

  def testScanSourceVSS(self):
    """Tests the ScanSource function on a VSS image."""
    source_path = self._GetTestFilePath(['vsstest.qcow2'])
    self._SkipIfPathNotExists(source_path)
    self._TestScanSourceVSSImage(source_path)


class StorageMediaToolTest(test_lib.CLIToolTestCase):
  """Tests for the storage media tool."""

  # pylint: disable=protected-access

  _APFS_PASSWORD = 'apfs-TEST'
  _BDE_PASSWORD = 'bde-TEST'

  _EXPECTED_OUTPUT_CREDENTIAL_OPTIONS = """\
usage: storage_media_tool_test.py [--credential TYPE:DATA]

Test argument parser.

{0:s}:
  --credential TYPE:DATA
                        Define a credential that can be used to unlock
                        encrypted volumes e.g. BitLocker. The credential is
                        defined as type:data e.g. "password:BDE-test".
                        Supported credential types are: key_data, password,
                        recovery_password, startup_key. Binary key data is
                        expected to be passed in BASE-16 encoding
                        (hexadecimal). WARNING credentials passed via command
                        line arguments can end up in logs, so use this option
                        with care.
""".format(test_lib.ARGPARSE_OPTIONS)

  _EXPECTED_OUTPUT_STORAGE_MEDIA_OPTIONS = """\
usage: storage_media_tool_test.py [--partitions PARTITIONS]
                                  [--volumes VOLUMES]

Test argument parser.

{0:s}:
  --partitions PARTITIONS, --partition PARTITIONS
                        Define partitions to be processed. A range of
                        partitions can be defined as: "3..5". Multiple
                        partitions can be defined as: "1,3,5" (a list of
                        comma separated values). Ranges and lists can also be
                        combined as: "1,3..5". The first partition is 1. All
                        partitions can be specified with: "all".
  --volumes VOLUMES, --volume VOLUMES
                        Define volumes to be processed. A range of volumes
                        can be defined as: "3..5". Multiple volumes can be
                        defined as: "1,3,5" (a list of comma separated
                        values). Ranges and lists can also be combined as:
                        "1,3..5". The first volume is 1. All volumes can be
                        specified with: "all".
""".format(test_lib.ARGPARSE_OPTIONS)

  _EXPECTED_OUTPUT_VSS_PROCESSING_OPTIONS = """\
usage: storage_media_tool_test.py [--no_vss] [--vss_only]
                                  [--vss_stores VSS_STORES]

Test argument parser.

{0:s}:
  --no_vss, --no-vss    Do not scan for Volume Shadow Snapshots (VSS). This
                        means that Volume Shadow Snapshots (VSS) are not
                        processed.
  --vss_only, --vss-only
                        Do not process the current volume if Volume Shadow
                        Snapshots (VSS) have been selected.
  --vss_stores VSS_STORES, --vss-stores VSS_STORES
                        Define Volume Shadow Snapshots (VSS) (or stores) that
                        need to be processed. A range of stores can be
                        defined as: "3..5". Multiple stores can be defined
                        as: "1,3,5" (a list of comma separated values).
                        Ranges and lists can also be combined as: "1,3..5".
                        The first store is 1. All stores can be defined as:
                        "all".
""".format(test_lib.ARGPARSE_OPTIONS)

  def _GetTestScanNode(self, scan_context):
    """Retrieves the scan node for testing.

    Retrieves the first scan node, from the root upwards, with more or less
    than 1 sub node.

    Args:
      scan_context (dfvfs.ScanContext): scan context.

    Returns:
      dfvfs.SourceScanNode: scan node.
    """
    scan_node = scan_context.GetRootScanNode()
    while len(scan_node.sub_nodes) == 1:
      scan_node = scan_node.sub_nodes[0]
    return scan_node

  def testParseCredentialOptions(self):
    """Tests the _ParseCredentialOptions function."""
    test_tool = storage_media_tool.StorageMediaTool()
    options = test_lib.TestOptions()
    test_tool._ParseCredentialOptions(options)
    # TODO: improve test coverage.

  def testParseSourcePathOption(self):
    """Tests the _ParseSourcePathOption function."""
    test_file_path = self._GetTestFilePath(['ímynd.dd'])
    self._SkipIfPathNotExists(test_file_path)
    test_tool = storage_media_tool.StorageMediaTool()
    options = test_lib.TestOptions()
    with self.assertRaises(errors.BadConfigOption):
      test_tool._ParseSourcePathOption(options)
    options.source = test_file_path
    test_tool._ParseSourcePathOption(options)

  def testParseStorageMediaOptions(self):
    """Tests the _ParseStorageMediaOptions function."""
    test_file_path = self._GetTestFilePath(['ímynd.dd'])
    self._SkipIfPathNotExists(test_file_path)
    test_tool = storage_media_tool.StorageMediaTool()
    options = test_lib.TestOptions()
    options.partitions = 'all'
    options.source = test_file_path
    test_tool._ParseStorageMediaImageOptions(options)

  def testParseStorageMediaImageOptions(self):
    """Tests the _ParseStorageMediaImageOptions function."""
    test_tool = storage_media_tool.StorageMediaTool()
    options = test_lib.TestOptions()
    options.partitions = 'all'
    test_tool._ParseStorageMediaImageOptions(options)
    # TODO: improve test coverage.
def testParseVSSProcessingOptions(self): """Tests the _ParseVSSProcessingOptions function.""" test_tool = storage_media_tool.StorageMediaTool() options = test_lib.TestOptions() test_tool._ParseVSSProcessingOptions(options) # TODO: improve test coverage. def testAddCredentialOptions(self): """Tests the AddCredentialOptions function.""" argument_parser = argparse.ArgumentParser( prog='storage_media_tool_test.py', description='Test argument parser.', add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter) test_tool = storage_media_tool.StorageMediaTool() test_tool.AddCredentialOptions(argument_parser) output = self._RunArgparseFormatHelp(argument_parser) self.assertEqual(output, self._EXPECTED_OUTPUT_CREDENTIAL_OPTIONS) def testAddStorageMediaImageOptions(self): """Tests the AddStorageMediaImageOptions function.""" argument_parser = argparse.ArgumentParser( prog='storage_media_tool_test.py', description='Test argument parser.', add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter) test_tool = storage_media_tool.StorageMediaTool() test_tool.AddStorageMediaImageOptions(argument_parser) output = self._RunArgparseFormatHelp(argument_parser) self.assertEqual(output, self._EXPECTED_OUTPUT_STORAGE_MEDIA_OPTIONS) def testAddVSSProcessingOptions(self): """Tests the AddVSSProcessingOptions function.""" argument_parser = argparse.ArgumentParser( prog='storage_media_tool_test.py', description='Test argument parser.', add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter) test_tool = storage_media_tool.StorageMediaTool() test_tool.AddVSSProcessingOptions(argument_parser) output = self._RunArgparseFormatHelp(argument_parser) self.assertEqual(output, self._EXPECTED_OUTPUT_VSS_PROCESSING_OPTIONS) if __name__ == '__main__': unittest.main()
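# A stand-alone sketch of the input-scripting pattern the mediator tests
# above rely on: interactive volume-selection prompts are driven by feeding
# io.BytesIO through a file-object input reader, so an invalid first answer
# followed by a valid one exercises the retry loop. _LineReader and
# _prompt_for_choice below are hypothetical stand-ins for
# tools.FileObjectInputReader and the mediator's prompt loop.
import io


class _LineReader(object):
  """Minimal line reader, standing in for tools.FileObjectInputReader."""

  def __init__(self, file_object):
    self._file_object = file_object

  def Read(self):
    return self._file_object.readline().decode('utf-8')


def _prompt_for_choice(input_reader, valid_choices):
  """Keeps reading until the scripted input yields a valid selection."""
  while True:
    choice = input_reader.Read().strip()
    if choice in valid_choices:
      return choice


# Mirrors the b'bogus\nlvm1\n' cases above: the bogus answer is rejected and
# the second answer is accepted.
assert _prompt_for_choice(
    _LineReader(io.BytesIO(b'bogus\nlvm1\n')), ['lvm1', 'lvm2']) == 'lvm1'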
'''
Created on Sept 26, 2013

@author: Rafael Nunes
'''


def webapp_add_wsgi_middleware(app):
    from google.appengine.ext.appstats import recording
    app = recording.appstats_wsgi_middleware(app)
    return app
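# For illustration only: webapp_add_wsgi_middleware must take a WSGI
# application and return a WSGI application; App Engine imports
# appengine_config.py at startup and routes every app through that hook.
# The hypothetical middleware below honors the same contract without doing
# anything, whereas appstats_wsgi_middleware records RPC and timing data
# around the delegated call.
def _example_passthrough_middleware(app):
    def wrapped(environ, start_response):
        # Delegate unchanged; a real middleware would record or transform
        # the request/response around this call.
        return app(environ, start_response)
    return wrapped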
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals) from pants_test.pants_run_integration_test import PantsRunIntegrationTest class ThriftLinterTest(PantsRunIntegrationTest): def test_good(self): # thrift-linter should pass without warnings with correct thrift files. cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:good-thrift'] pants_run = self.run_pants(cmd) self.assert_success(pants_run) self.assertFalse('Lint errors found!' in pants_run.stdout_data) def test_bad_default(self): # thrift-linter fails on linter errors. cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-default'] pants_run = self.run_pants(cmd) self.assert_success(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_strict(self): # thrift-linter fails on linter errors (BUILD target defines thrift_linter_strict=True) cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-strict'] pants_run = self.run_pants(cmd) self.assert_failure(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_non_strict(self): # thrift-linter fails on linter errors (BUILD target defines thrift_linter_strict=False) cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-non-strict'] pants_run = self.run_pants(cmd) self.assert_success(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_default_override(self): # thrift-linter fails with command line flag overriding the BUILD section. cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-default', '--thrift-linter-strict'] pants_run = self.run_pants(cmd) self.assert_failure(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_strict_override(self): # thrift-linter passes with non-strict command line flag overriding the BUILD section. cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-strict', '--no-thrift-linter-strict'] pants_run = self.run_pants(cmd) self.assert_success(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_non_strict_override(self): # thrift-linter fails with command line flag overriding the BUILD section. cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-non-strict', '--thrift-linter-strict'] pants_run = self.run_pants(cmd) self.assert_failure(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_pants_ini_strict(self): # thrift-linter fails if pants.ini has a thrift-linter:strict=True setting cmd = ['goal', 'thrift-linter', 'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-default',] pants_ini_config = {'thrift-linter': {'strict': True}} pants_run = self.run_pants(cmd, config = pants_ini_config) self.assert_failure(pants_run) self.assertTrue('Lint errors found!' in pants_run.stdout_data) def test_bad_pants_ini_strict_overridden(self): # thrift-linter passes if pants.ini has a thrift-linter:strict=True setting and # a command line non-strict flag is passed. 
    cmd = ['goal', 'thrift-linter',
           'testprojects/src/thrift/com/pants/thrift_linter:bad-thrift-default',
           '--no-thrift-linter-strict']
    pants_ini_config = {'thrift-linter': {'strict': True}}
    pants_run = self.run_pants(cmd, config=pants_ini_config)
    self.assert_success(pants_run)
    self.assertTrue('Lint errors found!' in pants_run.stdout_data)
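# A minimal sketch (not pants internals) of the strictness precedence these
# tests pin down: an explicit --thrift-linter-strict / --no-thrift-linter-strict
# flag beats the BUILD target's thrift_linter_strict, which in turn beats the
# pants.ini [thrift-linter] strict setting. resolve_strict is a hypothetical
# helper written only to make that ordering explicit.
def resolve_strict(cli_flag, build_setting, ini_default=False):
  for value in (cli_flag, build_setting, ini_default):
    if value is not None:
      return value
  return False


assert resolve_strict(None, None, True) is True    # pants.ini alone applies
assert resolve_strict(False, None, True) is False  # CLI flag overrides pants.ini
assert resolve_strict(True, False, False) is True  # CLI flag overrides BUILD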
from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3): ADMIN_API = True extension_name = 'flavor-extra-specs' # TODO(park): Overriding '_api_version' till all functional tests # are merged between v2 and v2.1. After that base class variable # itself can be changed to 'v2' _api_version = 'v2' def _get_flags(self): f = super(FlavorExtraSpecsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavorextraspecs.' 'Flavorextraspecs') return f def _flavor_extra_specs_create(self): subs = {'value1': 'value1', 'value2': 'value2' } response = self._do_post('flavors/1/os-extra_specs', 'flavor-extra-specs-create-req', subs) self._verify_response('flavor-extra-specs-create-resp', subs, response, 200) def test_flavor_extra_specs_get(self): subs = {'value1': 'value1'} self._flavor_extra_specs_create() response = self._do_get('flavors/1/os-extra_specs/key1') self._verify_response('flavor-extra-specs-get-resp', subs, response, 200) def test_flavor_extra_specs_list(self): subs = {'value1': 'value1', 'value2': 'value2' } self._flavor_extra_specs_create() response = self._do_get('flavors/1/os-extra_specs') self._verify_response('flavor-extra-specs-list-resp', subs, response, 200) def test_flavor_extra_specs_create(self): self._flavor_extra_specs_create() def test_flavor_extra_specs_update(self): subs = {'value1': 'new_value1'} self._flavor_extra_specs_create() response = self._do_put('flavors/1/os-extra_specs/key1', 'flavor-extra-specs-update-req', subs) self._verify_response('flavor-extra-specs-update-resp', subs, response, 200) def test_flavor_extra_specs_delete(self): self._flavor_extra_specs_create() response = self._do_delete('flavors/1/os-extra_specs/key1') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, '')
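# For reference, the request bodies behind the sample templates above have
# the standard os-extra_specs shapes (the 'subs' dicts fill in the value
# placeholders):
#
#   POST flavors/1/os-extra_specs
#       {"extra_specs": {"key1": "value1", "key2": "value2"}}
#
#   PUT flavors/1/os-extra_specs/key1
#       {"key1": "new_value1"}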
import acm import ael import FHTI_EDD_OTC_Util import HTI_ExcelReport2 import HTI_Util import HTI_FeedTrade_EDD_Util import os import win32com.client import HTI_CollateralManagement_TRS import sqlite3 ael_variables = [['posdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \ ['deltaone_filename', 'Deltaone File', 'string', None, 'S:\\Deltaone\\Reports\\Deltaone Management Report v1.5.xlsm', 0, 0, 'Deltaone File', None, 1], \ ['margin_call_filename', 'Margin Call File', 'string', None, 'D:\\temp\\TRS\\CPTY\\margin_call_YYYYMMDD.xlsx', 0, 0, 'Margin Call File', None, 1], \ ['margin_call_template', 'Margin Call Template', 'string', None, 'S:\\Deltaone\\Collateral & Margin Call Templates\\HTI\\Margin call Notice.xlsx', 0, 0, 'Margin Call Template', None, 1], \ ['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTIFS - EDD,HTISEC - EDD', 1, 1, 'Acquirer(s)', None, 1], \ ['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD', 1, 1, 'Portfolio', None, 1], \ ['sendEmail', 'Send mail', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Send Success Email', None, 1], \ ['emaillist', 'Email', 'string', None, 'louis.ck.wong@htisec.com', 0, 0, 'Email List', None, 0], \ ['subject', 'Email subject', 'string', None, 'FA4 (PROD) EDD Collateral Management Report (TRS) asof @date', 1, 0, 'Email Subject', None, 0], \ ['title', 'Report title', 'string', None, 'Collateral Management Report (TRS) asof @date', 1, 0, 'Report Title', None, 1], \ ['currclspricemkt', 'Current Closing Price Market', 'string', None, 'MSS_SPOT', 1, 0, 'Current Closing Price Market', None, 1], \ ['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'MSS_CLS', 1, 0, 'Historical Closing Price Market', None, 1], \ ['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]] def question_marks(st): question_marks = '?' for i in range(0, len(st.split(','))-1): question_marks = question_marks + ",?" 
return question_marks def db_cur(source = ":memory:"): conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES) conn.row_factory = sqlite3.Row cur = conn.cursor() return conn, cur def create_tbl(cur, tbl_name, header, arr = None, index_arr = None): cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name)) tbl_exists = cur.fetchone() if tbl_exists[0] == 0: cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );") if index_arr is not None: for index in index_arr: cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");") if arr is not None: cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr) return def margin_call(cur, dict): to_cpty_addr = "$A$9" cpty_addr1_addr = "$A$10" cpty_addr2_addr = "$A$11" cpty_addr3_addr = "$A$12" mtm_exposure_addr = "$K$29" fx_currency_addr = "$L$29" mtm_ltv_addr = "$K$32" maint_margin_addr = "$K$32" xl = win32com.client.Dispatch('Excel.Application') cur.execute("""select * from rpt where Loan_Amount <> 0 and MTM_LTV > Coll_Trigger_LTV""") for mtm_row in cur.fetchall(): mtm_exp = mtm_row["MTM_Exposure"] mtm_margin = mtm_row["MTM_LTV"] maint_margin = mtm_row["Coll_Trigger_LTV"] collateral_held = mtm_row["Ini_Cash_Bal"] delivery_amt = (mtm_row["Loan_Amount"]/mtm_row["Today_FX"]/mtm_row["Coll_Trigger_LTV"])-mtm_row["MV_in_FX"]+mtm_row["Exe_Notional"] wb = xl.Workbooks.Open(dict["margin_call_filename"]) ws = wb.Worksheets(1) return def ael_main(dict): dict["temp_filename"] = "" asofdate = dict["posdate"] if asofdate == 'Today': posdate = ael.date_today() else: asofdateArr = dict['posdate'].split('/') posdate = ael.date_from_ymd(int(asofdateArr[2]), int(asofdateArr[1]), int(asofdateArr[0])) rpt_header = "Counterparty,Contract_Date,Contract_No,TRS,Security_Name,BBG_Code,Currency,Quantity,Avg_Exe_Price,Exe_Notional,Init_Price,Ini_Cash_Bal,Avail_Cash_Bal,Loan_Curr,Loan_Amount,Loan_Currency,Closing_Price,MV_in_FX,FX_Currency,Today_FX,MV_in_HKD,All_Time_LTV,Coll_Trigger_LTV,Terminate_LTV,MTM_LTV,Margin_Pool,MTM_Exposure,Est_All_Time_LTV,Est_Coll_Trigger_LTV,Est_Terminate_LTV,Cum_Income_Client_To_Pay,Cum_EDD_Net_Inc,Cum_PWM_Net_Inc" rpt_arr = HTI_CollateralManagement_TRS.genTRSReport(posdate, dict, None, 'Positions') conn, cur = db_cur() create_tbl(cur, "rpt", rpt_header, rpt_arr) # cur.execute("select * from rpt") # for row in cur.fetchall(): # print row["Avail_Cash_Bal"] margin_call(cur, dict) return
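# Stand-alone sketch of the sqlite3 helpers above; illustration only, since
# the real report fills the table from Front Arena via genTRSReport and the
# demo data below is made up. It runs without the 'ael' environment: build
# an in-memory table from a header string and read rows back by column name
# through the sqlite3.Row factory.
def _demo_sqlite_helpers():
    header = "Counterparty,Loan_Amount,Today_FX,MTM_LTV,Coll_Trigger_LTV"
    rows = [("CPTY_A", 1000000.0, 7.8, 0.85, 0.80),
            ("CPTY_B", 0.0, 7.8, 0.10, 0.80)]
    conn, cur = db_cur()
    create_tbl(cur, "rpt", header, rows, index_arr=["Counterparty"])
    cur.execute("""select * from rpt where Loan_Amount <> 0 and MTM_LTV > Coll_Trigger_LTV""")
    for row in cur.fetchall():
        print("%s breached trigger LTV: MTM_LTV=%.2f" % (row["Counterparty"], row["MTM_LTV"]))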
"""Compiles all *.proto files it finds into *_pb2.py.""" from __future__ import print_function import logging import optparse import os import re import shutil import subprocess import sys import tempfile THIS_DIR = os.path.dirname(os.path.abspath(__file__)) MIN_SUPPORTED_PROTOC_VERSION = (3, 17, 3) MAX_SUPPORTED_PROTOC_VERSION = (3, 17, 3) PROTOC_INSTALL_HELP = ( "Could not find working protoc (%s <= ver <= %s) in PATH." % ( '.'.join(map(str, MIN_SUPPORTED_PROTOC_VERSION)), '.'.join(map(str, MAX_SUPPORTED_PROTOC_VERSION)), )) IGNORED_PATHS = [ re.compile(r'.*(/|\\)third_party(/|\\)?'), ] def is_ignored(path): """True if |path| matches any regexp in IGNORED_PATHS.""" return any(b.match(path) for b in IGNORED_PATHS) def find_proto_files(path): """Recursively searches for *.proto files, yields absolute paths to them.""" path = os.path.abspath(path) for dirpath, dirnames, filenames in os.walk(path, followlinks=True): # Skip hidden and ignored directories skipped = [ x for x in dirnames if x[0] == '.' or is_ignored(os.path.join(dirpath, x)) ] for dirname in skipped: dirnames.remove(dirname) # Yield *.proto files. for name in filenames: if name.endswith('.proto'): yield os.path.join(dirpath, name) def get_protoc(): """Returns protoc executable path (maybe relative to PATH).""" return 'protoc.exe' if sys.platform == 'win32' else 'protoc' def compile_proto(proto_file, proto_path, output_path=None): """Invokes 'protoc', compiling single *.proto file into *_pb2.py file. Args: proto_file: the file to compile. proto_path: the root of proto file directory tree. output_path: the root of the output directory tree. Defaults to `proto_path`. Returns: The path of the generated _pb2.py file. """ output_path = output_path or proto_path cmd = [get_protoc()] cmd.append('--proto_path=%s' % proto_path) # Reuse embedded google protobuf. root = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR))) cmd.append('--proto_path=%s' % os.path.join(root, 'client', 'third_party')) cmd.append('--python_out=%s' % output_path) cmd.append('--prpc-python_out=%s' % output_path) cmd.append(proto_file) logging.debug('Running %s', cmd) env = os.environ.copy() env['PATH'] = os.pathsep.join([THIS_DIR, env.get('PATH', '')]) subprocess.check_call(cmd, env=env) return proto_file.replace('.proto', '_pb2.py').replace(proto_path, output_path) def check_proto_compiled(proto_file, proto_path): """Return True if *_pb2.py on disk is up to date.""" # Missing? expected_path = proto_file.replace('.proto', '_pb2.py') if not os.path.exists(expected_path): return False # Helper to read contents of a file. def read(path): with open(path, 'r') as f: return f.read() # Compile *.proto into temp file to compare the result with existing file. 
tmp_dir = tempfile.mkdtemp() try: try: compiled = compile_proto(proto_file, proto_path, output_path=tmp_dir) except subprocess.CalledProcessError: return False return read(compiled) == read(expected_path) finally: shutil.rmtree(tmp_dir) def compile_all_files(root_dir, proto_path): """Compiles all *.proto files it recursively finds in |root_dir|.""" root_dir = os.path.abspath(root_dir) success = True for path in find_proto_files(root_dir): try: compile_proto(path, proto_path) except subprocess.CalledProcessError: print('Failed to compile: %s' % path[len(root_dir) + 1:], file=sys.stderr) success = False return success def check_all_files(root_dir, proto_path): """Returns True if all *_pb2.py files on disk are up to date.""" root_dir = os.path.abspath(root_dir) success = True for path in find_proto_files(root_dir): if not check_proto_compiled(path, proto_path): print( 'Need to recompile file: %s' % path[len(root_dir) + 1:], file=sys.stderr) success = False return success def get_protoc_version(): """Returns the version of installed 'protoc', or None if not found.""" cmd = [get_protoc(), '--version'] try: logging.debug('Running %s', cmd) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) out, _ = proc.communicate() if proc.returncode: logging.debug('protoc --version returned %d', proc.returncode) return None except OSError as err: logging.debug('Failed to run protoc --version: %s', err) return None match = re.match('libprotoc (.*)', out) if not match: logging.debug('Unexpected output of protoc --version: %s', out) return None return tuple(map(int, match.group(1).split('.'))) def main(args, app_dir=None): parser = optparse.OptionParser( description=sys.modules['__main__'].__doc__, usage='%prog [options]' + ('' if app_dir else ' <root dir>')) parser.add_option( '-c', '--check', action='store_true', help='Only check that all *.proto files are up to date') parser.add_option('-v', '--verbose', action='store_true') parser.add_option( '--proto_path', help=( 'Used to calculate relative paths of proto files in the registry. ' 'Defaults to the input directory.' )) options, args = parser.parse_args(args) logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR) root_dir = None if not app_dir: if len(args) != 1: parser.error('Expecting single argument') root_dir = args[0] else: if args: parser.error('Unexpected arguments') root_dir = app_dir # Ensure protoc compiler is up-to-date. protoc_version = get_protoc_version() if protoc_version is None or protoc_version < MIN_SUPPORTED_PROTOC_VERSION: if protoc_version: existing = '.'.join(map(str, protoc_version)) expected = '.'.join(map(str, MIN_SUPPORTED_PROTOC_VERSION)) print( 'protoc version is too old (%s), expecting at least %s.\n' % (existing, expected), file=sys.stderr) sys.stderr.write(PROTOC_INSTALL_HELP) return 1 # Make sure protoc produces code compatible with vendored libprotobuf. if protoc_version > MAX_SUPPORTED_PROTOC_VERSION: existing = '.'.join(map(str, protoc_version)) expected = '.'.join(map(str, MAX_SUPPORTED_PROTOC_VERSION)) print( 'protoc version is too new (%s), expecting at most %s.\n' % (existing, expected), file=sys.stderr) sys.stderr.write(PROTOC_INSTALL_HELP) return 1 proto_path = os.path.abspath(options.proto_path or root_dir) if options.check: success = check_all_files(root_dir, proto_path) else: success = compile_all_files(root_dir, proto_path) return int(not success) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
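# Example invocations, assuming this script is saved as compile_proto.py,
# a protoc inside the supported version window is on PATH, and the prpc
# protoc plugin sits next to this script (the script prepends its own
# directory to PATH before running protoc):
#
#   python compile_proto.py path/to/protos            # regenerate *_pb2.py files
#   python compile_proto.py --check path/to/protos    # exit 1 if any are stale
#   python compile_proto.py --proto_path=src src/rpc  # resolve imports from src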
""" Brocade south bound connector to communicate with switch using HTTP or HTTPS protocol. """ import time from oslo_log import log as logging from oslo_serialization import base64 from oslo_utils import encodeutils import requests import six from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import exception as b_exception import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant LOG = logging.getLogger(__name__) class BrcdHTTPFCZoneClient(object): def __init__(self, ipaddress, username, password, port, vfid, protocol): """Initializing the client with the parameters passed. Creates authentication token and authenticate with switch to ensure the credentials are correct and change the VF context. :param ipaddress: IP Address of the device. :param username: User id to login. :param password: User password. :param port: Device Communication port :param vfid: Virtual Fabric ID. :param protocol: Communication Protocol. """ self.switch_ip = ipaddress self.switch_user = username self.switch_pwd = password self.protocol = protocol self.vfid = vfid self.cfgs = {} self.zones = {} self.alias = {} self.qlps = {} self.ifas = {} self.active_cfg = '' self.parsed_raw_zoneinfo = "" self.random_no = '' self.auth_version = '' self.session = None # Create and assign the authentication header based on the credentials self.auth_header = self.create_auth_token() # Authenticate with the switch # If authenticated successfully, save the auth status and # create auth header for future communication with the device. self.is_auth, self.auth_header = self.authenticate() self.check_change_vf_context() def connect(self, requestType, requestURL, payload='', header=None): """Connect to the switch using HTTP/HTTPS protocol. :param requestType: Connection Request method :param requestURL: Connection URL :param payload: Data to send with POST request :param header: Request Headers :returns: HTTP response data :raises BrocadeZoningHttpException: """ try: if header is None: header = {} header.update({"User-Agent": "OpenStack Zone Driver"}) # Ensure only one connection is made throughout the life cycle protocol = zone_constant.HTTP if self.protocol == zone_constant.PROTOCOL_HTTPS: protocol = zone_constant.HTTPS if self.session is None: self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1) self.session.mount(protocol + '://', adapter) url = '%s://%s%s' % (protocol, self.switch_ip, requestURL) response = None if requestType == zone_constant.GET_METHOD: response = self.session.get(url, headers=(header), verify=False) elif requestType == zone_constant.POST_METHOD: response = self.session.post(url, payload, headers=(header), verify=False) # Throw exception when response status is not OK if response.status_code != zone_constant.STATUS_OK: msg = _("Error while querying page %(url)s on the switch, " "reason %(error)s.") % {'url': url, 'error': response.reason} raise b_exception.BrocadeZoningHttpException(msg) else: return response.text except requests.exceptions.ConnectionError as e: msg = (_("Error while connecting the switch %(switch_id)s " "with protocol %(protocol)s. Error: %(error)s.") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'error': six.text_type(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except b_exception.BrocadeZoningHttpException as ex: msg = (_("Unexpected status code from the switch %(switch_id)s " "with protocol %(protocol)s for url %(page)s. 
" "Error: %(error)s") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'page': requestURL, 'error': six.text_type(ex)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def create_auth_token(self): """Create the authentication token. Creates the authentication token to use in the authentication header return authentication header (Base64(username:password:random no)). :returns: Authentication Header :raises BrocadeZoningHttpException: """ try: # Send GET request to secinfo.html to get random number response = self.connect(zone_constant.GET_METHOD, zone_constant.SECINFO_PAGE) parsed_data = self.get_parsed_data(response, zone_constant.SECINFO_BEGIN, zone_constant.SECINFO_END) # Get the auth version for 8.1.0b+ switches self.auth_version = self.get_nvp_value(parsed_data, zone_constant.AUTHVERSION) if self.auth_version == "1": # Extract the random no from secinfo.html response self.random_no = self.get_nvp_value(parsed_data, zone_constant.RANDOM) # Form the authentication string auth_string = '%s:%s:%s' % (self.switch_user, self.switch_pwd, self.random_no) else: auth_string = '%s:%s' % (self.switch_user, self.switch_pwd) auth_token = base64.encode_as_text(auth_string).strip() auth_header = (zone_constant.AUTH_STRING + auth_token) # Build the proper header except Exception as e: msg = (_("Error while creating authentication token: %s") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return auth_header def authenticate(self): """Authenticate with the switch. Returns authentication status with modified authentication header (Base64(username:xxx:random no)). :returns: Authentication status :raises BrocadeZoningHttpException: """ headers = {zone_constant.AUTH_HEADER: self.auth_header} try: # GET Request to authenticate.html to verify the credentials response = self.connect(zone_constant.GET_METHOD, zone_constant.AUTHEN_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.AUTHEN_BEGIN, zone_constant.AUTHEN_END) isauthenticated = self.get_nvp_value( parsed_data, zone_constant.AUTHENTICATED) if isauthenticated == "yes": if self.auth_version == "3": auth_id = self.get_nvp_value(parsed_data, zone_constant.IDENTIFIER) auth_string = '%s:xxx:%s' % (self.switch_user, auth_id) else: # Replace password in the authentication string with xxx auth_string = '%s:xxx:%s' % (self.switch_user, self.random_no) auth_token = base64.encode_as_text(auth_string).strip() auth_header = zone_constant.AUTH_STRING + auth_token return True, auth_header else: auth_error_code = self.get_nvp_value(parsed_data, "errCode") msg = (_("Authentication failed, verify the switch " "credentials, error code %s.") % auth_error_code) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except Exception as e: msg = (_("Error while authenticating with switch: %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_session_info(self): """Get the session information from the switch :returns: Connection status information. 
""" try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to session.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SESSION_PAGE_ACTION, header=headers) except Exception as e: msg = (_("Error while getting session information %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return response def get_parsed_data(self, data, delim1, delim2): """Return the sub string between the delimiters. :param data: String to manipulate :param delim1: Delimiter 1 :param delim2: Delimiter 2 :returns: substring between the delimiters """ try: start = data.index(delim1) start = start + len(delim1) end = data.index(delim2) return data[start:end] except ValueError as e: msg = (_("Error while parsing the data: %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_nvp_value(self, data, keyname): """Get the value for the key passed. :param data: NVP to manipulate :param keyname: Key name :returns: value for the NVP """ try: start = data.index(keyname) start = start + len(keyname) temp = data[start:] end = temp.index("\n") return (temp[:end].lstrip('= ')) except ValueError as e: msg = (_("Error while getting nvp value: %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_managable_vf_list(self, session_info): """List of VFIDs that can be managed. :param session_info: Session information from the switch :returns: manageable VF list :raises BrocadeZoningHttpException: """ try: # Check the value of manageableLFList NVP, # throw exception as not supported if the nvp not available vf_list = self.get_nvp_value(session_info, zone_constant.MANAGEABLE_VF) if vf_list: vf_list = vf_list.split(",") # convert the string to list except b_exception.BrocadeZoningHttpException as e: msg = (_("Error while checking whether " "VF is available for management %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return vf_list[:-1] def change_vf_context(self, vfid, session_data): """Change the VF context in the session. :param vfid: VFID to which context should be changed. 
:param session_data: Session information from the switch :raises BrocadeZoningHttpException: """ try: managable_vf_list = self.get_managable_vf_list(session_data) LOG.debug("Manageable VF IDs are %(vflist)s.", {'vflist': managable_vf_list}) # proceed changing the VF context # if VF id can be managed if not throw exception if vfid in managable_vf_list: headers = {zone_constant.AUTH_HEADER: self.auth_header} data = zone_constant.CHANGE_VF.format(vfid=vfid) response = self.connect(zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) parsed_info = self.get_parsed_data(response, zone_constant.SESSION_BEGIN, zone_constant.SESSION_END) session_LF_Id = self.get_nvp_value(parsed_info, zone_constant.SESSION_LF_ID) if session_LF_Id == vfid: LOG.info("VF context is changed in the session.") else: msg = _("Cannot change VF context in the session.") LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) else: msg = (_("Cannot change VF context, " "specified VF is not available " "in the manageable VF list %(vf_list)s.") % {'vf_list': managable_vf_list}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except b_exception.BrocadeZoningHttpException as e: msg = (_("Error while changing VF context %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_zone_info(self): """Parse all the zone information and store it in the dictionary.""" try: self.cfgs = {} self.zones = {} self.active_cfg = '' self.alias = {} self.qlps = {} self.ifas = {} headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to gzoneinfo.htm response = self.connect(zone_constant.GET_METHOD, zone_constant.ZONE_PAGE, header=headers) # get the zone string from the response self.parsed_raw_zoneinfo = self.get_parsed_data( response, zone_constant.ZONEINFO_BEGIN, zone_constant.ZONEINFO_END).strip("\n") LOG.debug("Original zone string from the switch: %(zoneinfo)s", {'zoneinfo': self.parsed_raw_zoneinfo}) # convert the zone string to list zoneinfo = self.parsed_raw_zoneinfo.split() i = 0 while i < len(zoneinfo): info = zoneinfo[i] # check for the cfg delimiter if zone_constant.CFG_DELIM in info: # extract the cfg name cfg_name = info.lstrip(zone_constant.CFG_DELIM) # update the dict as # self.cfgs={cfg_name:zone_name1;zone_name2} self.cfgs.update({cfg_name: zoneinfo[i + 1]}) i = i + 2 # check for the zone delimiter elif zone_constant.ZONE_DELIM in info: # extract the zone name zone_name = info.lstrip(zone_constant.ZONE_DELIM) # update the dict as # self.zones={zone_name:members1;members2} self.zones.update({zone_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ALIAS_DELIM in info: alias_name = info.lstrip(zone_constant.ALIAS_DELIM) # update the dict as # self.alias={alias_name:members1;members2} self.alias.update({alias_name: zoneinfo[i + 1]}) i = i + 2 # check for quickloop zones elif zone_constant.QLP_DELIM in info: qlp_name = info.lstrip(zone_constant.QLP_DELIM) # update the map as self.qlps={qlp_name:members1;members2} self.qlps.update({qlp_name: zoneinfo[i + 1]}) i = i + 2 # check for fabric assist zones elif zone_constant.IFA_DELIM in info: ifa_name = info.lstrip(zone_constant.IFA_DELIM) # update the map as self.ifas={ifa_name:members1;members2} self.ifas.update({ifa_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ACTIVE_CFG_DELIM in info: # update the string self.active_cfg=cfg_name self.active_cfg = info.lstrip( zone_constant.ACTIVE_CFG_DELIM) if self.active_cfg == zone_constant.DEFAULT_CFG: self.active_cfg = 
"" i = i + 2 else: i = i + 1 except Exception as e: msg = (_("Error while changing VF context %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def is_supported_firmware(self): """Check firmware version is v6.4 or higher. This API checks if the firmware version per the plug-in support level. This only checks major and minor version. :returns: True if firmware is supported else False. :raises BrocadeZoningHttpException: """ isfwsupported = False try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to switch.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SWITCH_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.SWITCHINFO_BEGIN, zone_constant.SWITCHINFO_END) # get the firmware version nvp value fwVersion = self.get_nvp_value( parsed_data, zone_constant.FIRMWARE_VERSION).lstrip('v') ver = fwVersion.split(".") LOG.debug("Firmware version: %(version)s.", {'version': ver}) if int(ver[0] + ver[1]) > 63: isfwsupported = True except Exception as e: msg = (_("Error while checking the firmware version %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return isfwsupported def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format .. code-block:: python { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } :raises BrocadeZoningHttpException: """ active_zone_set = {} zones_map = {} try: self.get_zone_info() # get the zone information of the switch if self.active_cfg != '': # get the zones list of the active_Cfg zones_list = self.cfgs[self.active_cfg].split(";") for n in zones_list: # build the zones map zones_map.update( {n: self.zones[n].split(";")}) # Format map in the correct format active_zone_set = { "active_zone_config": self.active_cfg, "zones": zones_map} return active_zone_set except Exception as e: msg = (_("Failed getting active zone set from fabric %s.") % six.text_type(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def add_zones(self, add_zones_info, activate, active_zone_set=None): """Add zone configuration. This method will add the zone configuration passed by user. :param add_zones_info: Zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. 
        :param active_zone_set: Active zone set dict retrieved from
                                get_active_zone_set method
        :raises BrocadeZoningHttpException:
        """
        LOG.debug("Add zones - zones passed: %(zones)s.",
                  {'zones': add_zones_info})
        cfg_name = zone_constant.CFG_NAME
        cfgs = self.cfgs
        zones = self.zones
        alias = self.alias
        qlps = self.qlps
        ifas = self.ifas
        active_cfg = self.active_cfg
        # update the active_cfg, zones and cfgs map with new information
        zones, cfgs, active_cfg = self.add_zones_cfgs(cfgs, zones,
                                                      add_zones_info,
                                                      active_cfg,
                                                      cfg_name)
        # Build the zonestring with updated maps
        data = self.form_zone_string(cfgs, active_cfg,
                                     zones, alias, qlps, ifas, activate)
        LOG.debug("Add zones: final zone string after applying "
                  "to the switch: %(zonestring)s", {'zonestring': data})
        # Post the zone data to the switch
        error_code, error_msg = self.post_zone_data(data)
        if error_code != "0":
            msg = (_("Applying the zones and cfgs to the switch failed "
                     "(error code=%(err_code)s error msg=%(err_msg)s).")
                   % {'err_code': error_code, 'err_msg': error_msg})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)

    def update_zones(self, zone_info, activate, operation,
                     active_zone_set=None):
        """Update zone configuration.

        This method will update the zone configuration passed by user.

        :param zone_info: Zone names mapped to members. Zone members
                          are colon separated but case-insensitive

        .. code-block:: python

            {
                zonename1: [zonemember1, zonemember2, ...],
                zonename2: [zonemember1, zonemember2, ...],
                ...
            }

            e.g.:

            {
                'openstack50060b0000c26604201900051ee8e329':
                    ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
            }
        :param activate: True will activate the zone config.
        :param operation: ZONE_ADD or ZONE_REMOVE
        :param active_zone_set: Active zone set dict retrieved from
                                get_active_zone_set method
        :raises BrocadeZoningHttpException:
        """
        LOG.debug("Update zones - zones passed: %(zones)s.",
                  {'zones': zone_info})
        cfgs = self.cfgs
        zones = self.zones
        alias = self.alias
        qlps = self.qlps
        ifas = self.ifas
        active_cfg = self.active_cfg
        # update the zones with new information
        zones = self._update_zones(zones, zone_info, operation)
        # Build the zonestring with updated maps
        data = self.form_zone_string(cfgs, active_cfg,
                                     zones, alias, qlps, ifas, activate)
        LOG.debug("Update zones: final zone string after applying "
                  "to the switch: %(zonestring)s", {'zonestring': data})
        # Post the zone data to the switch
        error_code, error_msg = self.post_zone_data(data)
        if error_code != "0":
            msg = (_("Applying the zones and cfgs to the switch failed "
                     "(error code=%(err_code)s error msg=%(err_msg)s).")
                   % {'err_code': error_code, 'err_msg': error_msg})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)

    def form_zone_string(self, cfgs, active_cfg,
                         zones, alias, qlps, ifas, activate):
        """Build the zone string in the required format.

        :param cfgs: cfgs map
        :param active_cfg: Active cfg string
        :param zones: zones map
        :param alias: alias map
        :param qlps: qlps map
        :param ifas: ifas map
        :param activate: True will activate config.
        :returns: zonestring in the required format
        :raises BrocadeZoningHttpException:
        """
        try:
            zoneString = zone_constant.ZONE_STRING_PREFIX

            # 'saveonly' is derived from activate: an activated config is
            # not a save-only operation
            saveonly = "false" if activate is True else "true"

            # Form the zone string based on the dictionary of each item
            for cfg in sorted(cfgs.keys()):
                zoneString += (zone_constant.CFG_DELIM
                               + cfg + " " + cfgs.get(cfg) + " ")
            for zone in sorted(zones.keys()):
                zoneString += (zone_constant.ZONE_DELIM
                               + zone + " " + zones.get(zone) + " ")
            for al in sorted(alias.keys()):
                zoneString += (zone_constant.ALIAS_DELIM
                               + al + " " + alias.get(al) + " ")
            for qlp in sorted(qlps.keys()):
                zoneString += (zone_constant.QLP_DELIM
                               + qlp + " " + qlps.get(qlp) + " ")
            for ifa in sorted(ifas.keys()):
                zoneString += (zone_constant.IFA_DELIM
                               + ifa + " " + ifas.get(ifa) + " ")
            # append the active_cfg string only if it is not null and activate
            # is true
            if active_cfg != "" and activate:
                zoneString += (zone_constant.ACTIVE_CFG_DELIM
                               + active_cfg + " null ")
            # Build the final zone string
            zoneString += zone_constant.ZONE_END_DELIM + saveonly
        except Exception as e:
            msg = (_("Exception while forming the zone string: %s.")
                   % six.text_type(e))
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
        # Encode the zone string to a byte string for OpenSSL
        return encodeutils.safe_encode(zoneString)

    def add_zones_cfgs(self, cfgs, zones, add_zones_info,
                       active_cfg, cfg_name):
        """Add the zones and cfgs map based on the new zones info.

        This method will return the updated zones, cfgs and active_cfg.

        :param cfgs: Existing cfgs map
        :param zones: Existing zones map
        :param add_zones_info: Zones map to add
        :param active_cfg: Existing active cfg
        :param cfg_name: New cfg name
        :returns: updated zones, zone configs map, and active_cfg
        """
        cfg_string = ""
        delimiter = ""
        zones_in_active_cfg = ""
        try:
            if active_cfg:
                zones_in_active_cfg = cfgs.get(active_cfg)
            for zone_name, members in add_zones_info.items():
                # if the new zone is not in the active_cfg, build the cfg
                # string with the new zones
                if zone_name not in zones_in_active_cfg:
                    cfg_string += delimiter + zone_name
                    delimiter = ";"
                # add a new zone with the members
                zones.update({zone_name: ";".join(members)})
            # update cfg string
            if active_cfg:
                if cfg_string:
                    # update the existing active cfg map with cfgs string
                    cfgs.update(
                        {active_cfg: cfg_string + ";" + cfgs.get(active_cfg)})
            else:
                # create new cfg and update that cfgs map with the new cfg
                active_cfg = cfg_name
                cfgs.update({cfg_name: cfg_string})
        except Exception as e:
            msg = (_("Error while updating the new zones and cfgs "
                     "in the zone string. Error %(description)s.")
                   % {'description': six.text_type(e)})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
        return zones, cfgs, active_cfg

    def _update_zones(self, zones, updated_zones_info, operation):
        """Update the zones based on the updated zones info.
        This method will return the updated zones.

        :param zones: Existing zones map
        :param updated_zones_info: Zones map to update
        :param operation: ZONE_ADD or ZONE_REMOVE
        :returns: updated zones
        """
        try:
            for zone_name in updated_zones_info:
                members = updated_zones_info[zone_name]
                # update the zone string only if the zone name already
                # exists and doesn't already have the new members
                current_members = zones.get(zone_name).split(";")
                if operation == zone_constant.ZONE_ADD:
                    new_members = set(members).difference(
                        set(current_members))
                    if new_members:
                        # update the existing zone with new members
                        zones.update({zone_name: (";".join(new_members)
                                      + ";" + zones.get(zone_name))})
                else:
                    new_members = set(current_members).difference(
                        set(members))
                    if new_members:
                        zones.pop(zone_name)
                        zones.update({zone_name: ";".join(new_members)})
        except Exception as e:
            msg = (_("Error while updating the zones "
                     "in the zone string. Error %(description)s.")
                   % {'description': six.text_type(e)})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
        return zones

    def is_vf_enabled(self):
        """To check whether VF is enabled or not.

        :returns: boolean to indicate VF enabled and session information
        """
        session_info = self.get_session_info()
        parsed_data = self.get_parsed_data(session_info,
                                           zone_constant.SESSION_BEGIN,
                                           zone_constant.SESSION_END)
        try:
            is_vf_enabled = bool(self.get_nvp_value(
                parsed_data, zone_constant.VF_ENABLED))
        except b_exception.BrocadeZoningHttpException:
            is_vf_enabled = False
            parsed_data = None
        return is_vf_enabled, parsed_data

    def get_nameserver_info(self):
        """Get name server data from fabric.

        Return the connected node port wwn list (local and remote)
        for the given switch fabric.

        :returns: name server information.
        """
        nsinfo = []
        headers = {zone_constant.AUTH_HEADER: self.auth_header}
        # GET request to nsinfo.html
        response = self.connect(zone_constant.GET_METHOD,
                                zone_constant.NS_PAGE,
                                header=headers)
        for line in response.splitlines():
            if line.startswith(zone_constant.NS_DELIM):
                nsinfo.append(line.split('=')[-1])
        return nsinfo

    def delete_zones_cfgs(
            self, cfgs, zones,
            delete_zones_info, active_cfg):
        """Delete the zones and cfgs map based on the new zones info.

        Return the updated zones, cfgs and active_cfg after deleting
        the required items.

        :param cfgs: Existing cfgs map
        :param zones: Existing zones map
        :param delete_zones_info: Zone names to delete, semicolon separated
        :param active_cfg: Existing active cfg
        :returns: updated zones, zone config sets, and active zone config
        :raises BrocadeZoningHttpException:
        """
        try:
            delete_zones_info = delete_zones_info.split(";")
            for zone in delete_zones_info:
                # remove the zones from the zone map
                zones.pop(zone)
                # iterated all the cfgs, but need to check since in SSH only
                # active cfg is iterated
                for k, v in list(cfgs.items()):
                    v = v.split(";")
                    if zone in v:
                        # remove the zone from the cfg string
                        v.remove(zone)
                        # if all the zones are removed, remove the cfg
                        # from the cfg map
                        if not v:
                            cfgs.pop(k)
                        # update the original cfg with the updated string
                        else:
                            cfgs[k] = ";".join(v)
            # if all the zones are removed in the active_cfg, update it with
            # empty string
            if active_cfg not in cfgs:
                active_cfg = ""
        except KeyError as e:
            msg = (_("Error while removing the zones and cfgs "
                     "in the zone string: %(description)s.")
                   % {'description': six.text_type(e)})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
        return zones, cfgs, active_cfg

    def delete_zones(self, delete_zones_info, activate, active_zone_set=None):
        """Delete zones from fabric.
        Deletes zones in the active zone config.

        :param delete_zones_info: zone names separated by semicolon
        :param activate: True/False
        :param active_zone_set: the active zone set dict retrieved
                                from get_active_zone_set method
        """
        cfgs = self.cfgs
        zones = self.zones
        alias = self.alias
        qlps = self.qlps
        ifas = self.ifas
        active_cfg = self.active_cfg
        # update the active_cfg, zones and cfgs map with required information
        # being removed
        zones, cfgs, active_cfg = self.delete_zones_cfgs(
            cfgs,
            zones,
            delete_zones_info,
            active_cfg)
        # Build the zonestring with updated maps
        data = self.form_zone_string(cfgs,
                                     active_cfg,
                                     zones,
                                     alias,
                                     qlps,
                                     ifas,
                                     activate)
        LOG.debug("Delete zones: final zone string after applying "
                  "to the switch: %(zonestring)s", {'zonestring': data})
        error_code, error_msg = self.post_zone_data(data)
        if error_code != "0":
            msg = (_("Applying the zones and cfgs to the switch failed "
                     "(error code=%(err_code)s error msg=%(err_msg)s).")
                   % {'err_code': error_code, 'err_msg': error_msg})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)

    def post_zone_data(self, data):
        """Send POST request to the switch with the payload.

        :param data: payload to be sent to switch
        """
        status = "progress"
        parsed_data_txn = ""
        headers = {zone_constant.AUTH_HEADER: self.auth_header}
        LOG.debug("Requesting the switch with posting the zone string.")
        # POST request to gzoneinfo with zonestring as payload
        response = self.connect(zone_constant.POST_METHOD,
                                zone_constant.ZONE_PAGE,
                                data,
                                headers)
        parsed_data = self.get_parsed_data(response,
                                           zone_constant.ZONE_TX_BEGIN,
                                           zone_constant.ZONE_TX_END)
        transID = self.get_nvp_value(parsed_data,
                                     zone_constant.ZONE_TX_ID)
        transURL = zone_constant.ZONE_TRAN_STATUS.format(txnId=transID)
        timeout = 360
        sleep_time = 3
        time_elapsed = 0
        while status != "done":
            txn_response = self.connect(
                zone_constant.GET_METHOD, transURL, "", headers)
            parsed_data_txn = self.get_parsed_data(
                txn_response,
                zone_constant.ZONE_TX_BEGIN,
                zone_constant.ZONE_TX_END)
            status = self.get_nvp_value(parsed_data_txn,
                                        zone_constant.ZONE_TX_STATUS)
            time.sleep(sleep_time)
            time_elapsed += sleep_time
            if time_elapsed > timeout:
                break
        if status != "done":
            errorCode = -1
            errorMessage = ("Timed out, waiting for zone transaction on "
                            "the switch to complete")
        else:
            errorCode = self.get_nvp_value(parsed_data_txn,
                                           zone_constant.ZONE_ERROR_CODE)
            errorMessage = self.get_nvp_value(parsed_data_txn,
                                              zone_constant.ZONE_ERROR_MSG)
        return errorCode, errorMessage

    def check_change_vf_context(self):
        """Check whether the VF-related configuration is valid and proceed."""
        vf_enabled, session_data = self.is_vf_enabled()
        # VF enabled will be False if VF is disabled or not supported
        LOG.debug("VF enabled on switch: %(vfenabled)s.",
                  {'vfenabled': vf_enabled})
        # Change the VF context in the session
        if vf_enabled:
            if self.vfid is None:
                msg = _("No VF ID is defined in the configuration file.")
                LOG.error(msg)
                raise b_exception.BrocadeZoningHttpException(reason=msg)
            elif self.vfid != 128:
                self.change_vf_context(self.vfid, session_data)
        else:
            if self.vfid is not None:
                msg = _("VF is not enabled.")
                LOG.error(msg)
                raise b_exception.BrocadeZoningHttpException(reason=msg)

    def _disconnect(self):
        """Disconnect from the switch using HTTP/HTTPS protocol.
        :raises BrocadeZoningHttpException:
        """
        try:
            headers = {zone_constant.AUTH_HEADER: self.auth_header}
            response = self.connect(zone_constant.GET_METHOD,
                                    zone_constant.LOGOUT_PAGE,
                                    header=headers)
            return response
        except requests.exceptions.ConnectionError as e:
            msg = (_("Error while connecting to the switch %(switch_id)s "
                     "with protocol %(protocol)s. Error: %(error)s.")
                   % {'switch_id': self.switch_ip,
                      'protocol': self.protocol,
                      'error': six.text_type(e)})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
        except b_exception.BrocadeZoningHttpException as ex:
            msg = (_("Unexpected status code from the switch %(switch_id)s "
                     "with protocol %(protocol)s for url %(page)s. "
                     "Error: %(error)s")
                   % {'switch_id': self.switch_ip,
                      'protocol': self.protocol,
                      'page': zone_constant.LOGOUT_PAGE,
                      'error': six.text_type(ex)})
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)

    def cleanup(self):
        """Close the session."""
        self._disconnect()
        self.session.close()
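# A small, standalone illustration of the firmware check in
# is_supported_firmware() above: the major and minor digits are
# concatenated as strings before the numeric comparison, so "v6.4"
# becomes 64 and passes the "> 63" test (the version strings below are
# made up for demonstration).
for raw in ("v6.4.3", "v7.1.0c", "v6.3.2b"):
    ver = raw.lstrip('v').split(".")
    print(raw, "->", int(ver[0] + ver[1]),
          "supported:", int(ver[0] + ver[1]) > 63)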
from scaleiopy import *
from pprint import pprint
import sys

# HTTPS must be used, as there seems to be an issue with 302 responses
# in Requests when using POST
sio = scaleio.ScaleIO("https://" + sys.argv[1] + "/api",
                      sys.argv[2], sys.argv[3], False, "ERROR")

snapSpec = scaleio.SnapshotSpecification()
snapSpec.addVolume(sio.get_volume_by_name(sys.argv[4]))

print "**********"
print "* Volume *"
print "**********"
pprint(sio.get_volume_by_name(sys.argv[4]))

print "**********************"
print "Snapshot specification"
print "**********************"
pprint(snapSpec)

print "* Creating Snapshot"
result = sio.create_snapshot(sio.get_system_id(), snapSpec)
pprint(result)
"""Checkout Zookeeper ensemble. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import click from treadmill import context from treadmill import checkout from treadmill import zknamespace as z def _metadata(): """Returns check metadata.""" _meta = { 'index': 'name', 'query': 'select * from servers order by partition', 'checks': [ { 'description': 'Partitions capacity.', 'query': """ select name, partition, presence from servers where presence != 1 order by partition """, 'metric': """ select partition, count(*) as down from ({query}) group by partition """, 'alerts': [], }, { 'description': 'Topology syncronised.', 'query': """ select name, partition, in_zk, in_ldap from servers where in_zk == 0 or in_ldap == 0 order by partition """, 'metric': """ select count(*) as not_synced from ({query}) group by partition """, 'alerts': [ { 'description': 'Servers synced between LDAP and Zk', 'severity': 'error', 'threshold': { 'not_synced': 0 } }, ] } ] } admin_cell = context.GLOBAL.admin.cell() cell = admin_cell.get(context.GLOBAL.cell) partitions = cell.get('partitions', [{'_id': '_default'}]) has_default = False for partition in partitions: name = partition['_id'] down_threshold = partition.get('down-threshold', 0) if name == '_default': has_default = True _meta['checks'][0]['alerts'].append({ 'description': 'Partition: {partition}', 'severity': 'error', 'match': { 'partition': name, }, 'threshold': { 'down': down_threshold, } }) if not has_default: _meta['checks'][0]['alerts'].append({ 'description': 'Partition: {partition}', 'severity': 'error', 'match': { 'partition': '_default' }, 'threshold': { 'down': 0, } }) return _meta def init(): """Top level command handler.""" @click.command('servers') def check_servers(): """Check Zookeeper status.""" def _check(conn, **_kwargs): """Server state: """ admin_srv = context.GLOBAL.admin.server() servers_in_ldap = { server['_id']: server['partition'] for server in admin_srv.list({'cell': context.GLOBAL.cell}) } zkclient = context.GLOBAL.zk.conn presence = set(zkclient.get_children(z.SERVER_PRESENCE)) in_zk = set(zkclient.get_children(z.SERVERS)) blacked_out = set(zkclient.get_children(z.BLACKEDOUT_SERVERS)) conn.execute( """ CREATE TABLE servers ( name text, partition text, in_ldap, in_zk, up integer, blackout integer, presence integer ) """ ) all_servers = set(servers_in_ldap.keys()) | in_zk up = { server: checkout.telnet(server) for server in all_servers } rows = [] for name in set(servers_in_ldap.keys()) | in_zk: rows.append(( name, servers_in_ldap.get(name), name in servers_in_ldap, name in in_zk, name in up, name in blacked_out, name in presence, )) conn.executemany( """ INSERT INTO servers( name, partition, in_ldap, in_zk, up, blackout, presence ) values(?, ?, ?, ?, ?, ?, ?) """, rows ) return _metadata() return _check return check_servers
from django.contrib import admin from fclover.comment.models import * admin.site.register(CommentU2A) admin.site.register(CommentU2U) admin.site.register(MessageU2A) admin.site.register(MessageU2U)
from __future__ import absolute_import

import datetime
import json
import math
import time

from google.api_core import datetime_helpers
from google.cloud.pubsub_v1.subscriber._protocol import requests


_MESSAGE_REPR = """\
Message {{
  data: {!r}
  attributes: {}
}}"""


def _indent(lines, prefix="  "):
    """Indent some text.

    Note that this is present as ``textwrap.indent``, but not in Python 2.

    Args:
        lines (str): The newline delimited string to be indented.
        prefix (Optional[str]): The prefix to indent each line with. Default
            to two spaces.

    Returns:
        str: The newly indented content.
    """
    indented = []
    for line in lines.split("\n"):
        indented.append(prefix + line)
    return "\n".join(indented)


class Message(object):
    """A representation of a single Pub/Sub message.

    The common way to interact with
    :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive
    them in callbacks on subscriptions; most users should never have a need
    to instantiate them by hand. (The exception to this is if you are
    implementing a custom subclass to
    :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)

    Attributes:
        message_id (str): The message ID. In general, you should not need
            to use this directly.
        data (bytes): The data in the message. Note that this will be a
            :class:`bytes`, not a text string.
        attributes (.ScalarMapContainer): The attributes sent along with the
            message. See :attr:`attributes` for more information on this
            type.
        publish_time (datetime): The time that this message was originally
            published.
    """

    def __init__(self, message, ack_id, request_queue):
        """Construct the Message.

        .. note::

            This class should not be constructed directly; it is the
            responsibility of :class:`BasePolicy` subclasses to do so.

        Args:
            message (~.pubsub_v1.types.PubsubMessage): The message received
                from Pub/Sub.
            ack_id (str): The ack_id received from Pub/Sub.
            request_queue (queue.Queue): A queue provided by the policy that
                can accept requests; the policy is responsible for handling
                those requests.
        """
        self._message = message
        self._ack_id = ack_id
        self._request_queue = request_queue
        self.message_id = message.message_id

        # The instantiation time is the time that this message
        # was received. Tracking this provides us a way to be smart about
        # the default lease deadline.
        self._received_timestamp = time.time()

    def __repr__(self):
        # Get an abbreviated version of the data.
        abbv_data = self._message.data
        if len(abbv_data) > 50:
            abbv_data = abbv_data[:50] + b"..."

        pretty_attrs = json.dumps(
            dict(self.attributes), indent=2, separators=(",", ": "),
            sort_keys=True
        )
        pretty_attrs = _indent(pretty_attrs)
        # We don't actually want the first line indented.
        pretty_attrs = pretty_attrs.lstrip()
        return _MESSAGE_REPR.format(abbv_data, pretty_attrs)

    @property
    def attributes(self):
        """Return the attributes of the underlying Pub/Sub Message.

        .. warning::

            A ``ScalarMapContainer`` behaves slightly differently than a
            ``dict``. For a Pub / Sub message this is a ``string->string``
            map. When trying to access a value via ``map['key']``, if the
            key is not in the map, then the default value for the string
            type will be returned, which is an empty string. It may be more
            intuitive to just cast the map to a ``dict`` or to use
            ``map.get``.

        Returns:
            .ScalarMapContainer: The message's attributes. This is a
                ``dict``-like object provided by ``google.protobuf``.
        """
        return self._message.attributes

    @property
    def data(self):
        """Return the data for the underlying Pub/Sub Message.

        Returns:
            bytes: The message data.
                This is always a bytestring; if you want a text string,
                call :meth:`bytes.decode`.
        """
        return self._message.data

    @property
    def publish_time(self):
        """Return the time that the message was originally published.

        Returns:
            datetime: The date and time that the message was published.
        """
        timestamp = self._message.publish_time
        delta = datetime.timedelta(
            seconds=timestamp.seconds, microseconds=timestamp.nanos // 1000
        )
        return datetime_helpers._UTC_EPOCH + delta

    @property
    def size(self):
        """Return the size of the underlying message, in bytes."""
        return self._message.ByteSize()

    @property
    def ack_id(self):
        """str: the ID used to ack the message."""
        return self._ack_id

    def ack(self):
        """Acknowledge the given message.

        Acknowledging a message in Pub/Sub means that you are done
        with it, and it will not be delivered to this subscription again.
        You should avoid acknowledging messages until you have
        *finished* processing them, so that in the event of a failure,
        you receive the message again.

        .. warning::
            Acks in Pub/Sub are best effort. You should always
            ensure that your processing code is idempotent, as you may
            receive any given message more than once.
        """
        time_to_ack = math.ceil(time.time() - self._received_timestamp)
        self._request_queue.put(
            requests.AckRequest(
                ack_id=self._ack_id, byte_size=self.size,
                time_to_ack=time_to_ack
            )
        )

    def drop(self):
        """Release the message from lease management.

        This informs the policy to no longer hold on to the lease for this
        message. Pub/Sub will re-deliver the message if it is not
        acknowledged before the existing lease expires.

        .. warning::
            For most use cases, the only reason to drop a message from
            lease management is on :meth:`ack` or :meth:`nack`; these
            methods both call this one. You probably do not want to call
            this method directly.
        """
        self._request_queue.put(
            requests.DropRequest(ack_id=self._ack_id, byte_size=self.size)
        )

    def modify_ack_deadline(self, seconds):
        """Resets the deadline for acknowledgement.

        New deadline will be the given value of seconds from now.

        The default implementation handles this for you; you should not need
        to manually deal with setting ack deadlines. The exception case is
        if you are implementing your own custom subclass of
        :class:`~.pubsub_v1.subscriber._consumer.Consumer`.

        Args:
            seconds (int): The number of seconds to set the lease deadline
                to. This should be between 0 and 600. Due to network
                latency, values below 10 are advised against.
        """
        self._request_queue.put(
            requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds)
        )

    def nack(self):
        """Decline to acknowledge the given message.

        This will cause the message to be re-delivered to the subscription.
        """
        self._request_queue.put(
            requests.NackRequest(ack_id=self._ack_id, byte_size=self.size)
        )
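# A minimal sketch of driving the Message class above by hand. The fake
# protobuf message and the plain queue are stand-ins (assumptions, not
# part of the real client) so the ack() flow can be observed without a
# live subscription.
try:
    import queue  # Python 3
except ImportError:  # Python 2
    import Queue as queue


class _FakePubsubMessage(object):
    """Mimics only the fields Message reads."""

    message_id = "1"
    data = b"payload"
    attributes = {}

    def ByteSize(self):
        return len(self.data)


demo_queue = queue.Queue()
msg = Message(_FakePubsubMessage(), "ack-id-1", demo_queue)
msg.ack()
print(demo_queue.get_nowait())  # AckRequest(ack_id='ack-id-1', ...)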
from sqlalchemy import MetaData, Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volume', meta, autoload=True)
    volumes.c.clone_of.alter(name='restore_of')


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volume', meta, autoload=True)
    volumes.c.restore_of.alter(name='clone_of')
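# A minimal sketch of exercising the migration above outside the
# sqlalchemy-migrate runner (the sqlite URL and a pre-existing `volume`
# table with a `clone_of` column are assumptions for illustration; in a
# real deployment the migrate framework passes the engine in itself).
from sqlalchemy import create_engine

engine = create_engine("sqlite:///demo.db")
upgrade(engine)    # renames volume.clone_of  -> volume.restore_of
downgrade(engine)  # renames volume.restore_of -> volume.clone_of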
"""The HFS path specification implementation.""" from dfvfs.lib import definitions from dfvfs.path import factory from dfvfs.path import path_spec class HFSPathSpec(path_spec.PathSpec): """HFS path specification implementation. Attributes: data_stream (str): data stream name, where None indicates the default data stream. identifier (int): catalog node identifier (CNID). location (str): location. """ TYPE_INDICATOR = definitions.TYPE_INDICATOR_HFS def __init__( self, data_stream=None, identifier=None, location=None, parent=None, **kwargs): """Initializes a path specification. Note that an HFS path specification must have a parent. Args: data_stream (Optional[str]): data stream name, where None indicates the default data stream. identifier (Optional[int]): catalog node identifier (CNID). location (Optional[str]): location. parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when parent or both identifier and location are not set. """ if (not identifier and not location) or not parent: raise ValueError('Missing identifier and location, or parent value.') super(HFSPathSpec, self).__init__(parent=parent, **kwargs) self.data_stream = data_stream self.identifier = identifier self.location = location @property def comparable(self): """str: comparable representation of the path specification.""" string_parts = [] if self.data_stream: string_parts.append('data stream: {0:s}'.format(self.data_stream)) if self.identifier is not None: string_parts.append('identifier: {0:d}'.format(self.identifier)) if self.location is not None: string_parts.append('location: {0:s}'.format(self.location)) return self._GetComparable(sub_comparable_string=', '.join(string_parts)) factory.Factory.RegisterPathSpec(HFSPathSpec)
"""Manages progress bars for DVC repo.""" import logging import sys from threading import RLock import fsspec from tqdm import tqdm from dvc.env import DVC_IGNORE_ISATTY from dvc.utils import env2bool logger = logging.getLogger(__name__) tqdm.set_lock(RLock()) class Tqdm(tqdm): """ maximum-compatibility tqdm-based progressbars """ BAR_FMT_DEFAULT = ( "{percentage:3.0f}% {desc}|{bar}|" "{postfix[info]}{n_fmt}/{total_fmt}" " [{elapsed}<{remaining}, {rate_fmt:>11}]" ) # nested bars should have fixed bar widths to align nicely BAR_FMT_DEFAULT_NESTED = ( "{percentage:3.0f}%|{bar:10}|{desc:{ncols_desc}.{ncols_desc}}" "{postfix[info]}{n_fmt}/{total_fmt}" " [{elapsed}<{remaining}, {rate_fmt:>11}]" ) BAR_FMT_NOTOTAL = ( "{desc}{bar:b}|{postfix[info]}{n_fmt} [{elapsed}, {rate_fmt:>11}]" ) BYTES_DEFAULTS = { "unit": "B", "unit_scale": True, "unit_divisor": 1024, "miniters": 1, } def __init__( self, iterable=None, disable=None, level=logging.ERROR, desc=None, leave=False, bar_format=None, bytes=False, # pylint: disable=redefined-builtin file=None, total=None, postfix=None, **kwargs, ): """ bytes : shortcut for `unit='B', unit_scale=True, unit_divisor=1024, miniters=1` desc : persists after `close()` level : effective logging level for determining `disable`; used only if `disable` is unspecified disable : If (default: None) or False, will be determined by logging level. May be overridden to `True` due to non-TTY status. Skip override by specifying env var `DVC_IGNORE_ISATTY`. kwargs : anything accepted by `tqdm.tqdm()` """ kwargs = kwargs.copy() if bytes: kwargs = {**self.BYTES_DEFAULTS, **kwargs} else: kwargs.setdefault("unit_scale", total > 999 if total else True) if file is None: file = sys.stderr # auto-disable based on `logger.level` if not disable: disable = logger.getEffectiveLevel() > level # auto-disable based on TTY if ( not disable and not env2bool(DVC_IGNORE_ISATTY) and hasattr(file, "isatty") ): disable = not file.isatty() super().__init__( iterable=iterable, disable=disable, leave=leave, desc=desc, bar_format="!", lock_args=(False,), total=total, **kwargs, ) self.postfix = postfix or {"info": ""} if bar_format is None: if self.__len__(): self.bar_format = ( self.BAR_FMT_DEFAULT_NESTED if self.pos else self.BAR_FMT_DEFAULT ) else: self.bar_format = self.BAR_FMT_NOTOTAL else: self.bar_format = bar_format self.refresh() def update_msg(self, msg: str, n: int = 1) -> None: """ Sets `msg` as a postfix and calls `update(n)`. """ self.set_msg(msg) self.update(n) def set_msg(self, msg: str) -> None: self.postfix["info"] = f" {msg} |" def update_to(self, current, total=None): if total: self.total = total self.update(current - self.n) def wrap_fn(self, fn, callback=None): """ Returns a wrapped `fn` which calls `callback()` on each call. `callback` is `self.update` by default. 
""" if callback is None: callback = self.update def wrapped(*args, **kwargs): res = fn(*args, **kwargs) callback() return res return wrapped def as_callback(self): return FsspecCallback(self) def close(self): self.postfix["info"] = "" # remove ETA (either unknown or zero); remove completed bar self.bar_format = self.bar_format.replace("<{remaining}", "").replace( "|{bar:10}|", " " ) super().close() @property def format_dict(self): """inject `ncols_desc` to fill the display width (`ncols`)""" d = super().format_dict ncols = d["ncols"] or 80 # assumes `bar_format` has max one of ("ncols_desc" & "ncols_info") ncols_left = ( ncols - len(self.format_meter(ncols_desc=1, ncols_info=1, **d)) + 1 ) ncols_left = max(ncols_left, 0) if ncols_left: d["ncols_desc"] = d["ncols_info"] = ncols_left else: # work-around for zero-width description d["ncols_desc"] = d["ncols_info"] = 1 d["prefix"] = "" return d class FsspecCallback(fsspec.Callback): def __init__(self, progress_bar): self.progress_bar = progress_bar super().__init__() def set_size(self, size): if size is not None: self.progress_bar.total = size self.progress_bar.refresh() super().set_size(size) def relative_update(self, inc=1): self.progress_bar.update(inc) super().relative_update(inc) def absolute_update(self, value): self.progress_bar.update_to(value) super().absolute_update(value) @staticmethod def wrap_fn(cb, fn): def wrapped(*args, **kwargs): res = fn(*args, **kwargs) cb.relative_update() return res return wrapped def tdqm_or_callback_wrapped( fobj, method, total, callback=None, **pbar_kwargs ): if callback: from funcy import nullcontext from tqdm.utils import CallbackIOWrapper callback.set_size(total) wrapper = CallbackIOWrapper(callback.relative_update, fobj, method) return nullcontext(wrapper) return Tqdm.wrapattr(fobj, method, total=total, bytes=True, **pbar_kwargs) DEFAULT_CALLBACK = fsspec.callbacks.NoOpCallback()
"""Helpers for the 'hello' and legacy hello commands.""" import copy import datetime import itertools from typing import Any, Generic, List, Mapping, Optional, Set, Tuple from bson.objectid import ObjectId from pymongo import common from pymongo.server_type import SERVER_TYPE from pymongo.typings import _DocumentType class HelloCompat: CMD = "hello" LEGACY_CMD = "ismaster" PRIMARY = "isWritablePrimary" LEGACY_PRIMARY = "ismaster" LEGACY_ERROR = "not master" def _get_server_type(doc): """Determine the server type from a hello response.""" if not doc.get("ok"): return SERVER_TYPE.Unknown if doc.get("serviceId"): return SERVER_TYPE.LoadBalancer elif doc.get("isreplicaset"): return SERVER_TYPE.RSGhost elif doc.get("setName"): if doc.get("hidden"): return SERVER_TYPE.RSOther elif doc.get(HelloCompat.PRIMARY): return SERVER_TYPE.RSPrimary elif doc.get(HelloCompat.LEGACY_PRIMARY): return SERVER_TYPE.RSPrimary elif doc.get("secondary"): return SERVER_TYPE.RSSecondary elif doc.get("arbiterOnly"): return SERVER_TYPE.RSArbiter else: return SERVER_TYPE.RSOther elif doc.get("msg") == "isdbgrid": return SERVER_TYPE.Mongos else: return SERVER_TYPE.Standalone class Hello(Generic[_DocumentType]): """Parse a hello response from the server. .. versionadded:: 3.12 """ __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: self._server_type = _get_server_type(doc) self._doc: _DocumentType = doc self._is_writable = self._server_type in ( SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer, ) self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable self._awaitable = awaitable @property def document(self) -> _DocumentType: """The complete hello command response document. .. 
versionadded:: 3.4 """ return copy.copy(self._doc) @property def server_type(self) -> int: return self._server_type @property def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return set( map( common.clean_node, itertools.chain( self._doc.get("hosts", []), self._doc.get("passives", []), self._doc.get("arbiters", []), ), ) ) @property def tags(self) -> Mapping[str, Any]: """Replica set member tags or empty dict.""" return self._doc.get("tags", {}) @property def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" if self._doc.get("primary"): return common.partition_node(self._doc["primary"]) else: return None @property def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self._doc.get("setName") @property def max_bson_size(self) -> int: return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) @property def max_message_size(self) -> int: return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) @property def max_write_batch_size(self) -> int: return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) @property def min_wire_version(self) -> int: return self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) @property def max_wire_version(self) -> int: return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) @property def set_version(self) -> Optional[int]: return self._doc.get("setVersion") @property def election_id(self) -> Optional[ObjectId]: return self._doc.get("electionId") @property def cluster_time(self) -> Optional[Mapping[str, Any]]: return self._doc.get("$clusterTime") @property def logical_session_timeout_minutes(self) -> Optional[int]: return self._doc.get("logicalSessionTimeoutMinutes") @property def is_writable(self) -> bool: return self._is_writable @property def is_readable(self) -> bool: return self._is_readable @property def me(self) -> Optional[Tuple[str, int]]: me = self._doc.get("me") if me: return common.clean_node(me) return None @property def last_write_date(self) -> Optional[datetime.datetime]: return self._doc.get("lastWrite", {}).get("lastWriteDate") @property def compressors(self) -> Optional[List[str]]: return self._doc.get("compression") @property def sasl_supported_mechs(self) -> List[str]: """Supported authentication mechanisms for the current user. For example:: >>> hello.sasl_supported_mechs ["SCRAM-SHA-1", "SCRAM-SHA-256"] """ return self._doc.get("saslSupportedMechs", []) @property def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: """The speculativeAuthenticate field.""" return self._doc.get("speculativeAuthenticate") @property def topology_version(self) -> Optional[Mapping[str, Any]]: return self._doc.get("topologyVersion") @property def awaitable(self) -> bool: return self._awaitable @property def service_id(self) -> Optional[ObjectId]: return self._doc.get("serviceId") @property def hello_ok(self) -> bool: return self._doc.get("helloOk", False)
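# A minimal sketch exercising the Hello wrapper above with a hand-built
# response document (field values are illustrative, not from a live
# server).
doc = {
    "ok": 1,
    "isWritablePrimary": True,
    "setName": "rs0",
    "hosts": ["localhost:27017", "localhost:27018"],
    "maxWireVersion": 17,
}
hello = Hello(doc)
assert hello.server_type == SERVER_TYPE.RSPrimary
assert hello.is_writable and hello.is_readable
print(hello.all_hosts)  # {('localhost', 27017), ('localhost', 27018)}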
import copy
import json
import warnings

import requests

from indexclient.errors import BaseIndexError

MAX_RETRIES = 10

UPDATABLE_ATTRS = [
    "file_name",
    "urls",
    "version",
    "metadata",
    "acl",
    "authz",
    "urls_metadata",
]


def json_dumps(data):
    return json.dumps({k: v for (k, v) in data.items() if v is not None})


def handle_error(resp):
    if 400 <= resp.status_code < 600:
        try:
            # don't shadow the `json` module here; the except clause below
            # needs it to resolve JSONDecodeError
            error_body = resp.json()
            resp.reason = error_body["error"]
        except (json.decoder.JSONDecodeError, KeyError):
            pass
        finally:
            resp.raise_for_status()


def timeout_wrapper(func):
    def timeout(*args, **kwargs):
        kwargs.setdefault("timeout", 60)
        return func(*args, **kwargs)

    return timeout


def retry_and_timeout_wrapper(func):
    def retry_logic_with_timeout(*args, **kwargs):
        kwargs.setdefault("timeout", 60)
        retries = 0
        while retries < MAX_RETRIES:
            try:
                return func(*args, **kwargs)
            except requests.exceptions.ReadTimeout:
                retries += 1
                if retries == MAX_RETRIES:
                    raise

    return retry_logic_with_timeout


class IndexClient(object):
    def __init__(self, baseurl, version="v0", auth=None):
        self.auth = auth
        self.url = baseurl
        self.version = version

    def url_for(self, *path):
        subpath = "/".join(path).lstrip("/")
        return "{}/{}".format(self.url.rstrip("/"), subpath)

    def check_status(self):
        """Check that the API we are trying to communicate with is online"""
        resp = requests.get(self.url + "/index")
        handle_error(resp)

    def global_get(self, did, no_dist=False):
        """
        Makes a web request to the Indexd service global endpoint to
        retrieve an index document record.

        :param str did: The UUID for the index record we want to retrieve.
        :param boolean no_dist: *optional* Specify if we want distributed
            search or not
        :returns: A Document object representing the index record
        """
        try:
            if no_dist:
                response = self._get(did, params={"no_dist": ""})
            else:
                response = self._get(did)
        except requests.HTTPError as e:
            if e.response.status_code == 404:
                return None
            else:
                raise e
        return Document(self, did, json=response.json())

    def get(self, did):
        """
        Makes a web request to the Indexd service to retrieve an index
        document record.

        :param str did: The UUID for the index record we want to retrieve.
        :returns: A Document object representing the index record
        """
        try:
            response = self._get("index", did)
        except requests.HTTPError as e:
            if e.response.status_code == 404:
                return None
            else:
                raise e
        return Document(self, did, json=response.json())

    def bulk_request(self, dids):
        """
        bulk_get makes one http request to the indexd service and retrieves
        a list of Documents based on the dids provided.

        Args:
            dids (list): list of dids for potential documents

        Returns:
            list: Document objects representing index records
        """
        headers = {"content-type": "application/json"}
        try:
            response = self._post("bulk/documents", json=dids,
                                  headers=headers)
        except requests.HTTPError as exception:
            if exception.response.status_code == 404:
                return None
            else:
                raise exception
        return [Document(self, doc["did"], json=doc)
                for doc in response.json()]

    def get_with_params(self, params=None):
        """
        Return a document object corresponding to the supplied parameters,
        such as ``{'hashes': {'md5': '...'}, 'size': '...',
        'metadata': {'file_state': '...'}}``.
""" # need to include all the hashes in the request # index client like signpost or indexd will need to handle the # query param `'hash': 'hash_type:hash'` params_copy = copy.deepcopy(params) or {} if "hashes" in params_copy: params_copy["hash"] = params_copy.pop("hashes") reformatted_params = dict() for param in ["hash", "metadata"]: if param in params_copy: reformatted_params[param] = [] for k, v in params_copy[param].items(): reformatted_params[param].append(str(k) + ":" + str(v)) del params_copy[param] reformatted_params.update(params_copy) reformatted_params["limit"] = 1 try: response = self._get("index", params=reformatted_params) except requests.HTTPError as e: if e.response.status_code == 404: return None else: raise e if not response.json()["records"]: return None json = response.json()["records"][0] did = json["did"] return Document(self, did, json=json) def list(self, limit=float("inf"), start=None, page_size=100): """ Returns a generator of document objects. """ return self.list_with_params(limit, start, page_size) def list_with_params( self, limit=float("inf"), start=None, page_size=100, params=None, negate_params=None, ): """ Return a generator of document object corresponding to the supplied parameters, such as ``{'hashes': {'md5': '...'}, 'size': '...', 'metadata': {'file_state': '...'}, 'urls_metadata': {'s3://url': {'state': '...'} }``. """ params_copy = copy.deepcopy(params) or {} if "hashes" in params_copy: params_copy["hash"] = params_copy.pop("hashes") if "urls_metadata" in params_copy: params_copy["urls_metadata"] = json.dumps(params_copy.pop("urls_metadata")) reformatted_params = dict() for param in ["hash", "metadata"]: if param in params_copy: reformatted_params[param] = [] for k, v in params_copy[param].items(): reformatted_params[param].append(str(k) + ":" + str(v)) del params_copy[param] reformatted_params.update(params_copy) reformatted_params.update({"limit": page_size, "start": start}) if negate_params: reformatted_params.update({"negate_params": json.dumps(negate_params)}) yielded = 0 while True: resp = self._get("index", params=reformatted_params, timeout=60) handle_error(resp) json_str = resp.json() if not json_str["records"]: return for doc in json_str["records"]: if yielded < limit: yield Document(self, None, json=doc) yielded += 1 else: return if len(json_str["records"]) == page_size: reformatted_params["start"] = json_str["records"][-1]["did"] else: # There's no more results return def create( self, hashes, size, did=None, urls=None, file_name=None, metadata=None, baseid=None, acl=None, urls_metadata=None, version=None, authz=None, ): """Create a new entry in indexd Args: hashes (dict): {hash type: hash value,} eg ``hashes={'md5': ab167e49d25b488939b1ede42752458b'}`` size (int): file size metadata associated with a given uuid did (str): provide a UUID for the new indexd to be made urls (list): list of URLs where you can download the UUID acl (list): access control list authz (str): RBAC string file_name (str): name of the file associated with a given UUID metadata (dict): additional key value metadata for this entry urls_metadata (dict): metadata attached to each url baseid (str): optional baseid to group with previous entries versions version (str): entry version string Returns: Document: indexclient representation of an entry in indexd """ if urls is None: urls = [] json = { "urls": urls, "form": "object", "hashes": hashes, "size": size, "file_name": file_name, "metadata": metadata, "urls_metadata": urls_metadata, "baseid": baseid, "acl": acl, 
"authz": authz, "version": version, } if did: json["did"] = did resp = self._post( "index/", headers={"content-type": "application/json"}, data=json_dumps(json), auth=self.auth, ) return Document(self, resp.json()["did"]) def add_alias_for_did(self, alias, did): """ Adds an alias for a document id (did). Once an alias is created for a did, the document can be retrieved by the alias using the `global_get(alias)` function. :param str alias: The alias we want to assign to the document id. :param str did: The document id for the index record we want to alias. :raises BaseIndexError: Raised if aliasing operation fails. """ alias_payload = {"aliases": [{"value": alias}]} resp = self._post( "index/{}/aliases/".format(did), headers={"content-type": "application/json"}, data=json.dumps(alias_payload), auth=self.auth, ) try: return resp.json() except ValueError as err: reason = json.dumps( {"error": "invalid json payload returned: {}".format(err)} ) raise BaseIndexError(resp.status_code, reason) # DEPRECATED 11/2019 -- interacts with old `/alias/` endpoint. # For creating aliases for indexd records, prefer using # the `add_alias_for_did` function, which interacts with the new # `/index/{GUID}/aliases` endpoint. def create_alias( self, record, size, hashes, release=None, metastring=None, host_authorities=None, keeper_authority=None, ): warnings.warn( ( "This function is deprecated. For creating aliases for indexd " "records, prefer using the `add_alias_for_did` function, which " "interacts with the new `/index/{GUID}/aliases` endpoint." ), DeprecationWarning, ) data = json_dumps( { "size": size, "hashes": hashes, "release": release, "metastring": metastring, "host_authorities": host_authorities, "keeper_authority": keeper_authority, } ) url = "alias/" + record headers = {"content-type": "application/json"} resp = self._put(url, headers=headers, data=data, auth=self.auth) return resp.json() def get_latest_version(self, did, skip_null_versions=False): """ Args: did (str): document id of an existing entry whose latest version is requested skip_null_versions (bool): if True, exclude entries without a version Returns: Document: latest version of the entry """ params = {"has_version": "true" if skip_null_versions else "false"} doc = self._get("index", did, "latest", params=params).json() if doc and "did" in doc: return Document(self, doc["did"], doc) return None def add_version(self, current_did, new_doc): """ Args: current_did (str): did of an existing index whose baseid will be shared new_doc (Document): the document version to add Return: Document: the version that was just added """ rev_doc = self._post( "index", current_did, json=new_doc.to_json(), auth=self.auth ).json() if rev_doc and "did" in rev_doc: return Document(self, rev_doc["did"]) return None def list_versions(self, did): # type: (str) -> list[Document] versions_dict = self._get("index", did, "versions").json() # type: dict versions = [] for _, version in versions_dict.items(): versions.append(Document(self, version["did"], version)) return versions @retry_and_timeout_wrapper def _get(self, *path, **kwargs): resp = requests.get(self.url_for(*path), **kwargs) handle_error(resp) return resp @timeout_wrapper def _post(self, *path, **kwargs): resp = requests.post(self.url_for(*path), **kwargs) handle_error(resp) return resp @timeout_wrapper def _put(self, *path, **kwargs): resp = requests.put(self.url_for(*path), **kwargs) handle_error(resp) return resp @timeout_wrapper def _delete(self, *path, **kwargs): resp = 
requests.delete(self.url_for(*path), **kwargs) handle_error(resp) return resp class DocumentDeletedError(Exception): pass class Document(object): def __init__(self, client, did, json=None): self.client = client self.did = did self._fetched = False self._deleted = False self._load(json) def __eq__(self, other_doc): """ equals `==` operator overload It doesn't matter the order of the urls list. What matters is the existence of the urls are the same on both sides. """ return self._sorted_doc == other_doc._sorted_doc def __ne__(self, other_doc): """ not equals `!=` operator overload It doesn't matter the order of the urls list. What matters is the existence of the urls are the same on both sides. """ return self._sorted_doc != other_doc._sorted_doc def __lt__(self, other_doc): return self.did < other_doc.did def __gt__(self, other_doc): return self.did > other_doc.did def __repr__(self): """ String representation of a Document Example: <Document(size=1, form=object, file_name=filename.txt, ...)> """ attributes = ", ".join( ["{}={}".format(attr, self.__dict__[attr]) for attr in self._attrs] ) return "<Document(" + attributes + ")>" def _check_deleted(self): if self._deleted: raise DocumentDeletedError("document {} has been deleted".format(self.did)) def _render(self, include_rev=True): self._check_deleted() if not self._fetched: raise RuntimeError( "Document must be fetched from the server before being rendered as json" ) return self._doc def to_json(self, include_rev=True): json = self._render(include_rev=include_rev) if self.did: json["did"] = self.did return json def _load(self, json=None): """ Load the document contents from the server or from the provided dictionary """ self._check_deleted() json = json or self.client._get("index", self.did).json() # set attributes to current Document for k, v in json.items(): self.__dict__[k] = v self._attrs = json.keys() self._fetched = True def _doc_for_update(self): """ return document with subset of attributes that are allowed to be updated """ return {k: v for k, v in self._doc.items() if k in UPDATABLE_ATTRS} @property def _doc(self): return {k: self.__dict__[k] for k in self._attrs} @property def _sorted_doc(self): """Return the _doc object but with all arrays in sorted order. This will allow us to compare dictionaries with lists correctly. We only care about the contents of the arrays not the order of them. """ return recursive_sort(self._doc) def patch(self): """Update attributes in an indexd Document "Patch" the current document attributes then upload the changed result to the indexd server. """ self._check_deleted() self.client._put( "index", self.did, params={"rev": self.rev}, headers={"content-type": "application/json"}, auth=self.client.auth, data=json.dumps(self._doc_for_update()), ) self._load() # to sync new rev from server def delete(self): self._check_deleted() self.client._delete( "index", self.did, auth=self.client.auth, params={"rev": self.rev} ) self._deleted = True def recursive_sort(value): """ Sort all the lists in the dictionary recursively so that we can compare a dictionary's contents being the same instead of comparing their order. """ if isinstance(value, dict): return {key: recursive_sort(value[key]) for key in value.keys()} elif isinstance(value, list): return sorted([recursive_sort(element) for element in value]) else: return value
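# A minimal usage sketch of the client above (the indexd URL and auth
# pair are placeholders; a reachable indexd service is assumed). The md5
# value reuses the example hash from the create() docstring.
client = IndexClient("https://indexd.example.org", auth=("user", "password"))
doc = client.create(
    hashes={"md5": "ab167e49d25b488939b1ede42752458b"},
    size=1024,
    urls=["s3://bucket/key"],
)
fetched = client.get(doc.did)
fetched.file_name = "example.txt"
fetched.patch()  # pushes only UPDATABLE_ATTRS back to the server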
from .columbia_imagecontentsearch import ColumbiaImageContentSearch from .settings import ColumbiaSetting def load(info): columbiaSetting = ColumbiaSetting() for setting in columbiaSetting.requiredSettings: columbiaSetting.get(setting) info['apiRoot'].columbia_imagecontentsearch = ColumbiaImageContentSearch()
import json
import argparse

from fuzzywuzzy import fuzz
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk, scan

parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, action='store',
                    default='../config.json')
args = parser.parse_args()

config = json.load(open(args.config_path))

client = Elasticsearch([{
    'host' : config['es']['host'],
    'port' : config['es']['port']
}], timeout = 60000)

query = {
    "query" : {
        "filtered" : {
            "filter" : {
                "missing" : {
                    "field" : "__meta__.halts"
                }
            }
        }
    }
}

def run(query):
    for a in scan(client, index=config['symbology']['index'], query=query):
        res = client.search(index=config['suspension']['index'], body={
            "_source" : ["company", "date", "link"],
            "query" : {
                "match" : {
                    "company" : a['_source']['name']
                }
            }
        })
        if res['hits']['total'] > 0:
            mtc = res['hits']['hits'][0]['_source']

            sym_name = a['_source']['name'].lower()
            halt_name = mtc['company'].lower()

            x = fuzz.token_sort_ratio(sym_name, halt_name)
            y = fuzz.ratio(sym_name, halt_name)

            halts = {"match_attempted" : True}
            if res['hits']['hits'][0]['_score'] >= 1 and x >= 90:
                halts.update(mtc)
                halts.update({
                    "fuzz_ratio" : y,
                    "fuzz_token_sort_ratio" : x,
                    "match_score" : res['hits']['hits'][0]['_score']
                })

            yield {
                "_id" : a['_id'],
                "_type" : config['symbology']['_type'],
                "_index" : config['symbology']['index'],
                "_op_type" : "update",
                "doc" : {
                    "__meta__" : {
                        "halts" : halts
                    }
                }
            }

if __name__ == "__main__":
    for a,b in streaming_bulk(client, run(query), chunk_size=1000,
                              raise_on_error=False):
        print a, b
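# A small illustration of the two fuzzywuzzy scores used in run() above
# (the company names are invented): token_sort_ratio ignores word order,
# which is why it is the score gating the match.
from fuzzywuzzy import fuzz

sym_name = "acme holdings inc"
halt_name = "inc holdings acme"
print(fuzz.ratio(sym_name, halt_name))             # well below 90
print(fuzz.token_sort_ratio(sym_name, halt_name))  # 100: tokens sorted first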
"""Tests for numerical correctness.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test class Bias(keras.layers.Layer): """Layer that add a bias to its inputs.""" def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') def call(self, inputs): return inputs + self.bias class MultiInputSubclassed(keras.Model): """Subclassed Model that adds its inputs and then adds a bias.""" def __init__(self): super(MultiInputSubclassed, self).__init__() self.add = keras.layers.Add() self.bias = Bias() def call(self, inputs): added = self.add(inputs) return self.bias(added) def multi_input_functional(): """Functional Model that adds its inputs and then adds a bias.""" input_1 = keras.Input(shape=(1,)) input_2 = keras.Input(shape=(1,)) input_3 = keras.Input(shape=(1,)) added = keras.layers.Add()([input_1, input_2, input_3]) output = Bias()(added) return keras.Model([input_1, input_2, input_3], output) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class SimpleBiasTest(keras_parameterized.TestCase): def _get_simple_bias_model(self): model = testing_utils.get_model_from_layers([Bias()], input_shape=(1,)) model.compile(keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae') return model def test_simple_bias_fit(self): x = np.array([[0.], [1.], [2.]]) y = np.array([[0.5], [2.], [3.5]]) model = self._get_simple_bias_model() history = model.fit(x, y, batch_size=3, epochs=5) self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6]) def test_simple_bias_evaluate(self): x = np.array([[0.], [1.], [2.]]) y = np.array([[1.], [3.], [5.]]) model = self._get_simple_bias_model() loss = model.evaluate(x, y, batch_size=1) self.assertAlmostEqual(loss, 2.) def test_simple_bias_predict(self): x = np.array([[0.], [1.], [2.]]) model = self._get_simple_bias_model() pred = model.predict(x, batch_size=1) self.assertAllClose(x, pred) @keras_parameterized.run_all_keras_modes class MultipleInputTest(keras_parameterized.TestCase): def _get_multiple_input_model(self, subclassed=True): if subclassed: model = MultiInputSubclassed() else: model = multi_input_functional() model.compile(keras.optimizer_v2.gradient_descent.SGD(0.1), 'mae') return model @parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_fit(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] y = np.array([[12.5], [16.], [19.5]]) model = self._get_multiple_input_model(subclassed) history = model.fit(x, y, batch_size=3, epochs=5) self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6]) @parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_evaluate(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] y = np.array([[13.], [17.], [21.]]) model = self._get_multiple_input_model(subclassed) loss = model.evaluate(x, y, batch_size=3) self.assertAlmostEqual(loss, 2.) 
@parameterized.named_parameters(('subclassed', True), ('functional', False)) def test_multiple_input_predict(self, subclassed): x = [ np.array([[1.], [2.], [3.]]), np.array([[4.], [5.], [6.]]), np.array([[7.], [8.], [9.]]) ] model = self._get_multiple_input_model(subclassed) pred = model.predict(x, batch_size=1) self.assertAllClose(pred, [[12.], [15.], [18.]]) if __name__ == '__main__': test.main()
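# A minimal standalone sketch of the Bias pattern tested above, written
# against the public tf.keras API rather than the test-only internal
# imports (a TensorFlow installation is assumed).
import numpy as np
import tensorflow as tf


class BiasDemo(tf.keras.layers.Layer):
    """Adds a single learned bias to its inputs."""

    def build(self, input_shape):
        self.bias = self.add_weight('bias', shape=(1,), initializer='zeros')

    def call(self, inputs):
        return inputs + self.bias


model = tf.keras.Sequential([BiasDemo()])
model.compile(tf.keras.optimizers.SGD(0.1), 'mae')
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
history = model.fit(x, y, batch_size=3, epochs=5, verbose=0)
print(history.history['loss'])  # decreases from 1.0 toward 0.6, as the test expects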
from __future__ import print_function import os TEST_DIR = os.path.dirname(__file__) RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources') BUILD = ['build', '--no-notify', '--no-status'] CLEAN = ['clean', '--all', '--yes'] # , '--no-notify', '--no-color', '--no-status'] def test_add_package(): """Test build behavior when adding packages to the workspace""" pass # TODO: Implement this for various dependency relationships def test_remove_package(): """Test build behavior when removing packages from the workspace""" pass # TODO: Implement this for various dependency relationships def test_rename_package(): """Test build behavior when renaming a package in the workspace""" pass # TODO: Implement this for various dependency relationships def test_ignore_package(): """Test build behavior when adding a CATKIN_IGNORE file to a package in the workspace""" pass # TODO: Implement this for various dependency relationships def test_deblacklist(): """Test build behavior when removing a package from the blacklist that has yet to be built""" pass # TODO: Implement this for various dependency relationships
from __future__ import unicode_literals import os import unittest import io from lxml import etree from packtools.catalogs import checks SAMPLES_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'samples') class SetupTests(unittest.TestCase): def test_message_is_splitted(self): fp = etree.parse(io.BytesIO(b'<a><b>bar</b></a>')) self.assertEqual(checks.setup(fp), (fp, [])) class TeardownTests(unittest.TestCase): def test_returns_errorlist(self): fp = etree.parse(io.BytesIO(b'<a><b>bar</b></a>')) err_list = ['some error'] message = (fp, err_list) self.assertEqual(checks.teardown(message), err_list) class FundingGroupPipeTests(unittest.TestCase): """See the spec on the pipe's docstring for more info. """ def test_proposition_1_case_1(self): """ HasExplicitContract is True HasFundingGroup is True (HasExplicitContract <=> HasFundingGroup) is True """ sample = os.path.join(SAMPLES_PATH, '0034-8910-rsp-48-2-0206.xml') et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 0) def test_proposition_1_case_2(self): """ HasExplicitContract is True HasFundingGroup is False (HasExplicitContract <=> HasFundingGroup) is False """ sample = io.BytesIO(b""" <article> <front> <article-meta></article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'fn-group'" in err_list[0].message) def test_proposition_1_case_3(self): """ HasExplicitContract is False HasFundingGroup is True (HasExplicitContract <=> HasFundingGroup) is False """ sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>CSV 234/07</award-id> </award-group> <funding-statement>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</funding-statement> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1"> <p>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'funding-group'" in err_list[0].message) def test_proposition_1_case_4(self): """ HasExplicitContract is False HasFundingGroup is False (HasExplicitContract <=> HasFundingGroup) is True """ sample = io.BytesIO(b""" <article> <front> <article-meta></article-meta> </front> <back> <fn-group> <fn id="fn1"> <p>This study was supported by the 
Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 0) def test_proposition_3_case_1(self): """ ∃ContractNo<fn> [ ¬Registered(FundingGroup, ContractNo) ] ^ ∀ContractNo<ack> [ Registered(FundingGroup, ContractNo) ] ^ ∀ContractNo<funding-group> [ Registered(Ack, ContractNo) v Registered(Fn, ContractNo) ] """ sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>234/07</award-id> </award-group> <funding-statement>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</funding-statement> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> <fn id="fn2" fn-type="financial-disclosure"> <p>Foo Bar Office on Drugs and Crime (Process CSV 235/07).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'fn-group'" in err_list[0].message) def test_proposition_3_case_2(self): """ This cannot be checked by packtools' stylechecker. ∀ContractNo<fn> [ Registered(FundingGroup, ContractNo) ] ^ ∃ContractNo<ack> [ ¬Registered(FundingGroup, ContractNo) ] ^ ∀ContractNo<funding-group> [ Registered(Ack, ContractNo) v Registered(Fn, ContractNo) ] """ sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>234/07</award-id> </award-group> <funding-statement>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</funding-statement> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> </fn-group> <ack> <p>... 
this study was supported by FooBar under the process 235/09.</p> </ack> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 0) # the error could not be detected def test_proposition_3_case_3(self): """ ∀ContractNo<fn> [ Registered(FundingGroup, ContractNo) ] ^ ∀ContractNo<ack> [ Registered(FundingGroup, ContractNo) ] ^ ∃ContractNo<funding-group> [ ¬(Registered(Ack, ContractNo)) ^ ¬(Registered(Fn, ContractNo)) ] """ sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>234/07</award-id> </award-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>236/08</award-id> </award-group> <funding-statement>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</funding-statement> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'funding-group'" in err_list[0].message) def test_fn_p_with_no_content(self): sample = io.BytesIO(b""" <article> <front> <article-meta> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p/> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'fn-group'" in err_list[0].message) def test_fn_p_with_no_content_with_funding_group(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>234/07</award-id> </award-group> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p/> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("'funding-group'" in err_list[0].message) def test_fn_p_and_awardid_with_no_content(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id/> </award-group> </funding-group> </article-meta> </front> <back> <fn-group> <fn id="fn1" fn-type="financial-disclosure"> <p/> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 1) 
self.assertTrue("'fn-group'" in err_list[0].message) def test_valid_with_contractno_after_formatting_markup(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>CSV 234/07</award-id> </award-group> <funding-statement>This study was supported by the Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</funding-statement> </funding-group> </article-meta> </front> <back> <ack> <p>This study was supported by the <italic>Brazilian</italic> Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis, through the Project of International Technical Cooperation AD/BRA/03/H34 between the Brazilian Government and the United Nations Office on Drugs and Crime (Process CSV 234/07).</p> </ack> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 0) def test_valid_with_contractno_after_formatting_markup_as_first_element(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <funding-group> <award-group> <funding-source>Brazilian Ministry of Health/Secretariat of Health Surveillance/Department of STD, AIDS and Viral Hepatitis</funding-source> <award-id>2010/03107-2</award-id> </award-group> <funding-statement>Apoio financeiro: Fundacao de Amparo a Pesquisa do Estado de Sao Paulo (FAPESP). Processo 2010/03107-2. Conselho Nacional de Desenvolvimento Cientifico e Tecnologico (CNPq). </funding-statement> </funding-group> </article-meta> </front> <back> <fn-group> <fn fn-type="financial-disclosure"> <p><bold>Apoio financeiro:</bold> Fundacao de Amparo a Pesquisa do Estado de Sao Paulo (FAPESP). Processo 2010/03107-2. Conselho Nacional de Desenvolvimento Cientifico e Tecnologico (CNPq).</p> </fn> </fn-group> </back> </article> """) et = etree.parse(sample) _, err_list = checks.funding_group((et, [])) self.assertEqual(len(err_list), 0) class DoctypePipeTests(unittest.TestCase): def test_missing_doctype(self): sample = io.BytesIO(b""" <article> ... </article> """) et = etree.parse(sample) _, err_list = checks.doctype((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("DOCTYPE" in err_list[0].message) def test_doctype(self): sample = io.BytesIO(b""" <!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.0 20120330//EN" "JATS-journalpublishing1.dtd"> <article> ... </article> """) et = etree.parse(sample) _, err_list = checks.doctype((et, [])) self.assertEqual(len(err_list), 0) class CountryCodesTests(unittest.TestCase): def test_valid_code(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <aff id="aff1"> <label>I</label> <institution content-type="orgdiv2">Departamento de Fonoaudiologia</institution> <institution content-type="orgdiv1">Faculdade de Medicina</institution> <institution content-type="orgname">Universidade Federal do Rio de Janeiro</institution> <addr-line> <named-content content-type="city">Rio de Janeiro</named-content> <named-content content-type="state">RJ</named-content> </addr-line> <country country="BR">Brasil</country> <institution content-type="original">Departamento de Fonoaudiologia. Faculdade de Medicina. 
Universidade Federal do Rio de Janeiro. Rio de Janeiro, RJ, Brasil</institution> </aff> </article-meta> </front> </article> """) et = etree.parse(sample) _, err_list = checks.country_code((et, [])) self.assertEqual(len(err_list), 0) def test_valid_code_in_lowercase(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <aff id="aff1"> <label>I</label> <institution content-type="orgdiv2">Departamento de Fonoaudiologia</institution> <institution content-type="orgdiv1">Faculdade de Medicina</institution> <institution content-type="orgname">Universidade Federal do Rio de Janeiro</institution> <addr-line> <named-content content-type="city">Rio de Janeiro</named-content> <named-content content-type="state">RJ</named-content> </addr-line> <country country="br">Brasil</country> <institution content-type="original">Departamento de Fonoaudiologia. Faculdade de Medicina. Universidade Federal do Rio de Janeiro. Rio de Janeiro, RJ, Brasil</institution> </aff> </article-meta> </front> </article> """) et = etree.parse(sample) _, err_list = checks.country_code((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("country" in err_list[0].message) def test_invalid_code(self): sample = io.BytesIO(b""" <article> <front> <article-meta> <aff id="aff1"> <label>I</label> <institution content-type="orgdiv2">Departamento de Fonoaudiologia</institution> <institution content-type="orgdiv1">Faculdade de Medicina</institution> <institution content-type="orgname">Universidade Federal do Rio de Janeiro</institution> <addr-line> <named-content content-type="city">Rio de Janeiro</named-content> <named-content content-type="state">RJ</named-content> </addr-line> <country country="INVALID">Brasil</country> <institution content-type="original">Departamento de Fonoaudiologia. Faculdade de Medicina. Universidade Federal do Rio de Janeiro. Rio de Janeiro, RJ, Brasil</institution> </aff> </article-meta> </front> </article> """) et = etree.parse(sample) _, err_list = checks.country_code((et, [])) self.assertEqual(len(err_list), 1) self.assertTrue("country" in err_list[0].message)
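# Taken together, the tests above pin down a small pipe protocol: every
# check consumes and returns a (tree, error_list) message, with setup and
# teardown at the ends. A minimal sketch of composing the checks under that
# protocol -- built only from the call signatures the tests exercise:

import io

from lxml import etree

from packtools.catalogs import checks


def run_checks(xml_bytes, pipes=(checks.funding_group, checks.doctype,
                                 checks.country_code)):
    """Thread a (tree, error_list) message through each check pipe."""
    message = checks.setup(etree.parse(io.BytesIO(xml_bytes)))
    for pipe in pipes:
        message = pipe(message)
    # teardown unwraps the message and returns the accumulated error list
    return checks.teardown(message)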
import os
import time

from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
from ...utils import parse_utc


@notwindows
class TestNbGraderCollect(BaseTestApp):

    def _release_and_fetch(self, assignment, exchange, course_dir):
        self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", "ps1", "p1.ipynb"))
        run_nbgrader([
            "release", assignment,
            "--course", "abc101",
            "--TransferApp.exchange_directory={}".format(exchange)
        ])
        run_nbgrader([
            "fetch", assignment,
            "--course", "abc101",
            "--TransferApp.exchange_directory={}".format(exchange)
        ])

    def _submit(self, assignment, exchange, cache):
        run_nbgrader([
            "submit", assignment,
            "--course", "abc101",
            "--TransferApp.cache_directory={}".format(cache),
            "--TransferApp.exchange_directory={}".format(exchange)
        ])

    def _collect(self, assignment, exchange, flags=None, retcode=0):
        cmd = [
            "collect", assignment,
            "--course", "abc101",
            "--TransferApp.exchange_directory={}".format(exchange)
        ]
        if flags is not None:
            cmd.extend(flags)
        run_nbgrader(cmd, retcode=retcode)

    def _read_timestamp(self, root):
        with open(os.path.join(root, "timestamp.txt"), "r") as fh:
            timestamp = parse_utc(fh.read())
        return timestamp

    def test_help(self):
        """Does the help display without error?"""
        run_nbgrader(["collect", "--help-all"])

    def test_no_course_id(self, exchange, course_dir, cache):
        """Does collecting without a course id throw an error?"""
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache)
        cmd = [
            "collect", "ps1",
            "--TransferApp.exchange_directory={}".format(exchange)
        ]
        run_nbgrader(cmd, retcode=1)

    def test_collect(self, exchange, course_dir, cache):
        self._release_and_fetch("ps1", exchange, course_dir)

        # try to collect when there's nothing to collect
        self._collect("ps1", exchange)
        root = os.path.join(course_dir, "submitted", os.environ["USER"], "ps1")
        assert not os.path.isdir(os.path.join(course_dir, "submitted"))

        # submit something
        self._submit("ps1", exchange, cache)
        time.sleep(1)

        # try to collect it
        self._collect("ps1", exchange)
        assert os.path.isfile(os.path.join(root, "p1.ipynb"))
        assert os.path.isfile(os.path.join(root, "timestamp.txt"))
        timestamp = self._read_timestamp(root)

        # try to collect it again
        self._collect("ps1", exchange)
        assert self._read_timestamp(root) == timestamp

        # submit again
        self._submit("ps1", exchange, cache)

        # collect again
        self._collect("ps1", exchange)
        assert self._read_timestamp(root) == timestamp

        # collect again with --update
        self._collect("ps1", exchange, ["--update"])
        assert self._read_timestamp(root) != timestamp

    def test_collect_assignment_flag(self, exchange, course_dir, cache):
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache)

        # collect using the --assignment flag instead of a positional name
        self._collect("--assignment=ps1", exchange)
        root = os.path.join(course_dir, "submitted", os.environ["USER"], "ps1")
        assert os.path.isfile(os.path.join(root, "p1.ipynb"))
        assert os.path.isfile(os.path.join(root, "timestamp.txt"))
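# The round trip exercised by TestNbGraderCollect, condensed to the
# equivalent CLI session -- a sketch assembled from the exact flags the
# helpers above pass to run_nbgrader; the directory paths are placeholders
# and subcommand names may differ in other nbgrader versions:
#
#   nbgrader release ps1 --course abc101 \
#       --TransferApp.exchange_directory=/tmp/exchange
#   nbgrader fetch ps1 --course abc101 \
#       --TransferApp.exchange_directory=/tmp/exchange
#   nbgrader submit ps1 --course abc101 \
#       --TransferApp.cache_directory=/tmp/cache \
#       --TransferApp.exchange_directory=/tmp/exchange
#   nbgrader collect ps1 --course abc101 \
#       --TransferApp.exchange_directory=/tmp/exchange
#
# Re-running collect is a no-op for already-collected work; pass --update
# (as in test_collect) to pull submissions with newer timestamps.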
import warnings from . import _minpack import numpy as np from numpy import (atleast_1d, dot, take, triu, shape, eye, transpose, zeros, prod, greater, asarray, inf, finfo, inexact, issubdtype, dtype) from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv from scipy._lib._util import _asarray_validated, _lazywhere from scipy._lib._util import getfullargspec_no_self as _getfullargspec from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning from ._lsq import least_squares from ._lsq.least_squares import prepare_bounds error = _minpack.error __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) if (output_shape is not None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: if output_shape[1] == 1: return shape(res) msg = "%s: there is a mismatch between the input and output " \ "shape of the '%s' argument" % (checker, argname) func_name = getattr(thefunc, '__name__', None) if func_name: msg += " '%s'." % func_name else: msg += "." msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res)) raise TypeError(msg) if issubdtype(res.dtype, inexact): dt = res.dtype else: dt = dtype(float) return shape(res), dt def fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, epsfcn=None, factor=100, diag=None): """ Find the roots of a function. Return the roots of the (non-linear) equations defined by ``func(x) = 0`` given a starting estimate. Parameters ---------- func : callable ``f(x, *args)`` A function that takes at least one (possibly vector) argument, and returns a value of the same length. x0 : ndarray The starting estimate for the roots of ``func(x) = 0``. args : tuple, optional Any extra arguments to `func`. fprime : callable ``f(x, *args)``, optional A function to compute the Jacobian of `func` with derivatives across the rows. By default, the Jacobian will be estimated. full_output : bool, optional If True, return optional outputs. col_deriv : bool, optional Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float, optional The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int, optional The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple, optional If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). epsfcn : float, optional A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `epsfcn` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). 
infodict : dict A dictionary of optional outputs with the keys: ``nfev`` number of function calls ``njev`` number of Jacobian calls ``fvec`` function evaluated at the output ``fjac`` the orthogonal matrix, q, produced by the QR factorization of the final approximate Jacobian matrix, stored column wise ``r`` upper triangular matrix produced by QR factorization of the same matrix ``qtf`` the vector ``(transpose(q) * fvec)`` ier : int An integer flag. Set to 1 if a solution was found, otherwise refer to `mesg` for more information. mesg : str If no solution is found, `mesg` details the cause of failure. See Also -------- root : Interface to root finding algorithms for multivariate functions. See the ``method=='hybr'`` in particular. Notes ----- ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. Examples -------- Find a solution to the system of equations: ``x0*cos(x1) = 4, x1*x0 - x1 = 5``. >>> from scipy.optimize import fsolve >>> def func(x): ... return [x[0] * np.cos(x[1]) - 4, ... x[1] * x[0] - x[1] - 5] >>> root = fsolve(func, [1, 1]) >>> root array([6.50409711, 0.90841421]) >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. array([ True, True]) """ options = {'col_deriv': col_deriv, 'xtol': xtol, 'maxfev': maxfev, 'band': band, 'eps': epsfcn, 'factor': factor, 'diag': diag} res = _root_hybr(func, x0, args, jac=fprime, **options) if full_output: x = res['x'] info = dict((k, res.get(k)) for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res) info['fvec'] = res['fun'] return x, info, res['status'], res['message'] else: status = res['status'] msg = res['message'] if status == 0: raise TypeError(msg) elif status == 1: pass elif status in [2, 3, 4, 5]: warnings.warn(msg, RuntimeWarning) else: raise TypeError(msg) return res['x'] def _root_hybr(func, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, factor=100, diag=None, **unknown_options): """ Find the roots of a multivariate function using MINPACK's hybrd and hybrj routines (modified Powell method). Options ------- col_deriv : bool Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). eps : float A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `eps` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. 
""" _check_unknown_options(unknown_options) epsfcn = eps x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) if epsfcn is None: epsfcn = finfo(dtype).eps Dfun = jac if Dfun is None: if band is None: ml, mu = -10, -10 else: ml, mu = band[:2] if maxfev == 0: maxfev = 200 * (n + 1) retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, ml, mu, epsfcn, factor, diag) else: _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) if (maxfev == 0): maxfev = 100 * (n + 1) retval = _minpack._hybrj(func, Dfun, x0, args, 1, col_deriv, xtol, maxfev, factor, diag) x, status = retval[0], retval[-1] errors = {0: "Improper input parameters were entered.", 1: "The solution converged.", 2: "The number of calls to function has " "reached maxfev = %d." % maxfev, 3: "xtol=%f is too small, no further improvement " "in the approximate\n solution " "is possible." % xtol, 4: "The iteration is not making good progress, as measured " "by the \n improvement from the last five " "Jacobian evaluations.", 5: "The iteration is not making good progress, " "as measured by the \n improvement from the last " "ten iterations.", 'unknown': "An error occurred."} info = retval[1] info['fun'] = info.pop('fvec') sol = OptimizeResult(x=x, success=(status == 1), status=status) sol.update(info) try: sol['message'] = errors[status] except KeyError: sol['message'] = errors['unknown'] return sol LEASTSQ_SUCCESS = [1, 2, 3, 4] LEASTSQ_FAILURE = [5, 6, 7, 8] def leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable Should take at least one (possibly length N vector) argument and returns M floating point numbers. It must not return NaNs or fitting might fail. x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional non-zero to return all optional outputs. col_deriv : bool, optional non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided, then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. 
Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray The inverse of the Hessian. `fjac` and `ipvt` are used to construct an estimate of the Hessian. A value of None indicates a singular matrix, which means the curvature in parameters `x` is numerically flat. To obtain the covariance matrix of the parameters `x`, `cov_x` must be multiplied by the variance of the residuals -- see curve_fit. infodict : dict a dictionary of optional outputs with the keys: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). mesg : str A string message giving information about the cause of failure. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. See Also -------- least_squares : Newer interface to solve nonlinear least-squares problems with bounds on the variables. See ``method=='lm'`` in particular. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, or whether `x0` is a scalar. Examples -------- >>> from scipy.optimize import leastsq >>> def func(x): ... 
return 2*(x-3)**2+1 >>> leastsq(func, 0) (array([2.99999999]), 1) """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most %f and the " "relative error between two consecutive " "iterates is at \n most %f" % (ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible." % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." % xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." 
% gtol, ValueError]} # The FORTRAN return value (possible return values are >= 0 and <= 8) info = retval[-1] if full_output: cov_x = None if info in LEASTSQ_SUCCESS: perm = take(eye(n), retval[1]['ipvt'] - 1, 0) r = triu(transpose(retval[1]['fjac'])[:n, :]) R = dot(r, perm) try: cov_x = inv(dot(transpose(R), R)) except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) else: if info in LEASTSQ_FAILURE: warnings.warn(errors[info][0], RuntimeWarning) elif info == 0: raise errors[info][1](errors[info][0]) return retval[0], info def _wrap_func(func, xdata, ydata, transform): if transform is None: def func_wrapped(params): return func(xdata, *params) - ydata elif transform.ndim == 1: def func_wrapped(params): return transform * (func(xdata, *params) - ydata) else: # Chisq = (y - yd)^T C^{-1} (y-yd) # transform = L such that C = L L^T # C^{-1} = L^{-T} L^{-1} # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) # Define (y-yd)' = L^{-1} (y-yd) # by solving # L (y-yd)' = (y-yd) # and minimize (y-yd)'^T (y-yd)' def func_wrapped(params): return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) return func_wrapped def _wrap_jac(jac, xdata, transform): if transform is None: def jac_wrapped(params): return jac(xdata, *params) elif transform.ndim == 1: def jac_wrapped(params): return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) else: def jac_wrapped(params): return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True) return jac_wrapped def _initialize_feasible(lb, ub): p0 = np.ones_like(lb) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) mask = lb_finite & ub_finite p0[mask] = 0.5 * (lb[mask] + ub[mask]) mask = lb_finite & ~ub_finite p0[mask] = lb[mask] + 1 mask = ~lb_finite & ub_finite p0[mask] = ub[mask] - 1 return p0 def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-np.inf, np.inf), method=None, jac=None, **kwargs): """ Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps``. Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. xdata : array_like or object The independent variable where the data is measured. Should usually be an M-length sequence or an (k,M)-shaped array for functions with k predictors, but can actually be any object. ydata : array_like The dependent data, a length M array - nominally ``f(xdata, ...)``. p0 : array_like, optional Initial guess for the parameters (length N). If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). sigma : None or M-length sequence or MxM array, optional Determines the uncertainty in `ydata`. If we define residuals as ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` depends on its number of dimensions: - A 1-D `sigma` should contain values of standard deviations of errors in `ydata`. In this case, the optimized function is ``chisq = sum((r / sigma) ** 2)``. - A 2-D `sigma` should contain the covariance matrix of errors in `ydata`. In this case, the optimized function is ``chisq = r.T @ inv(sigma) @ r``. .. versionadded:: 0.19 None (default) is equivalent of 1-D `sigma` filled with ones. 
absolute_sigma : bool, optional If True, `sigma` is used in an absolute sense and the estimated parameter covariance `pcov` reflects these absolute values. If False (default), only the relative magnitudes of the `sigma` values matter. The returned parameter covariance matrix `pcov` is based on scaling `sigma` by a constant factor. This constant is set by demanding that the reduced `chisq` for the optimal parameters `popt` when using the *scaled* `sigma` equals unity. In other words, `sigma` is scaled to match the sample variance of the residuals after the fit. Default is False. Mathematically, ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` check_finite : bool, optional If True, check that the input arrays do not contain nans of infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True. bounds : 2-tuple of array_like, optional Lower and upper bounds on parameters. Defaults to no bounds. Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters). Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. .. versionadded:: 0.17 method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. Returns ------- popt : array Optimal values for the parameters so that the sum of the squared residuals of ``f(xdata, *popt) - ydata`` is minimized. pcov : 2-D array The estimated covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters use ``perr = np.sqrt(np.diag(pcov))``. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used. RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. scipy.stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. 
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... return a * np.exp(-b * x) + c Define the data to be fit with some noise: >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> np.random.seed(1729) >>> y_noise = 0.2 * np.random.normal(size=xdata.size) >>> ydata = y + y_noise >>> plt.plot(xdata, ydata, 'b-', label='data') Fit for the parameters a, b, c of the function `func`: >>> popt, pcov = curve_fit(func, xdata, ydata) >>> popt array([ 2.55423706, 1.35190947, 0.47450618]) >>> plt.plot(xdata, func(xdata, *popt), 'r-', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) Constrain the optimization to the region of ``0 <= a <= 3``, ``0 <= b <= 1`` and ``0 <= c <= 0.5``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) >>> popt array([ 2.43708906, 1. , 0.35015434]) >>> plt.plot(xdata, func(xdata, *popt), 'g--', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) >>> plt.xlabel('x') >>> plt.ylabel('y') >>> plt.legend() >>> plt.show() """ if p0 is None: # determine number of parameters by inspecting the function sig = _getfullargspec(f) args = sig.args if len(args) < 2: raise ValueError("Unable to determine number of fit parameters.") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError("Method 'lm' only works for unconstrained problems. " "Use 'trf' or 'dogbox' instead.") # optimization may produce garbage for float32 inputs, cast them to float64 # NaNs cannot be handled if check_finite: ydata = np.asarray_chkfinite(ydata, float) else: ydata = np.asarray(ydata, float) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. if check_finite: xdata = np.asarray_chkfinite(xdata, float) else: xdata = np.asarray(xdata, float) if ydata.size == 0: raise ValueError("`ydata` must not be empty!") # Determine type of sigma if sigma is not None: sigma = np.asarray(sigma) # if 1-D, sigma are errors, define transform = 1/sigma if sigma.shape == (ydata.size, ): transform = 1.0 / sigma # if 2-D, sigma is the covariance matrix, # define transform = L such that L L^T = C elif sigma.shape == (ydata.size, ydata.size): try: # scipy.linalg.cholesky requires lower=True to return L L^T = A transform = cholesky(sigma, lower=True) except LinAlgError: raise ValueError("`sigma` must be positive definite.") else: raise ValueError("`sigma` has incorrect shape.") else: transform = None func = _wrap_func(f, xdata, ydata, transform) if callable(jac): jac = _wrap_jac(jac, xdata, transform) elif jac is None and method != 'lm': jac = '2-point' if 'args' in kwargs: # The specification for the model function `f` does not support # additional arguments. Refer to the `curve_fit` docstring for # acceptable call signatures of `f`. raise ValueError("'args' is not a supported keyword argument.") if method == 'lm': # Remove full_output from kwargs, otherwise we're passing it in twice. 
return_full = kwargs.pop('full_output', False) res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res ysize = len(infodict['fvec']) cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError("Optimal parameters not found: " + errmsg) else: # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. if 'max_nfev' not in kwargs: kwargs['max_nfev'] = kwargs.pop('maxfev', None) res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError("Optimal parameters not found: " + res.message) ysize = len(res.fun) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) return_full = False warn_cov = False if pcov is None: # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ysize > p0.size: s_sq = cost / (ysize - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if return_full: return popt, pcov, infodict, errmsg, ier else: return popt, pcov def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): """Perform a simple check on the gradient for correctness. """ x = atleast_1d(x0) n = len(x) x = x.reshape((n,)) fvec = atleast_1d(fcn(x, *args)) m = len(fvec) fvec = fvec.reshape((m,)) ldfjac = m fjac = atleast_1d(Dfcn(x, *args)) fjac = fjac.reshape((m, n)) if col_deriv == 0: fjac = transpose(fjac) xp = zeros((n,), float) err = zeros((m,), float) fvecp = None _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) fvecp = atleast_1d(fcn(xp, *args)) fvecp = fvecp.reshape((m,)) _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) good = (prod(greater(err, 0.5), axis=0)) return (good, err) def _del2(p0, p1, d): return p0 - np.square(p1 - p0) / d def _relerr(actual, desired): return (actual - desired) / desired def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): p0 = x0 for i in range(maxiter): p1 = func(p0, *args) if use_accel: p2 = func(p1, *args) d = p2 - 2.0 * p1 + p0 p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) else: p = p1 relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) if np.all(np.abs(relerr) < xtol): return p p0 = p msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) raise RuntimeError(msg) def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): """ Find a fixed point of the function. Given a function of one or more variables and a starting point, find a fixed point of the function: i.e., where ``func(x0) == x0``. Parameters ---------- func : function Function to evaluate. x0 : array_like Fixed point of function. args : tuple, optional Extra arguments to `func`. xtol : float, optional Convergence tolerance, defaults to 1e-08. maxiter : int, optional Maximum number of iterations, defaults to 500. method : {"del2", "iteration"}, optional Method of finding the fixed-point, defaults to "del2", which uses Steffensen's Method with Aitken's ``Del^2`` convergence acceleration [1]_. The "iteration" method simply iterates the function until convergence is detected, without attempting to accelerate the convergence. 
References ---------- .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 Examples -------- >>> from scipy import optimize >>> def func(x, c1, c2): ... return np.sqrt(c1/(x+c2)) >>> c1 = np.array([10,12.]) >>> c2 = np.array([3, 5.]) >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) array([ 1.4920333 , 1.37228132]) """ use_accel = {'del2': True, 'iteration': False}[method] x0 = _asarray_validated(x0, as_inexact=True) return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
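# The comment block inside _wrap_func above is the heart of the 2-D `sigma`
# handling in curve_fit: with covariance C = L L^T, solving L z = r whitens
# the residual, so z.T @ z equals r.T @ inv(C) @ r. A quick numeric check of
# that identity (the covariance and residual values are made up):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

C = np.array([[2.0, 0.5],
              [0.5, 1.0]])              # hypothetical residual covariance
r = np.array([0.3, -0.7])               # hypothetical residual vector

L = cholesky(C, lower=True)             # C = L @ L.T
z = solve_triangular(L, r, lower=True)  # z = inv(L) @ r

# z.T @ z reproduces the generalized chi-square r.T @ inv(C) @ r
assert np.isclose(z @ z, r @ np.linalg.solve(C, r))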
from django import template from datatables.utils import lookupattr register = template.Library() register.filter('lookupattr', lookupattr)
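# Hypothetical usage of the filter registered above, assuming
# lookupattr(obj, name) performs a getattr-style lookup (the template
# library name and the "status" attribute below are made up):
#
#   {% load datatables_tags %}
#   {{ row|lookupattr:"status" }}
#
# which is the template-side equivalent of calling the utility directly:
#
#   from datatables.utils import lookupattr
#   value = lookupattr(row, "status")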
"""A module that handles matrices. Includes functions for fast creating matrices like zero, one/eye, random matrix etc. """ from matrices import Matrix, SMatrix, zero, zeronm, zeros, one, ones, eye, \ hessian, randMatrix, GramSchmidt, wronskian, casoratian, \ list2numpy, matrix2numpy, DeferredVector, block_diag
from __future__ import absolute_import from django.conf import settings def is_active_superuser(request): user = getattr(request, 'user', None) if not user: return False if settings.INTERNAL_IPS: ip = request.META['REMOTE_ADDR'] if not any(ip in addr for addr in settings.INTERNAL_IPS): return False return user.is_superuser
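# One way a helper like this is typically consumed is as a view guard.
# A hypothetical decorator built on is_active_superuser (not part of the
# module above):

from functools import wraps

from django.http import HttpResponseForbidden


def superuser_required(view_func):
    """Reject the request unless is_active_superuser() passes."""
    @wraps(view_func)
    def wrapped(request, *args, **kwargs):
        if not is_active_superuser(request):
            return HttpResponseForbidden()
        return view_func(request, *args, **kwargs)
    return wrapped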
""" Additional tests for PandasArray that aren't covered by the interface tests. """ import numpy as np import pytest import pandas as pd import pandas._testing as tm from pandas.arrays import PandasArray from pandas.core.arrays.numpy_ import PandasDtype @pytest.fixture( params=[ np.array(["a", "b"], dtype=object), np.array([0, 1], dtype=float), np.array([0, 1], dtype=int), np.array([0, 1 + 2j], dtype=complex), np.array([True, False], dtype=bool), np.array([0, 1], dtype="datetime64[ns]"), np.array([0, 1], dtype="timedelta64[ns]"), ] ) def any_numpy_array(request): """ Parametrized fixture for NumPy arrays with different dtypes. This excludes string and bytes. """ return request.param @pytest.mark.parametrize( "dtype, expected", [ ("bool", True), ("int", True), ("uint", True), ("float", True), ("complex", True), ("str", False), ("bytes", False), ("datetime64[ns]", False), ("object", False), ("void", False), ], ) def test_is_numeric(dtype, expected): dtype = PandasDtype(dtype) assert dtype._is_numeric is expected @pytest.mark.parametrize( "dtype, expected", [ ("bool", True), ("int", False), ("uint", False), ("float", False), ("complex", False), ("str", False), ("bytes", False), ("datetime64[ns]", False), ("object", False), ("void", False), ], ) def test_is_boolean(dtype, expected): dtype = PandasDtype(dtype) assert dtype._is_boolean is expected def test_repr(): dtype = PandasDtype(np.dtype("int64")) assert repr(dtype) == "PandasDtype('int64')" def test_constructor_from_string(): result = PandasDtype.construct_from_string("int64") expected = PandasDtype(np.dtype("int64")) assert result == expected def test_constructor_no_coercion(): with pytest.raises(ValueError, match="NumPy array"): PandasArray([1, 2, 3]) def test_series_constructor_with_copy(): ndarray = np.array([1, 2, 3]) ser = pd.Series(PandasArray(ndarray), copy=True) assert ser.values is not ndarray def test_series_constructor_with_astype(): ndarray = np.array([1, 2, 3]) result = pd.Series(PandasArray(ndarray), dtype="float64") expected = pd.Series([1.0, 2.0, 3.0], dtype="float64") tm.assert_series_equal(result, expected) def test_from_sequence_dtype(): arr = np.array([1, 2, 3], dtype="int64") result = PandasArray._from_sequence(arr, dtype="uint64") expected = PandasArray(np.array([1, 2, 3], dtype="uint64")) tm.assert_extension_array_equal(result, expected) def test_constructor_copy(): arr = np.array([0, 1]) result = PandasArray(arr, copy=True) assert np.shares_memory(result._ndarray, arr) is False def test_constructor_with_data(any_numpy_array): nparr = any_numpy_array arr = PandasArray(nparr) assert arr.dtype.numpy_dtype == nparr.dtype def test_to_numpy(): arr = PandasArray(np.array([1, 2, 3])) result = arr.to_numpy() assert result is arr._ndarray result = arr.to_numpy(copy=True) assert result is not arr._ndarray result = arr.to_numpy(dtype="f8") expected = np.array([1, 2, 3], dtype="f8") tm.assert_numpy_array_equal(result, expected) def test_setitem_series(): ser = pd.Series([1, 2, 3]) ser.array[0] = 10 expected = pd.Series([10, 2, 3]) tm.assert_series_equal(ser, expected) def test_setitem(any_numpy_array): nparr = any_numpy_array arr = PandasArray(nparr, copy=True) arr[0] = arr[1] nparr[0] = nparr[1] tm.assert_numpy_array_equal(arr.to_numpy(), nparr) def test_bad_reduce_raises(): arr = np.array([1, 2, 3], dtype="int64") arr = PandasArray(arr) msg = "cannot perform not_a_method with type int" with pytest.raises(TypeError, match=msg): arr._reduce(msg) def test_validate_reduction_keyword_args(): arr = PandasArray(np.array([1, 2, 3])) 
msg = "the 'keepdims' parameter is not supported .*all" with pytest.raises(ValueError, match=msg): arr.all(keepdims=True) def test_ufunc(): arr = PandasArray(np.array([-1.0, 0.0, 1.0])) result = np.abs(arr) expected = PandasArray(np.abs(arr._ndarray)) tm.assert_extension_array_equal(result, expected) r1, r2 = np.divmod(arr, np.add(arr, 2)) e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2)) e1 = PandasArray(e1) e2 = PandasArray(e2) tm.assert_extension_array_equal(r1, e1) tm.assert_extension_array_equal(r2, e2) def test_basic_binop(): # Just a basic smoke test. The EA interface tests exercise this # more thoroughly. x = PandasArray(np.array([1, 2, 3])) result = x + x expected = PandasArray(np.array([2, 4, 6])) tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize("dtype", [None, object]) def test_setitem_object_typecode(dtype): arr = PandasArray(np.array(["a", "b", "c"], dtype=dtype)) arr[0] = "t" expected = PandasArray(np.array(["t", "b", "c"], dtype=dtype)) tm.assert_extension_array_equal(arr, expected) def test_setitem_no_coercion(): # https://github.com/pandas-dev/pandas/issues/28150 arr = PandasArray(np.array([1, 2, 3])) with pytest.raises(ValueError, match="int"): arr[0] = "a" # With a value that we do coerce, check that we coerce the value # and not the underlying array. arr[0] = 2.5 assert isinstance(arr[0], (int, np.integer)), type(arr[0]) def test_setitem_preserves_views(): # GH#28150, see also extension test of the same name arr = PandasArray(np.array([1, 2, 3])) view1 = arr.view() view2 = arr[:] view3 = np.asarray(arr) arr[0] = 9 assert view1[0] == 9 assert view2[0] == 9 assert view3[0] == 9 arr[-1] = 2.5 view1[-1] = 5 assert arr[-1] == 5
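# The constructor and setitem tests above pin down PandasArray's zero-copy
# behavior. The same semantics shown standalone -- a sketch built only on
# behavior the tests assert:

import numpy as np
from pandas.arrays import PandasArray

base = np.array([1, 2, 3])

arr = PandasArray(base)    # wraps base without copying by default
arr[0] = 9
assert base[0] == 9        # the write is visible through the wrapped ndarray

detached = PandasArray(base, copy=True)
detached[1] = 99
assert base[1] == 2        # copy=True detaches the storage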
import sys
from klampt import *
from klampt.glrobotprogram import *

keymap = None

def build_default_keymap(world):
    """Builds a default keymap: 1234567890 increases values of DOFs 1-10
    of robot 0.  qwertyuiop decreases values."""
    if world.numRobots() == 0:
        return {}
    robot = world.robot(0)
    up = '1234567890'
    down = 'qwertyuiop'
    res = {}
    for i in range(min(robot.numDrivers(),10)):
        #up velocity
        vel = [0]*robot.numLinks()
        if robot.driver(i).getType() == 'normal':
            vel[robot.driver(i).getAffectedLink()] = 1
        else:
            #skip it
            #links = robot.driver(i).getAffectedLinks();
            continue
        res[up[i]] = (0,vel)
        #down velocity
        vel = vectorops.mul(vel,-1)
        res[down[i]] = (0,vel)
    return res

glutspecialmap = {
    GLUT_KEY_F1:'f1',
    GLUT_KEY_F2:'f2',
    GLUT_KEY_F3:'f3',
    GLUT_KEY_F4:'f4',
    GLUT_KEY_F5:'f5',
    GLUT_KEY_F6:'f6',
    GLUT_KEY_F7:'f7',
    GLUT_KEY_F8:'f8',
    GLUT_KEY_F9:'f9',
    GLUT_KEY_F10:'f10',
    GLUT_KEY_F11:'f11',
    GLUT_KEY_F12:'f12',
    GLUT_KEY_LEFT:'left',
    GLUT_KEY_UP:'up',
    GLUT_KEY_RIGHT:'right',
    GLUT_KEY_DOWN:'down',
    GLUT_KEY_PAGE_UP:'pageup',
    GLUT_KEY_PAGE_DOWN:'pagedown',
    GLUT_KEY_HOME:'home',
    GLUT_KEY_END:'end',
    GLUT_KEY_INSERT:'insert'
}

class MyGLViewer(GLSimulationProgram):
    def __init__(self,world):
        global keymap
        GLSimulationProgram.__init__(self,world,"My GL program")
        self.world = world
        if keymap is None:
            keymap = build_default_keymap(world)
        self.keymap = keymap
        self.current_velocities = {}
        #Put your initialization code here

    def control_loop(self):
        #Calculate the desired velocity for each robot by adding up all
        #commands
        rvels = [[0]*self.world.robot(r).numLinks() for r in range(self.world.numRobots())]
        for (c,(r,v)) in self.current_velocities.iteritems():
            rvels[r] = vectorops.add(rvels[r],v)
        #print rvels
        #send to the robot(s)
        for r in range(self.world.numRobots()):
            robotController = self.sim.controller(r)
            qdes = robotController.getCommandedConfig()
            qdes = vectorops.madd(qdes,rvels[r],self.dt)
            #clamp to joint limits
            (qmin,qmax) = self.world.robot(r).getJointLimits()
            for i in xrange(len(qdes)):
                qdes[i] = min(qmax[i],max(qdes[i],qmin[i]))
            robotController.setPIDCommand(qdes,rvels[r])
        return

    def mousefunc(self,button,state,x,y):
        #Put your mouse handler here
        #the current example prints out the list of objects clicked whenever
        #you right click
        if button==2:
            if state==0:
                print [o.getName() for o in self.click_world(x,y)]
            return
        GLRealtimeProgram.mousefunc(self,button,state,x,y)

    def specialfunc(self,c,x,y):
        #Put your keyboard special character handler here
        if c in glutspecialmap:
            name = glutspecialmap[c]
            if name in self.keymap:
                self.current_velocities[name]=self.keymap[name]

    def specialupfunc(self,c,x,y):
        #Put your keyboard special character handler here
        if c in glutspecialmap:
            name = glutspecialmap[c]
            if name in self.current_velocities:
                del self.current_velocities[name]

    def keyboardfunc(self,c,x,y):
        #Put your keyboard handler here
        #the current example toggles simulation / movie mode
        if c == 's':
            self.simulate = not self.simulate
            print "Simulating:",self.simulate
        elif c == 'm':
            self.saveScreenshots = not self.saveScreenshots
            print "Movie mode:",self.saveScreenshots
        elif c == 'h':
            print 'Available keys:',sorted(self.keymap.keys())
        elif c in self.keymap:
            self.current_velocities[c]=self.keymap[c]
        glutPostRedisplay()

    def keyboardupfunc(self,c,x,y):
        if c in self.current_velocities:
            del self.current_velocities[c]
        return

if __name__ == "__main__":
    print "kbdrive.py: This example demonstrates how to drive a robot using keyboard input"
    if len(sys.argv)<=1:
        print "USAGE: kbdrive.py [world_file]"
        exit()
    world = WorldModel()
for fn in sys.argv[1:]: res = world.readFile(fn) if not res: raise RuntimeError("Unable to load model "+fn) viewer = MyGLViewer(world) viewer.run()
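# The keymap consumed by MyGLViewer maps a key name to a
# (robot_index, per-link velocity vector) pair, as built in
# build_default_keymap. A custom binding would look like this -- a sketch;
# the link count and speeds below are made up:

custom_keymap = {
    'a': (0, [0.5] + [0.0] * 6),   # drive link 0 of robot 0 forward
    'z': (0, [-0.5] + [0.0] * 6),  # ...and backward
}
# To use it, assign to the module-level `keymap` before constructing
# MyGLViewer(world), e.g.:  keymap = custom_keymap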
""" Various utility functions. ----- Permission to use, modify, and distribute this software is given under the terms of the NumPy License. See http://scipy.org. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Author: Pearu Peterson <pearu@cens.ioc.ee> Created: May 2006 ----- """ __all__ = ['split_comma', 'specs_split_comma', 'ParseError','AnalyzeError', 'get_module_file','parse_bind','parse_result','is_name','parse_array_spec', 'CHAR_BIT','str2stmt', 'classes'] import re import os, glob import sys import traceback class ParseError(Exception): pass class AnalyzeError(Exception): pass is_name = re.compile(r'^[a-z_]\w*$',re.I).match name_re = re.compile(r'[a-z_]\w*',re.I).match is_entity_decl = re.compile(r'^[a-z_]\w*',re.I).match is_int_literal_constant = re.compile(r'^\d+(_\w+|)$').match module_file_extensions = ['.f', '.f90', '.f95', '.f03', '.f08'] def split_comma(line, item = None, comma=',', keep_empty=False): items = [] if item is None: for s in line.split(comma): s = s.strip() if not s and not keep_empty: continue items.append(s) return items if not line.strip(): # we may have blank space so strip the line return [] newitem = item.copy(line, True) apply_map = newitem.apply_map for s in newitem.get_line().split(comma): s = apply_map(s).strip() if not s and not keep_empty: continue items.append(s) return items def parse_array_spec(line, item = None): items = [] for spec in split_comma(line, item): items.append(tuple(split_comma(spec, item, comma=':', keep_empty=True))) return items def specs_split_comma(line, item = None, upper=False): specs0 = split_comma(line, item) specs = [] for spec in specs0: i = spec.find('=') if i!=-1: kw = spec[:i].strip().upper() v = spec[i+1:].strip() specs.append('%s = %s' % (kw, v)) else: if upper: spec = spec.upper() specs.append(spec) return specs def parse_bind(line, item = None): if not line.lower().startswith('bind'): return None, line if item is not None: newitem = item.copy(line, apply_map=True) newline = newitem.get_line() else: newitem = None newline = newline[4:].lstrip() i = newline.find(')') assert i!=-1,`newline` args = [] for a in specs_split_comma(newline[1:i].strip(), newitem, upper=True): args.append(a) rest = newline[i+1:].lstrip() if item is not None: rest = newitem.apply_map(rest) return args, rest def parse_result(line, item = None): if not line.lower().startswith('result'): return None, line line = line[6:].lstrip() i = line.find(')') assert i != -1,`line` name = line[1:i].strip() assert is_name(name),`name` return name, line[i+1:].lstrip() def filter_stmts(content, classes): """ Pop and return classes instances from content. 
""" stmts = [] indices = [] for i in range(len(content)): stmt = content[i] if isinstance(stmt, classes): stmts.append(stmt) indices.append(i) indices.reverse() for i in indices: del content[i] return stmts def get_module_files(directory, _cache={}): if directory in _cache: return _cache[directory] module_line = re.compile(r'(\A|^)module\s+(?P<name>\w+)\s*(!.*|)$',re.I | re.M) d = {} files = [] for ext in module_file_extensions: files += glob.glob(os.path.join(directory,'*'+ext)) for fn in files: f = open(fn,'r') for name in module_line.findall(f.read()): name = name[1] if name in d: print d[name],'already defines',name continue d[name] = fn _cache[directory] = d return d def get_module_file(name, directory, _cache={}): fn = _cache.get(name, None) if fn is not None: return fn if name.endswith('_module'): for ext in module_file_extensions: f1 = os.path.join(directory,name[:-7]+ext) if os.path.isfile(f1): _cache[name] = fn return f1 files = [] for ext in module_file_extensions: files += glob.glob(os.path.join(directory,'*'+ext)) for fn in files: if module_in_file(name, fn): _cache[name] = fn return fn return def module_in_file(name, filename): name = name.lower() pattern = re.compile(r'\s*module\s+(?P<name>[a-z]\w*)', re.I).match f = open(filename,'r') for line in f: m = pattern(line) if m and m.group('name').lower()==name: f.close() return filename f.close() def str2stmt(string, isfree=True, isstrict=False): """ Convert Fortran code to Statement tree. """ from readfortran import Line, FortranStringReader from parsefortran import FortranParser reader = FortranStringReader(string, isfree, isstrict) parser = FortranParser(reader) parser.parse() parser.analyze() block = parser.block while len(block.content)==1: block = block.content[0] return block def get_char_bit(): import numpy one = numpy.ubyte(1) two = numpy.ubyte(2) n = numpy.ubyte(2) i = 1 while n>=two: n <<= one i += 1 return i CHAR_BIT = get_char_bit() def show_item_on_failure(func, _exception_depth=[0]): """ Decorator for analyze methods. """ def new_func(self): try: func(self) except AnalyzeError, msg: clsname = self.__class__.__name__ self.error('%s.analyze error: %s' % (clsname,msg)) traceback.print_exc() except ParseError, msg: self.error('parse error: %s' % (msg)) except Exception, msg: _exception_depth[0] += 1 if _exception_depth[0]==1: self.error('exception triggered here: %s %s' % (Exception, msg)) raise _exception_depth[0] = 0 return new_func _classes_cache = {} class meta_classes(type): """ Meta class for ``classes``. """ __abstractmethods__ = False def __getattr__(self, name): # Expose created classes only as attributes to ``classes`` type. cls = _classes_cache.get(name) if cls is None: raise AttributeError('instance does not have attribute %r' % (name)) return cls class classes(type): """Make classes available as attributes of this class. To add a class to the attributes list, one must use:: __metaclass__ = classes in the definition of the class. In addition, apply the following tasks: * decorate analyze methods with show_item_on_failure """ __metaclass__ = meta_classes def __new__(metacls, name, bases, dict): if 'analyze' in dict: dict['analyze'] = show_item_on_failure(dict['analyze']) cls = type.__new__(metacls, name, bases, dict) _classes_cache[name] = cls return cls
from setuptools import setup, find_packages

description = """
Lightweight connection pooler for PostgreSQL.
"""

long_description = """
* **Documentation**: TODO
* **Project page**: TODO
"""

setup(
    name = "pgbouncer-ng",
    version = ':versiontools:pgbouncerlib:',
    url = 'https://github.com/niwibe/pgbouncer-ng',
    license = 'BSD',
    platforms = ['OS Independent'],
    description = description.strip(),
    long_description = long_description.strip(),
    author = 'Andrei Antoukh',
    author_email = 'niwi@niwi.be',
    maintainer = 'Andrei Antoukh',
    maintainer_email = 'niwi@niwi.be',
    packages = ['pgbouncerlib'],
    include_package_data = True,
    scripts = ['pgbouncer-ng'],
    install_requires = [
        'distribute',
    ],
    setup_requires = [
        'versiontools >= 1.8',
    ],
    data_files = [
        ('/etc', ['pgbouncerng.ini']),
    ],
    zip_safe = False,
    classifiers = [
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Operating System :: POSIX',
    ]
)
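# Because the version string is delegated to versiontools (the
# ':versiontools:pgbouncerlib:' marker above), the package must be buildable
# with versiontools on the path. A typical session -- shown as shell
# comments; setup_requires should normally pull versiontools in, but
# installing it up front avoids problems on older setuptools:
#
#   pip install versiontools        # expands the version marker
#   python setup.py sdist           # build a source distribution
#   python setup.py install         # or: pip install .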
"""Ops that consume or generate index-based pointers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_where_op from tensorflow.python.ops.ragged import segment_id_ops def gather_with_default(params, indices, default, name=None, axis=0): """Gather slices with `indices=-1` mapped to `default`. This operation is similar to `tf.gather()`, except that any value of `-1` in `indices` will be mapped to `default`. Example: ```python >>> gather_with_default(['a', 'b', 'c', 'd'], [2, 0, -1, 2, -1], '_').eval() array(['c', 'a', '_', 'c', '_'], dtype=object) ``` Args: params: The `Tensor` from which to gather values. Must be at least rank `axis + 1`. indices: The index `Tensor`. Must have dtype `int32` or `int64`, and values must be in the range `[-1, params.shape[axis])`. default: The value to use when `indices` is `-1`. `default.shape` must be equal to `params.shape[axis + 1:]`. name: A name for the operation (optional). axis: The axis in `params` to gather `indices` from. Must be a scalar `int32` or `int64`. Supports negative indices. Returns: A `Tensor` with the same type as `param`, and with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`. """ # This implementation basically just concatenates the default value and # the params together, and then uses gather(default_plus_params, indices + 1) # to get the appropriate values. Most of the complexity below has to do # with properly handling cases where axis != 0, in which case we need to tile # the default before concatenating it. with ops.name_scope(name, 'GatherWithDefault', [params, indices, default, axis]): # Convert inputs to tensors. indices = ops.convert_to_tensor( indices, name='indices', preferred_dtype=dtypes.int32) params = ops.convert_to_tensor(params, name='params') default = ops.convert_to_tensor(default, name='default', dtype=params.dtype) if axis == 0: tiled_default = array_ops.stack([default]) else: # Get ranks & shapes of inputs. params_rank = array_ops.rank(params) params_shape = array_ops.shape(params) default_shape = array_ops.shape(default) outer_params_shape = params_shape[:axis] # This will equal `axis` if axis>=0. outer_params_rank = array_ops.shape(outer_params_shape)[0] # Add dimensions (with size=1) to default, so its rank matches params. new_shape = array_ops.concat([ array_ops.ones([outer_params_rank + 1], dtypes.int32), default_shape ], axis=0) reshaped_default = array_ops.reshape(default, new_shape) # Tile the default for any dimension dim<axis, so its size matches params. multiples = array_ops.concat([ outer_params_shape, array_ops.ones(params_rank - outer_params_rank, dtypes.int32) ], axis=0) tiled_default = array_ops.tile(reshaped_default, multiples) # Prepend the default value to params (on the chosen axis). Thus, the # default value is at index 0, and all other values have their index # incremented by one. 
default_plus_params = array_ops.concat([tiled_default, params], axis=axis) return array_ops.gather(default_plus_params, indices + 1, axis=axis) def span_overlaps(source_start, source_limit, target_start, target_limit, contains=False, contained_by=False, partial_overlap=False, name=None): """Returns a boolean tensor indicating which source and target spans overlap. The source and target spans are specified using B+1 dimensional tensors, with `B>=0` batch dimensions followed by a final dimension that lists the span offsets for each span in the batch: * The `i`th source span in batch `b1...bB` starts at `source_start[b1...bB, i]` (inclusive), and extends to just before `source_limit[b1...bB, i]` (exclusive). * The `j`th target span in batch `b1...bB` starts at `target_start[b1...bB, j]` (inclusive), and extends to just before `target_limit[b1...bB, j]` (exclusive). `result[b1...bB, i, j]` is true if the `i`th source span overlaps with the `j`th target span in batch `b1...bB`, where a source span overlaps a target span if any of the following are true: * The spans are identical. * `contains` is true, and the source span contains the target span. * `contained_by` is true, and the source span is contained by the target span. * `partial_overlap` is true, and there is a non-zero overlap between the source span and the target span. Args: source_start: A B+1 dimensional potentially ragged tensor with shape `[D1...DB, source_size]`: the start offset of each source span. source_limit: A B+1 dimensional potentially ragged tensor with shape `[D1...DB, source_size]`: the limit offset of each source span. target_start: A B+1 dimensional potentially ragged tensor with shape `[D1...DB, target_size]`: the start offset of each target span. target_limit: A B+1 dimensional potentially ragged tensor with shape `[D1...DB, target_size]`: the limit offset of each target span. contains: If true, then a source span is considered to overlap a target span when the source span contains the target span. contained_by: If true, then a source span is considered to overlap a target span when the source span is contained by the target span. partial_overlap: If true, then a source span is considered to overlap a target span when the source span partially overlaps the target span. name: A name for the operation (optional). Returns: A B+2 dimensional potentially ragged boolean tensor with shape `[D1...DB, source_size, target_size]`. Raises: ValueError: If the span tensors are incompatible. #### Example: Given the following source and target spans (with no batch dimensions): ```python # 0 5 10 15 20 25 30 35 40 # |====|====|====|====|====|====|====|====| # Source: [-0-] [-1-] [2] [-3-][-4-][-5-] # Target: [-0-][-1-] [-2-] [3] [-4-][-5-] # |====|====|====|====|====|====|====|====| >>> source_start = [0, 10, 16, 20, 25, 30] >>> source_limit = [5, 15, 19, 25, 30, 35] >>> target_start = [0, 5, 15, 21, 27, 31] >>> target_limit = [5, 10, 20, 24, 32, 37] ``` `result[i, j]` will be true at the following locations: * `[0, 0]` (always) * `[2, 2]` (if contained_by=True or partial_overlaps=True) * `[3, 3]` (if contains=True or partial_overlaps=True) * `[4, 4]` (if partial_overlaps=True) * `[5, 5]` (if partial_overlaps=True) """ _check_type(contains, 'contains', bool) _check_type(contained_by, 'contained_by', bool) _check_type(partial_overlap, 'partial_overlap', bool) scope_tensors = [source_start, source_limit, target_start, target_limit] with ops.name_scope(name, 'SpanOverlaps', scope_tensors): # Convert input tensors. 
source_start = ragged_tensor.convert_to_tensor_or_ragged_tensor( source_start, name='source_start') source_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor( source_limit, name='source_limit') target_start = ragged_tensor.convert_to_tensor_or_ragged_tensor( target_start, name='target_start') target_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor( target_limit, name='target_limit') span_tensors = [source_start, source_limit, target_start, target_limit] # Verify input tensor shapes and types. source_start.shape.assert_is_compatible_with(source_limit.shape) target_start.shape.assert_is_compatible_with(target_limit.shape) source_start.shape.assert_same_rank(target_start.shape) source_start.shape.assert_same_rank(target_limit.shape) source_limit.shape.assert_same_rank(target_start.shape) source_limit.shape.assert_same_rank(target_limit.shape) if not (source_start.dtype == target_start.dtype == source_limit.dtype == target_limit.dtype): raise TypeError('source_start, source_limit, target_start, and ' 'target_limit must all have the same dtype') ndims = set( [t.shape.ndims for t in span_tensors if t.shape.ndims is not None]) assert len(ndims) <= 1 # because of assert_same_rank statements above. if all(not isinstance(t, ragged_tensor.RaggedTensor) for t in span_tensors): return _span_overlaps(source_start, source_limit, target_start, target_limit, contains, contained_by, partial_overlap) elif all(isinstance(t, ragged_tensor.RaggedTensor) for t in span_tensors): if not ndims: raise ValueError('For ragged inputs, the shape.ndims of at least one ' 'span tensor must be statically known.') if list(ndims)[0] == 2: return _span_overlaps(source_start, source_limit, target_start, target_limit, contains, contained_by, partial_overlap) else: # Handle ragged batch dimension by recursion on values. row_splits = span_tensors[0].row_splits shape_checks = [ check_ops.assert_equal( t.row_splits, row_splits, message='Mismatched ragged shapes for batch dimensions') for t in span_tensors[1:] ] with ops.control_dependencies(shape_checks): return ragged_tensor.RaggedTensor.from_row_splits( span_overlaps(source_start.values, source_limit.values, target_start.values, target_limit.values, contains, contained_by, partial_overlap), row_splits) else: # Mix of dense and ragged tensors. raise ValueError('Span tensors must all have the same ragged_rank') def _span_overlaps(source_start, source_limit, target_start, target_limit, contains, contained_by, partial_overlap): """Implementation of span_overlaps(). If the inputs are ragged, then the source tensors must have exactly one batch dimension. (I.e., `B=1` in the param descriptions below.) Args: source_start: `<int>[D1...DB, source_size]` source_limit: `<int>[D1...DB, source_size]` target_start: `<int>[D1...DB, target_size]` target_limit: `<int>[D1...DB, target_size]` contains: `bool` contained_by: `bool` partial_overlap: `bool` Returns: `<bool>[D1...DB, source_size, target_size]` """ if isinstance(source_start, ops.Tensor): # Reshape the source tensors to [D1...DB, source_size, 1] and the # target tensors to [D1...DB, 1, target_size], so we can use broadcasting. # In particular, elementwise_op(source_x, target_x) will have shape # [D1...DB, source_size, target_size]. 
source_start = array_ops.expand_dims(source_start, -1) source_limit = array_ops.expand_dims(source_limit, -1) target_start = array_ops.expand_dims(target_start, -2) target_limit = array_ops.expand_dims(target_limit, -2) equal = math_ops.equal less_equal = math_ops.less_equal less = math_ops.less logical_and = math_ops.logical_and logical_or = math_ops.logical_or else: # Broadcast the source span indices to all have shape # [batch_size, (source_size), (target_size)]. (source_start, source_limit) = _broadcast_ragged_sources_for_overlap( source_start, source_limit, target_start.row_splits) (target_start, target_limit) = _broadcast_ragged_targets_for_overlap( target_start, target_limit, source_start.row_splits) # Use map_flat_values to perform elementwise operations. equal = functools.partial(ragged_functional_ops.map_flat_values, math_ops.equal) less_equal = functools.partial(ragged_functional_ops.map_flat_values, math_ops.less_equal) less = functools.partial(ragged_functional_ops.map_flat_values, math_ops.less) logical_and = functools.partial(ragged_functional_ops.map_flat_values, math_ops.logical_and) logical_or = functools.partial(ragged_functional_ops.map_flat_values, math_ops.logical_or) if partial_overlap: return logical_or( logical_and( less_equal(source_start, target_start), less(target_start, source_limit)), logical_and( less_equal(target_start, source_start), less(source_start, target_limit))) elif contains and contained_by: return logical_or( logical_and( less_equal(source_start, target_start), less_equal(target_limit, source_limit)), logical_and( less_equal(target_start, source_start), less_equal(source_limit, target_limit))) elif contains: return logical_and( less_equal(source_start, target_start), less_equal(target_limit, source_limit)) elif contained_by: return logical_and( less_equal(target_start, source_start), less_equal(source_limit, target_limit)) else: return logical_and( equal(target_start, source_start), equal(source_limit, target_limit)) def _broadcast_ragged_targets_for_overlap(target_start, target_limit, source_splits): """Repeats target indices for each source item in the same batch. Args: target_start: `<int>[batch_size, (target_size)]` target_limit: `<int>[batch_size, (target_size)]` source_splits: `<int64>[batch_size, (source_size+1)]` Returns: `<int>[batch_size, (source_size), (target_size)]`. A tuple of ragged tensors `(tiled_target_start, tiled_target_limit)` where: * `tiled_target_start[b, s, t] = target_start[b, t]` * `tiled_target_limit[b, s, t] = target_limit[b, t]` """ source_batch_ids = segment_id_ops.row_splits_to_segment_ids(source_splits) target_start = ragged_tensor.RaggedTensor.from_value_rowids( ragged_gather_ops.gather(target_start, source_batch_ids), source_batch_ids) target_limit = ragged_tensor.RaggedTensor.from_value_rowids( ragged_gather_ops.gather(target_limit, source_batch_ids), source_batch_ids) return (target_start, target_limit) def _broadcast_ragged_sources_for_overlap(source_start, source_limit, target_splits): """Repeats source indices for each target item in the same batch. Args: source_start: `<int>[batch_size, (source_size)]` source_limit: `<int>[batch_size, (source_size)]` target_splits: `<int64>[batch_size, (target_size+1)]` Returns: `<int>[batch_size, (source_size), (target_size)]`. 
    A tuple of tensors `(tiled_source_start, tiled_source_limit)` where:

    * `tiled_source_start[b, s, t] = source_start[b, s]`
    * `tiled_source_limit[b, s, t] = source_limit[b, s]`
  """
  source_splits = source_start.row_splits
  target_rowlens = target_splits[1:] - target_splits[:-1]
  source_batch_ids = segment_id_ops.row_splits_to_segment_ids(source_splits)

  # <int64>[sum(source_size[b] for b in range(batch_size))]
  # source_repeats[i] is the number of target spans in the batch that contains
  # source span i.  We need to add a new ragged dimension that repeats each
  # source span this number of times.
  source_repeats = ragged_gather_ops.gather(target_rowlens, source_batch_ids)

  # <int64>[sum(source_size[b] for b in range(batch_size)) + 1]
  # The row_splits tensor for the inner ragged dimension of the result tensors.
  inner_splits = array_ops.concat([[0], math_ops.cumsum(source_repeats)],
                                  axis=0)

  # <int64>[sum(source_size[b] * target_size[b] for b in range(batch_size))]
  # Indices for gathering source indices.
  source_indices = segment_id_ops.row_splits_to_segment_ids(inner_splits)

  source_start = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_start.values, source_indices),
      [source_splits, inner_splits])
  source_limit = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_limit.values, source_indices),
      [source_splits, inner_splits])

  return source_start, source_limit


def span_alignment(source_start,
                   source_limit,
                   target_start,
                   target_limit,
                   contains=False,
                   contained_by=False,
                   partial_overlap=False,
                   multivalent_result=False,
                   name=None):
  """Return an alignment from a set of source spans to a set of target spans.

  The source and target spans are specified using B+1 dimensional tensors,
  with `B>=0` batch dimensions followed by a final dimension that lists the
  span offsets for each span in the batch:

  * The `i`th source span in batch `b1...bB` starts at
    `source_start[b1...bB, i]` (inclusive), and extends to just before
    `source_limit[b1...bB, i]` (exclusive).
  * The `j`th target span in batch `b1...bB` starts at
    `target_start[b1...bB, j]` (inclusive), and extends to just before
    `target_limit[b1...bB, j]` (exclusive).

  `result[b1...bB, i]` contains the index (or indices) of the target span that
  overlaps with the `i`th source span in batch `b1...bB`.  The
  `multivalent_result` parameter indicates whether the result should contain
  a single span that aligns with the source span, or all spans that align
  with the source span.

  * If `multivalent_result` is false (the default), then `result[b1...bB, i]=j`
    indicates that the `j`th target span overlaps with the `i`th source span
    in batch `b1...bB`.  If no target spans overlap with the `i`th source
    span, then `result[b1...bB, i]=-1`.

  * If `multivalent_result` is true, then `result[b1...bB, i, n]=j` indicates
    that the `j`th target span is the `n`th span that overlaps with the `i`th
    source span in batch `b1...bB`.

  For a definition of span overlap, see the docstring for `span_overlaps()`.

  Args:
    source_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the start offset of each source span.
    source_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the limit offset of each source span.
    target_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the start offset of each target span.
    target_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the limit offset of each target span.
    contains: If true, then a source span is considered to overlap a target
      span when the source span contains the target span.
    contained_by: If true, then a source span is considered to overlap a
      target span when the source span is contained by the target span.
    partial_overlap: If true, then a source span is considered to overlap a
      target span when the source span partially overlaps the target span.
    multivalent_result: Whether the result should contain a single target span
      index (if `multivalent_result=False`) or a list of target span indices
      (if `multivalent_result=True`) for each source span.
    name: A name for the operation (optional).

  Returns:
    An int64 tensor with values in the range: `-1 <= result < target_size`.
    If `multivalent_result=False`, then the returned tensor has shape
    `[source_size]`, where `source_size` is the length of the `source_start`
    and `source_limit` input tensors.
    If `multivalent_result=True`, then the returned tensor has shape
    `[source_size, (num_aligned_target_spans)]`.

  #### Examples:
    Given the following source and target spans (with no batch dimensions):

    ```python
    >>> #         0    5    10   15   20   25   30   35   40   45   50   55   60
    >>> #         |====|====|====|====|====|====|====|====|====|====|====|====|
    >>> # Source: [-0-]     [-1-] [2] [3]    [4][-5-][-6-][-7-][-8-][-9-]
    >>> # Target: [-0-][-1-]     [-2-][-3-][-4-] [5] [6]   [7]  [-8-][-9-][10]
    >>> #         |====|====|====|====|====|====|====|====|====|====|====|====|
    >>> source_starts = [0, 10, 16, 20, 27, 30, 35, 40, 45, 50]
    >>> source_limits = [5, 15, 19, 23, 30, 35, 40, 45, 50, 55]
    >>> target_starts = [0, 5, 15, 21, 27, 31, 35, 42, 47, 52, 57]
    >>> target_limits = [5, 10, 20, 24, 32, 37, 38, 45, 52, 57, 61]
    >>> span_alignment_lists(source_starts, source_limits, target_starts, target_limits)
    [0, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      multivalent_result=True)
    [[0], [], [], [], [], [], [], [], [], []]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      contains=True)
    [ 0, -1, -1, -1, -1, 5, 6, 7, -1, -1]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      partial_overlap=True,
    ...                      multivalent_result=True)
    [[0], [], [2], [3], [4], [5], [6], [7], [8], [8, 9]]
    ```
  """
  scope_tensors = [source_start, source_limit, target_start, target_limit]
  with ops.name_scope(name, 'SpanAlignment', scope_tensors):
    source_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_start, name='source_start')
    source_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_limit, name='source_limit')
    target_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_start, name='target_start')
    target_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_limit, name='target_limit')

    # <bool>[D1...DB, source_size, target_size]
    # overlaps[b1...bB, i, j] is true if source span i overlaps target span j
    # (in batch b1...bB).
    overlaps = span_overlaps(source_start, source_limit, target_start,
                             target_limit, contains, contained_by,
                             partial_overlap)

    # <int64>[D1...DB, source_size, (num_aligned_spans)]
    # alignment[b1...bB, i, n]=j if target span j is the n'th target span
    # that aligns with source span i (in batch b1...bB).
    alignment = _multivalent_span_alignment(overlaps)

    if not multivalent_result:
      # <int64>[D1...DB, source_size]
      # alignment[b1...bB, i]=j if target span j is the last target span
      # that aligns with source span i, or -1 if no target spans align.
alignment = ragged_functional_ops.map_flat_values( math_ops.maximum, ragged_math_ops.reduce_max(alignment, axis=-1), -1) return alignment def _multivalent_span_alignment(overlaps): """Returns the multivalent span alignment for a given overlaps tensor. Args: overlaps: `<int64>[D1...DB, source_size, target_size]`: `overlaps[b1...bB, i, j]` is true if source span `i` overlaps target span `j` (in batch `b1...bB`). Returns: `<int64>[D1...DB, source_size, (num_aligned_spans)]`: `result[b1...bB, i, n]=j` if target span `j` is the `n`'th target span that aligns with source span `i` (in batch `b1...bB`). """ overlaps_ndims = overlaps.shape.ndims assert overlaps_ndims is not None # guaranteed/checked by span_overlaps() assert overlaps_ndims >= 2 # If there are multiple batch dimensions, then flatten them and recurse. if overlaps_ndims > 3: if not isinstance(overlaps, ragged_tensor.RaggedTensor): overlaps = ragged_tensor.RaggedTensor.from_tensor( overlaps, ragged_rank=overlaps.shape.ndims - 3) return overlaps.with_values(_multivalent_span_alignment(overlaps.values)) elif overlaps_ndims == 2: # no batch dimension assert not isinstance(overlaps, ragged_tensor.RaggedTensor) overlap_positions = array_ops.where(overlaps) return ragged_tensor.RaggedTensor.from_value_rowids( values=overlap_positions[:, 1], value_rowids=overlap_positions[:, 0], nrows=array_ops.shape(overlaps, out_type=dtypes.int64)[0]) else: # batch dimension if not isinstance(overlaps, ragged_tensor.RaggedTensor): overlaps = ragged_tensor.RaggedTensor.from_tensor(overlaps, ragged_rank=1) overlap_positions = ragged_where_op.where(overlaps.values) if isinstance(overlaps.values, ragged_tensor.RaggedTensor): overlaps_values_nrows = overlaps.values.nrows() else: overlaps_values_nrows = array_ops.shape(overlaps.values, out_type=dtypes.int64)[0] return overlaps.with_values( ragged_tensor.RaggedTensor.from_value_rowids( values=overlap_positions[:, 1], value_rowids=overlap_positions[:, 0], nrows=overlaps_values_nrows)) def _check_type(value, name, expected_type): """Raises TypeError if not isinstance(value, expected_type).""" if not isinstance(value, expected_type): raise TypeError('%s must be %s, not %s' % (name, expected_type.__name__, type(value).__name__))
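# --- Usage sketch (illustrative; not part of the original module) ---
# Exercises gather_with_default() and span_overlaps() with the values from
# their docstrings. It assumes a TF 1.x install (the module above is written
# against the TF1 graph/session APIs) and that this module is importable;
# everything else used here is defined above.
if __name__ == '__main__':
  import tensorflow as tf

  with tf.Session() as sess:
    gathered = gather_with_default(['a', 'b', 'c', 'd'], [2, 0, -1, 2, -1], '_')
    print(sess.run(gathered))  # -> array of 'c', 'a', '_', 'c', '_'

    # With the default (exact-match) semantics, only the identical pair
    # ([0, 5) vs [0, 5)) counts as overlapping; partial_overlap=True would
    # also catch partial intersections.
    overlaps = span_overlaps(source_start=[0, 10], source_limit=[5, 15],
                             target_start=[0, 5], target_limit=[5, 10])
    print(sess.run(overlaps))  # -> [[ True False], [False False]]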
import numpy as np

from nose.tools import assert_equal
from numpy.testing import assert_array_equal

from ..fixes import _in1d, _copysign


def test_in1d():
    a = np.arange(10)
    b = a[a % 2 == 0]
    assert_equal(_in1d(a, b).sum(), 5)


def test_copysign():
    a = np.array([-1, 1, -1])
    b = np.array([1, -1, 1])
    assert_array_equal(_copysign(a, b), b)
    assert_array_equal(_copysign(b, a), a)
import numpy as np import time import copy from numpy import array, random, diag from vb_mf import normalize_trans, normalize_emit, make_log_obs_matrix, make_log_obs_matrix_gaussian from treehmm.static import float_type min_val = float_type('1e-150') def independent_update_qs(args): theta, alpha, beta, gamma, X, log_obs_mat, Q, Q_pairs = args.theta, args.alpha, args.beta, args.gamma, args.X, args.log_obs_mat, args.Q, args.Q_pairs I, T, L = X.shape K = alpha.shape[0] a_s = np.zeros((T, K), dtype=float_type) b_s = np.zeros((T, K), dtype=float_type) Q[:] = np.zeros((I, T, K), dtype=float_type) Q_pairs[:] = np.zeros((I, T, K, K), dtype=float_type) loglh = np.zeros(I, dtype=float_type) print 'initializing Q', for i in range(I): emit_probs_mat = np.exp(log_obs_mat[i, :, :]).T if np.any(emit_probs_mat < min_val): print 'applying minimum probability to emit probs' emit_probs_mat[emit_probs_mat < min_val] = min_val transmat = alpha a_t = gamma * emit_probs_mat[:, 0] s_t = [a_t.sum()] a_t /= s_t[0] b_t = np.ones((K, )) a_s[0, :] = a_t b_s[T - 1, :] = b_t # forward algorithm for t in range(1, T): a_t = emit_probs_mat[:, t] * np.dot(a_t.T, transmat) s_t.append(a_t.sum()) a_t /= s_t[t] a_s[t, :] = a_t #back-ward algorithm for t in range(T - 2, -1, -1): b_t = np.dot(transmat, emit_probs_mat[:, t+1] * b_t) b_t /= s_t[t + 1] # previously t b_s[t,:] = b_t loglh[i] = np.log(array(s_t)).sum() for t in range(1,T): tmp1 = a_s[t, :] * b_s[t, :] Q[i, t, :] = tmp1/tmp1.sum() tmp2 = np.dot(np.dot(diag(a_s[t-1,:]), transmat), diag(emit_probs_mat[:,t]* b_s[t,:])) Q_pairs[i, t, :, :] = tmp2/tmp2.sum() if np.any(Q < min_val): print 'fixing Q... values too low' Q[Q < min_val] = min_val print 'done' def independent_update_params(args, renormalize=True): X = args.X Q, Q_pairs, theta, alpha, beta, gamma, vert_parent, vert_children, log_obs_mat, pseudocount = ( args.Q, args.Q_pairs, args.theta, args.alpha, args.beta, args.gamma, args.vert_parent, args.vert_children, args.log_obs_mat, args.pseudocount) I, T, K = Q.shape L = X.shape[2] if args.continuous_observations: new_means = np.zeros_like(args.means) new_variances = np.zeros_like(args.variances) total_q = np.zeros_like(args.variances) else: emit_probs = args.emit_probs emit_probs[:] = pseudocount theta[:] = pseudocount alpha[:] = pseudocount beta[:] = pseudocount gamma[:] = pseudocount for i in xrange(I): vp = vert_parent[i] for t in xrange(T): for k in xrange(K): if i==0 and t==0: gamma[k] += Q[i, t, k] else: for v in xrange(K): if t == 0: beta[v,k] += Q[vp,t,v] * Q[i,t,k] else: alpha[v,k] += Q_pairs[i,t,v,k] if not args.continuous_observations: for l in xrange(L): if X[i,t,l]: emit_probs[k, l] += Q[i, t, k] if args.continuous_observations: for i in xrange(I): for t in xrange(T): for k in xrange(K): for l in xrange(L): new_means[k,l] += Q[i, t, k] * X[i,t,l] # expectation of X wrt Q total_q[k,l] += Q[i,t,k] args.means[:] = new_means = new_means / total_q + 1e-50 np.seterr(under='ignore') for i in xrange(I): for t in xrange(T): for k in xrange(K): for l in xrange(L): new_variances[k,l] += Q[i, t, k] * (X[i,t,l] - new_means[k,l]) * (X[i,t,l] - new_means[k,l]) np.seterr(under='print') args.variances[:] = new_variances / total_q # 1 / N_k args.variances += pseudocount else: normalize_emit(Q, emit_probs, pseudocount, args, renormalize) if renormalize: theta += theta.max() * (pseudocount * 1e-20) alpha += alpha.max() * (pseudocount * 1e-20) beta += beta.max() * (pseudocount * 1e-20) gamma += gamma.max() * (pseudocount * 1e-20) normalize_trans(theta, alpha, beta, gamma) if 
args.continuous_observations: make_log_obs_matrix_gaussian(args) else: make_log_obs_matrix(args) def independent_free_energy(args): """Calculate the free energy for Q""" I, T, L = args.X.shape K = args.alpha.shape[0] a_s = np.zeros((T,K), dtype=float_type) b_s = np.zeros((T,K), dtype=float_type) loglh = np.zeros(I, dtype=float_type) transmat = args.alpha for i in range(I): emit_probs_mat = np.exp(args.log_obs_mat[i,:,:]).T if np.any(emit_probs_mat < min_val): print 'applying minimum probability to emit probs' emit_probs_mat[emit_probs_mat < min_val] = min_val a_t = args.gamma * emit_probs_mat[:, 0] s_t = [a_t.sum()] a_t /= s_t[0] b_t = np.ones((K,)) a_s[0,:] = a_t b_s[T-1,:] = b_t # forward algorithm for t in range(1,T): a_t = emit_probs_mat[:,t] * np.dot(a_t.T, transmat) s_t.append(a_t.sum()) a_t /= s_t[t] a_s[t,:] = a_t loglh[i] = np.log(array(s_t)).sum() return loglh.sum()
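# --- Illustrative sketch (not from the original file) ---
# A tiny, self-contained demo of the scaled forward recursion used in
# independent_update_qs() and independent_free_energy() above: the alphas
# are renormalized at every step, and the log-likelihood is recovered from
# the accumulated scale factors. All values below are toy inputs.
def _demo_scaled_forward():
    K, T = 2, 4
    transmat = np.array([[0.9, 0.1], [0.2, 0.8]], dtype=float_type)
    gamma = np.array([0.5, 0.5], dtype=float_type)   # initial distribution
    emit = random.rand(K, T) + 1e-3                  # fake emission probs
    a_t = gamma * emit[:, 0]
    s_t = [a_t.sum()]
    a_t /= s_t[0]
    for t in range(1, T):
        a_t = emit[:, t] * np.dot(a_t.T, transmat)
        s_t.append(a_t.sum())
        a_t /= s_t[t]
    return np.log(array(s_t)).sum()                  # log P(X)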
""" ============================================================== Reading a .dip file form xfit and view with source space in 3D ============================================================== Here the .dip file was generated with the mne_dipole_fit command. Detailed unix command is : $mne_dipole_fit --meas sample_audvis-ave.fif --set 1 --meg --tmin 40 --tmax 95 \ --bmin -200 --bmax 0 --noise sample_audvis-cov.fif \ --bem ../../subjects/sample/bem/sample-5120-bem-sol.fif \ --origin 0:0:40 --mri sample_audvis-meg-oct-6-fwd.fif \ --dip sample_audvis_set1.dip """ print(__doc__) import numpy as np import mne from mne.datasets import sample data_path = sample.data_path() fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif' dip_fname = data_path + '/MEG/sample/sample_audvis_set1.dip' bem_fname = data_path + '/subjects/sample/bem/sample-5120-bem-sol.fif' brain_surface = mne.read_bem_surfaces(bem_fname, add_geom=True)[0] points = brain_surface['rr'] faces = brain_surface['tris'] fwd = mne.read_forward_solution(fwd_fname) src = fwd['src'] time, pos, amplitude, ori, gof = mne.read_dip(dip_fname) print("Time (ms): %s" % time) print("Amplitude (nAm): %s" % amplitude) print("GOF (%%): %s" % gof) pos = pos[gof > 50.] ori = ori[gof > 50.] time = time[gof > 50.] try: from enthought.mayavi import mlab except: from mayavi import mlab lh_points = src[0]['rr'] lh_faces = src[0]['use_tris'] mlab.figure(size=(600, 600), bgcolor=(1, 1, 1), fgcolor=(0, 0, 0)) points = brain_surface['rr'] faces = brain_surface['tris'] coord_trans = fwd['mri_head_t']['trans'] points = np.dot(coord_trans[:3, :3], points.T).T + coord_trans[:3, -1] mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces, color=(1, 1, 0), opacity=0.3) mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2], lh_faces, color=(0.7, ) * 3) dipoles = mlab.quiver3d(pos[:, 0], pos[:, 1], pos[:, 2], ori[:, 0], ori[:, 1], ori[:, 2], opacity=1., scale_factor=4e-4, scalars=time, mode='cone', colormap='RdBu') dipoles.module_manager.scalar_lut_manager.reverse_lut = True mlab.colorbar(dipoles, title='Dipole fit time (ms)') mlab.get_engine().scenes[0].scene.x_plus_view()
""" The implementation of an IPython shell. """ try: import IPython.frontend except ImportError: raise ImportError, ''' ________________________________________________________________________________ Could not load the Wx frontend for ipython. You need to have ipython >= 0.9 installed to use the ipython widget.''' from toolkit import toolkit_object IPythonWidget= toolkit_object('ipython_widget:IPythonWidget')
from django.conf.urls import url from django.contrib.staticfiles.storage import staticfiles_storage from django.views.generic.base import RedirectView from eventkit_cloud.core.urls import urlpatterns as eventkit_cloud_urlpatterns urlpatterns = [ url( r"^favicon.png$", RedirectView.as_view(url=staticfiles_storage.url("images/favicon.png"), permanent=False), name="favicon", ), ] urlpatterns += eventkit_cloud_urlpatterns
"""Utilities for use across the Message Manamgent module""" class APIDict(dict): """Custom API Dict type""" def __init__(self, session_func, uri=None, *args, **kwargs): super(APIDict, self).__init__(*args, **kwargs) self.session_func = session_func self.uri = uri def __setitem__(self, key, value): """Handle adding a new key, value pair in this dict via an appropriate API PUT call """ response = super(APIDict, self).__setitem__(key, value) api_args = {x: self[x] for x in self if x is not None and not hasattr(self[x], '__call__') and key != 'uri'} if self.session_func is not None and self.uri is not None: self.session_func().execute(self.uri, 'POST', api_args) return response def __delitem__(self, key): """Handle the removal of an entry in this dict via an appropriate API call """ response = super(APIDict, self).__delitem__(key) api_args = {x: self[x] for x in self if x is not None and not hasattr(self[x], '__call__') and key != 'uri'} if self.session_func is not None and self.uri is not None: self.session_func().execute(self.uri, 'POST', api_args) return response
from __future__ import print_function import slices, go a = [1,2,3,4] b = slices.CreateSlice() print ("Python list:", a) print ("Go slice: ", b) print ("slices.IntSum from Python list:", slices.IntSum(go.Slice_int(a))) print ("slices.IntSum from Go slice:", slices.IntSum(b)) su8 = slices.SliceUint8([1,2]) su16 = slices.SliceUint16([2,3]) su32 = slices.SliceUint32([3,4]) su64 = slices.SliceUint64([4,5]) print ("unsigned slice elements:", su8[0], su16[0], su32[0], su64[0]) si8 = slices.SliceInt8([-1,-2]) si16 = slices.SliceInt16([-2,-3]) si32 = slices.SliceInt32([-3,-4]) si64 = slices.SliceInt64([-4,-5]) print ("signed slice elements:", si8[0], si16[0], si32[0], si64[0]) ss = slices.CreateSSlice() print ("struct slice: ", ss) print ("struct slice[0]: ", ss[0]) print ("struct slice[1]: ", ss[1]) print ("struct slice[2].Name: ", ss[2].Name) slices.PrintSSlice(ss) slices.PrintS(ss[0]) slices.PrintS(ss[1]) print("OK")
import argparse import sys from pgltools_library import * def _formatContacts(contacts,delim): return [delim.join([str(y) for y in x[:-1]])+delim+delim.join([str(y) for y in x[-1]]) for x in contacts if len(x)!=0] def _subtract1D(chrA1,startA1,endA1,chrA2,startA2,endA2,chrB,startB,endB,Aannots,whichBin): if whichBin==1: if startA1 < startB: if endA1<endB: return [[chrA1,startA1,startB,chrA2,startA2,endA2,Aannots]] else: return [[chrA1,startA1,startB,chrA2,startA2,endA2,Aannots],[chrA1,endB,endA1,chrA2,startA2,endA2,Aannots]] else: if endA1<endB: return [[]] else: return [[chrA1,startB,endA1,chrA2,startA2,endA2,Aannots]] elif whichBin==2: if startA2 < startB: if endA2<endB: return [[chrA1,startA1,endA1,chrA2,startA2,startB,Aannots]] else: return [[chrA1,startA1,endA1,chrA2,startA2,startB,Aannots],[chrA1,startA1,endA1,chrA2,endB,endA2,Aannots]] else: if endA2<endB: return [[]] else: return [[chrA1,startA1,endA1,chrA2,startB,endA2,Aannots]] def _overlap1D(contactsA,bedB): #our files are going to be given with [chr1 binStart1 binEnd1 chr2 binStart2 binEnd2] newPeaks=[] #compare file 2 to file 1, meaning advance file 2 first for i in range(len(contactsA)): chrA1=contactsA[i][0] startA1=contactsA[i][1] endA1=contactsA[i][2] chrA2=contactsA[i][3] startA2=contactsA[i][4] endA2=contactsA[i][5] Aannots=contactsA[i][6] if chrA1==chrA2: if chrA1 in bedB: for k in range(len(bedB[chrA1])): chrB=chrA1 startB=bedB[chrB][k][0] endB=bedB[chrB][k][1] if startB!=endB: startB+=1 if startA1 < startB and endA1 <= startB: pass elif startB < startA1 and endB <= startA1: pass else: newPeaks.extend(_subtract1D(chrA1,startA1,endA1,chrA2,startA2,endA2,chrB,startB,endB,Aannots,1)) if startA2 < startB and endA2 < startB: pass elif startB < startA2 and endB < startA2: pass else: newPeaks.extend(_subtract1D(chrA1,startA1,endA1,chrA2,startA2,endA2,chrB,startB,endB,Aannots,2)) else: if chrA1 in bedB: for k in range(len(bedB[chrA1])): chrB=chrA1 startB=bedB[chrB][k][0] endB=bedB[chrB][k][1] if startB!=endB: startB+=1 if startA1 < startB and endA1 <= startB: continue elif startB < startA1 and endB <= startA1: break else: newPeaks.extend(_subtract1D(chrA1,startA1,endA1,chrA2,startA2,endA2,chrB,startB,endB,Aannots,1)) if chrA2 in bedB: for k in range(len(bedB[chrA2])): chrB=chrA2 startB=bedB[chrB][k][0] endB=bedB[chrB][k][1] if startB!=endB: startB+=1 if startA2 < startB and endA2 <= startB: continue elif startB < startA2 and endB <= startA2: break else: newPeaks.extend(_subtract1D(chrA1,startA1,endA1,chrA2,startA2,endA2,chrB,startB,endB,Aannots,2)) return newPeaks def subtract1D(A,B,args,header): res=_overlap1D(A,B) res=_formatContacts(res,"\t") if __name__=="__main__": try: if len(res)!=0: if len(header)!=0: print(header) print("\n".join(res)) except IOError as e: if e.errno==32: exit() else: funcOut=[] for r in res: r=r.split("\t") funcOut.append([r[0],int(r[1]),int(r[2]),r[3],int(r[4]),int(r[5]),r[6:]]) return funcOut if __name__=="__main__": #parse args parser=argparse.ArgumentParser() parser._optionals.title = "Arguments" parser.add_argument('-a',help="File Path for file a. Required unless -stdInA is used", required=False,default="%#$") parser.add_argument('-stdInA',help="Will use stdin for file a. ", required=False,action='store_true') parser.add_argument('-b',help="File Path for file b. 
Required for merge and intersect unless -stdInB is used", required=False,default="%#$") parser.add_argument('-stdInB',help="Will use stdin for file b.",action='store_true') args = vars(parser.parse_args()) #show help with no args if len(sys.argv)==1: parser.print_help() sys.exit(1) #validate args if args['stdInB'] and args['stdInA']: print "stdin can only be used for either a or b" exit(1) elif args['stdInA']==False and args['a']=="%#$": print "either -stdInA or -a must be used" exit(1) elif args['stdInB']==False and args['b']=="%#$": print "either -stdInB or -b must be used" exit(1) if args['stdInA']: header,A=processStdin() else: header,A=processFile(args['a']) if checkSorted(A)==1: print ("File A is not sorted. Please use pgltools sort [FILE]") exit() elif checkSorted(A)==2: print ("File A is not a pgl file. Please use pgltools formatbedpe [FILE]") exit() if args["b"]!="%#$": _,B=processBedFile(args['b']) if args['stdInB']: _,B=processStdinBed() subtract1D(A,B,args,header)
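# --- Illustrative sketch (not part of the original script) ---
# Shows what the core _subtract1D helper computes: removing the portion of a
# paired-loci entry's anchor that overlaps a 1D interval. Coordinates below
# are toy values, not real data.
def _demo_subtract():
    # contact: chr1:100-200 <-> chr1:500-600, annotations ['.'];
    # subtracting chr1:150-250 from the first anchor leaves 100-150
    pieces = _subtract1D("chr1", 100, 200, "chr1", 500, 600,
                         "chr1", 150, 250, ["."], 1)
    for p in _formatContacts(pieces, "\t"):
        print(p)   # -> chr1  100  150  chr1  500  600  .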
from django.db.models.fields import FieldDoesNotExist class WrongManager(Exception): pass
from __future__ import absolute_import, division, print_function, unicode_literals import pytest ; pytest import six from bokeh.document import Document import bokeh.application.handlers.code as bahc script_adds_two_roots = """ from bokeh.io import curdoc from bokeh.model import Model from bokeh.core.properties import Int, Instance class AnotherModelInTestScript(Model): bar = Int(1) class SomeModelInTestScript(Model): foo = Int(2) child = Instance(Model) curdoc().add_root(AnotherModelInTestScript()) curdoc().add_root(SomeModelInTestScript()) """ class TestCodeHandler(object): def test_empty_script(self): doc = Document() handler = bahc.CodeHandler(source="# This script does nothing", filename="path/to/test_filename") handler.modify_document(doc) if handler.failed: raise RuntimeError(handler.error) assert not doc.roots @pytest.mark.skipif(six.PY3, reason="this test doesn't have a Python 3 equivalent") def test_exec_and___future___flags(self): doc = Document() handler = bahc.CodeHandler(source="exec(\"print \\\"XXX\\\"\")", filename="path/to/test_filename") handler.modify_document(doc) if handler.failed: raise RuntimeError(handler.error) assert not doc.roots def test_script_adds_roots(self): doc = Document() handler = bahc.CodeHandler(source=script_adds_two_roots, filename="path/to/test_filename") handler.modify_document(doc) if handler.failed: raise RuntimeError(handler.error) assert len(doc.roots) == 2 def test_script_bad_syntax(self): doc = Document() handler = bahc.CodeHandler(source="This is a syntax error", filename="path/to/test_filename") handler.modify_document(doc) assert handler.error is not None assert 'Invalid syntax' in handler.error def test_script_runtime_error(self): doc = Document() handler = bahc.CodeHandler(source="raise RuntimeError('nope')", filename="path/to/test_filename") handler.modify_document(doc) assert handler.error is not None assert 'nope' in handler.error def test_script_sys_path(self): doc = Document() handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("path: '%s'" % sys.path[0])""", filename="path/to/test_filename") handler.modify_document(doc) assert handler.error is not None assert "path: 'path/to'" in handler.error def test_script_argv(self): doc = Document() handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("argv: %r" % sys.argv)""", filename=str("path/to/test_filename")) # str needed for py2.7 handler.modify_document(doc) assert handler.error is not None assert "argv: ['test_filename']" in handler.error doc = Document() handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("argv: %r" % sys.argv)""", filename=str("path/to/test_filename"), argv=[10, 20, 30]) # str needed for py2.7 handler.modify_document(doc) assert handler.error is not None assert "argv: ['test_filename', 10, 20, 30]" in handler.error def test_safe_to_fork(self): doc = Document() handler = bahc.CodeHandler(source="# This script does nothing", filename="path/to/test_filename") assert handler.safe_to_fork handler.modify_document(doc) if handler.failed: raise RuntimeError(handler.error) assert not handler.safe_to_fork
import datetime from decimal import Decimal import mock from constance.admin import get_values from constance.checks import check_fieldsets, get_inconsistent_fieldnames from constance.management.commands.constance import _set_constance_value from django.core.exceptions import ValidationError from django.test import TestCase from constance import settings class ChecksTestCase(TestCase): @mock.patch("constance.settings.CONFIG_FIELDSETS", {"Set1": settings.CONFIG.keys()}) def test_get_inconsistent_fieldnames_none(self): """ Test that get_inconsistent_fieldnames returns an empty set and no checks fail if CONFIG_FIELDSETS accounts for every key in settings.CONFIG. """ self.assertFalse(get_inconsistent_fieldnames()) self.assertEqual(0, len(check_fieldsets())) @mock.patch( "constance.settings.CONFIG_FIELDSETS", {"Set1": list(settings.CONFIG.keys())[:-1]}, ) def test_get_inconsistent_fieldnames_one(self): """ Test that get_inconsistent_fieldnames returns a set and the check fails if CONFIG_FIELDSETS does not account for every key in settings.CONFIG. """ self.assertTrue(get_inconsistent_fieldnames()) self.assertEqual(1, len(check_fieldsets())) @mock.patch( "constance.settings.CONFIG_FIELDSETS", {} ) def test_check_fieldsets(self): """ check_fieldsets should not output warning if CONFIG_FIELDSETS is not defined. """ del settings.CONFIG_FIELDSETS self.assertEqual(0, len(check_fieldsets()))
from __future__ import absolute_import, division, print_function, unicode_literals

import pytest ; pytest

import io
from os import pardir
from os.path import split, join, abspath, relpath, basename, splitext
import subprocess


@pytest.mark.codebase
def test_code_quality():
    ''' Applies a collection of general codebase style and quality rules to
    every file in the repository. Unless specifically excepted:

    * Files should not contain tabs
    * Files should not start with newlines
    * Files should end with one empty line
    * Lines should not contain trailing whitespace
    * Lines should not exceed 160 characters

    '''
    errors = collect_errors()

    assert len(errors) == 0, "Code quality issues:\n%s" % "\n".join(errors)


TOP_PATH = abspath(join(split(__file__)[0], pardir, pardir))

MAX_LINE_LENGTH = 160

message_space = "File contains trailing whitespace: %s, line %s."
message_tabs = "File contains tabs instead of spaces: %s, line %s."
message_carriage = "File contains carriage returns at end of line: %s, line %s"
message_eof = "File does not end with a newline: %s, line %s"
message_multi_bof = "File starts with more than 1 empty line: %s, line %s"
message_multi_eof = "File ends with more than 1 empty line: %s, line %s"
message_too_long = "File contains a line with over %(n)s characters: %%s, line %%s" % dict(n=MAX_LINE_LENGTH)


def tab_in_leading(s):
    """ Returns True if there are tabs in the leading whitespace of a line,
        including the whitespace of docstring code samples.
    """
    n = len(s) - len(s.lstrip())
    if not s[n:n + 3] in ['...', '>>>']:
        check = s[:n]
    else:
        smore = s[n + 3:]
        check = s[:n] + smore[:len(smore) - len(smore.lstrip())]
    return check.expandtabs() != check


def use_tab_rule(fname):
    return not (basename(fname) == 'Makefile' or splitext(fname)[1] == '.bat')


exclude_paths = ("CHANGELOG",)

exclude_exts = (".patch", ".png", ".jpg", ".pxm", ".ico", ".ics", ".gz", ".gif", ".enc", ".svg", ".xml",
                ".shp", ".dbf", ".shx", "otf", ".eot", ".ttf", ".woff", ".woff2")

exclude_dirs = ("sphinx/draw.io",)


def collect_errors():
    errors = []

    def test_this_file(fname, test_file):
        line = None

        for idx, line in enumerate(test_file):
            line_no = idx + 1

            if idx == 0 and len(line.strip()) == 0:
                errors.append((message_multi_bof, fname, line_no))

            if line.endswith(" \n") or line.endswith("\t\n"):
                errors.append((message_space, fname, line_no))

            if line.endswith("\r\n") or line.endswith("\r"):
                errors.append((message_carriage, fname, line_no))

            if use_tab_rule(fname) and tab_in_leading(line):
                errors.append((message_tabs, fname, line_no))

            #if len(line) > MAX_LINE_LENGTH:
            #    errors.append((message_too_long, fname, line_no))

        if line is not None:
            if idx > 0 and len(line.strip()) == 0:
                errors.append((message_multi_eof, fname, line_no))

            if not line.endswith('\n'):
                errors.append((message_eof, fname, line_no))

    paths = subprocess.check_output(["git", "ls-files"]).decode('utf-8').split("\n")

    for path in paths:
        if not path:
            continue
        if path in exclude_paths:
            continue
        if path.endswith(exclude_exts):
            continue
        if path.startswith(exclude_dirs):
            continue
        with io.open(path, 'r', encoding='utf-8') as file:
            test_this_file(path, file)

    return [ msg % (relpath(fname, TOP_PATH), line_no) for (msg, fname, line_no) in errors ]


def bad_files():
    return " ".join(sorted(set([ file for (_, file, _) in collect_errors() ])))
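# --- Illustrative sketch (not part of the original test module) ---
# tab_in_leading() only flags tabs in a line's leading whitespace, and it
# deliberately looks past doctest prompts so code samples are checked too.
def _demo_tab_in_leading():
    assert tab_in_leading("\tx = 1")             # tab in the indent -> flagged
    assert not tab_in_leading("    x = '\t'")    # tab inside a string -> ignored
    assert tab_in_leading("    >>> \tprint(1)")  # tab after a doctest prompt -> flagged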
""" Models for representing top-level plot objects. """ from __future__ import absolute_import from six import string_types import warnings from ..core.query import find from ..core import validation from ..core.validation.errors import REQUIRED_RANGE from ..core.validation.warnings import ( MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL, SNAPPED_TOOLBAR_ANNOTATIONS) from ..core.enums import Location from ..core.property_mixins import LineProps, FillProps from ..core.properties import ( Bool, Int, String, Enum, Auto, Instance, Either, List, Dict, Include, Override, TitleProp) from ..util.string import nice_join from .annotations import Legend, Title from .axes import Axis from .glyphs import Glyph from .grids import Grid from .ranges import Range, FactorRange from .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer from .sources import DataSource, ColumnDataSource from .tools import Tool, ToolEvents, Toolbar from .layouts import LayoutDOM from ..util.plot_utils import _list_attr_splat, _select_helper DEP_MSG_0_12_0 = """ Plot property '%s' was deprecated in 0.12.0 and will be removed. Use '%s' instead. """ DEFAULT_TITLE = lambda: Title(text="") class Plot(LayoutDOM): """ Model representing a plot, containing glyphs, guides, annotations. """ def __init__(self, **kwargs): if "tool_events" not in kwargs: kwargs["tool_events"] = ToolEvents() if "toolbar" in kwargs and "logo" in kwargs: raise ValueError("Conflicing properties set on plot: toolbar, logo.") if "toolbar" in kwargs and "tools" in kwargs: raise ValueError("Conflicing properties set on plot: toolbar, tools.") if "toolbar" not in kwargs: tools = kwargs.pop('tools', []) logo = kwargs.pop('logo', 'normal') kwargs["toolbar"] = Toolbar(tools=tools, logo=logo) if "border_fill" in kwargs and "border_fill_color" in kwargs: raise ValueError("Conflicting properties set on plot: border_fill, border_fill_color.") if "background_fill" in kwargs and "background_fill_color" in kwargs: raise ValueError("Conflicting properties set on plot: background_fill, background_fill_color.") super(LayoutDOM, self).__init__(**kwargs) def select(self, *args, **kwargs): ''' Query this object and all of its references for objects that match the given selector. There are a few different ways to call the ``select`` method. The most general is to supply a JSON-like query dictionary as the single argument or as keyword arguments: Args: selector (JSON-like) : some sample text Keyword Arguments: kwargs : query dict key/values as keyword arguments For convenience, queries on just names can be made by supplying the ``name`` string as the single parameter: Args: name (str) : the name to query on Also queries on just type can be made simply by supplying the ``Model`` subclass as the single parameter: Args: type (Model) : the type to query on Returns: seq[Model] Examples: .. code-block:: python # These two are equivalent p.select({"type": HoverTool}) p.select(HoverTool) # These two are also equivalent p.select({"name": "mycircle"}) p.select("mycircle") # Keyword arguments can be supplied in place of selector dict p.select({"name": "foo", "type": HoverTool}) p.select(name="foo", type=HoverTool) ''' selector = _select_helper(args, kwargs) # Want to pass selector that is a dictionary return _list_attr_splat(find(self.references(), selector, {'plot': self})) def row(self, row, gridplot): ''' Return whether this plot is in a given row of a GridPlot. 
        Args:
            row (int) : index of the row to test
            gridplot (GridPlot) : the GridPlot to check

        Returns:
            bool

        '''
        return self in gridplot.row(row)

    def column(self, col, gridplot):
        ''' Return whether this plot is in a given column of a GridPlot.

        Args:
            col (int) : index of the column to test
            gridplot (GridPlot) : the GridPlot to check

        Returns:
            bool

        '''
        return self in gridplot.column(col)

    def _axis(self, *sides):
        objs = []
        for s in sides:
            objs.extend(getattr(self, s, []))
        axis = [obj for obj in objs if isinstance(obj, Axis)]
        return _list_attr_splat(axis)

    @property
    def xaxis(self):
        """ Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.

        """
        return self._axis("above", "below")

    @property
    def yaxis(self):
        """ Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.

        """
        return self._axis("left", "right")

    @property
    def axis(self):
        """ Splattable list of :class:`~bokeh.models.axes.Axis` objects.

        """
        return _list_attr_splat(self.xaxis + self.yaxis)

    @property
    def legend(self):
        """Splattable list of :class:`~bokeh.models.annotations.Legend` objects.

        """
        legends = [obj for obj in self.renderers if isinstance(obj, Legend)]
        return _list_attr_splat(legends)

    def _grid(self, dimension):
        grid = [obj for obj in self.renderers
                if isinstance(obj, Grid) and obj.dimension == dimension]
        return _list_attr_splat(grid)

    @property
    def xgrid(self):
        """ Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.

        """
        return self._grid(0)

    @property
    def ygrid(self):
        """ Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.

        """
        return self._grid(1)

    @property
    def grid(self):
        """ Splattable list of :class:`~bokeh.models.grids.Grid` objects.

        """
        return _list_attr_splat(self.xgrid + self.ygrid)

    @property
    def tools(self):
        return self.toolbar.tools

    @tools.setter
    def tools(self, tools):
        self.toolbar.tools = tools

    def add_layout(self, obj, place='center'):
        ''' Adds an object to the plot in a specified place.

        Args:
            obj (Renderer) : the object to add to the Plot
            place (str, optional) : where to add the object (default: 'center')
                Valid places are: 'left', 'right', 'above', 'below', 'center'.

        Returns:
            None

        '''
        valid_places = ['left', 'right', 'above', 'below', 'center']
        if place not in valid_places:
            raise ValueError(
                "Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
            )

        if hasattr(obj, 'plot'):
            if obj.plot is not None:
                raise ValueError("object to be added already has 'plot' attribute set")
            obj.plot = self

        self.renderers.append(obj)

        if place != 'center':  # was `is not 'center'`, an identity check against a str literal
            getattr(self, place).append(obj)

    def add_tools(self, *tools):
        ''' Adds tools to the plot.

        Args:
            *tools (Tool) : the tools to add to the Plot

        Returns:
            None

        '''
        if not all(isinstance(tool, Tool) for tool in tools):
            raise ValueError("All arguments to add_tool must be Tool subclasses.")

        for tool in tools:
            if tool.plot is not None:
                raise ValueError("tool %s to be added already has 'plot' attribute set" % tool)
            tool.plot = self
            if hasattr(tool, 'overlay'):
                self.renderers.append(tool.overlay)
            self.toolbar.tools.append(tool)

    def add_glyph(self, source_or_glyph, glyph=None, **kw):
        ''' Adds a glyph to the plot with associated data sources and ranges.

        This function will take care of creating and configuring a Glyph object,
        and then add it to the plot's list of renderers.

Args: source (DataSource) : a data source for the glyphs to all use glyph (Glyph) : the glyph to add to the Plot Keyword Arguments: Any additional keyword arguments are passed on as-is to the Glyph initializer. Returns: GlyphRenderer ''' if glyph is not None: source = source_or_glyph else: source, glyph = ColumnDataSource(), source_or_glyph if not isinstance(source, DataSource): raise ValueError("'source' argument to add_glyph() must be DataSource subclass") if not isinstance(glyph, Glyph): raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass") g = GlyphRenderer(data_source=source, glyph=glyph, **kw) self.renderers.append(g) return g def add_tile(self, tile_source, **kw): '''Adds new TileRenderer into the Plot.renderers Args: tile_source (TileSource) : a tile source instance which contain tileset configuration Keyword Arguments: Additional keyword arguments are passed on as-is to the tile renderer Returns: TileRenderer : TileRenderer ''' tile_renderer = TileRenderer(tile_source=tile_source, **kw) self.renderers.append(tile_renderer) return tile_renderer def add_dynamic_image(self, image_source, **kw): '''Adds new DynamicImageRenderer into the Plot.renderers Args: image_source (ImageSource) : a image source instance which contain image configuration Keyword Arguments: Additional keyword arguments are passed on as-is to the dynamic image renderer Returns: DynamicImageRenderer : DynamicImageRenderer ''' image_renderer = DynamicImageRenderer(image_source=image_source, **kw) self.renderers.append(image_renderer) return image_renderer @validation.error(REQUIRED_RANGE) def _check_required_range(self): missing = [] if not self.x_range: missing.append('x_range') if not self.y_range: missing.append('y_range') if missing: return ", ".join(missing) + " [%s]" % self @validation.warning(MISSING_RENDERERS) def _check_missing_renderers(self): if len(self.renderers) == 0: return str(self) @validation.warning(NO_DATA_RENDERERS) def _check_no_data_renderers(self): if len(self.select(DataRenderer)) == 0: return str(self) @validation.warning(MALFORMED_CATEGORY_LABEL) def _check_colon_in_category_label(self): if not self.x_range: return if not self.y_range: return broken = [] for range_name in ['x_range', 'y_range']: category_range = getattr(self, range_name) if not isinstance(category_range, FactorRange): continue for value in category_range.factors: if not isinstance(value, string_types): break if ':' in value: broken.append((range_name, value)) break if broken: field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value) for field, value in broken) return '%s [renderer: %s]' % (field_msg, self) @validation.warning(SNAPPED_TOOLBAR_ANNOTATIONS) def _check_snapped_toolbar_and_axis(self): if not self.toolbar_sticky: return if self.toolbar_location is None: return objs = getattr(self, self.toolbar_location) if len(objs) > 0: return str(self) __deprecated_attributes__ = ( 'background_fill', 'border_fill', 'logo', 'tools', 'responsive', 'title_text_baseline', 'title_text_align', 'title_text_alpha', 'title_text_color', 'title_text_font_style', 'title_text_font_size', 'title_text_font', 'title_standoff' ) x_range = Instance(Range, help=""" The (default) data range of the horizontal dimension of the plot. """) y_range = Instance(Range, help=""" The (default) data range of the vertical dimension of the plot. """) x_mapper_type = Either(Auto, String, help=""" What kind of mapper to use to convert x-coordinates in data space into x-coordinates in screen space. 
Typically this can be determined automatically, but this property can be useful to, e.g., show datetime values as floating point "seconds since epoch" instead of formatted dates. """) y_mapper_type = Either(Auto, String, help=""" What kind of mapper to use to convert y-coordinates in data space into y-coordinates in screen space. Typically this can be determined automatically, but this property can be useful to, e.g., show datetime values as floating point "seconds since epoch" instead of formatted dates """) extra_x_ranges = Dict(String, Instance(Range), help=""" Additional named ranges to make available for mapping x-coordinates. This is useful for adding additional axes. """) extra_y_ranges = Dict(String, Instance(Range), help=""" Additional named ranges to make available for mapping y-coordinates. This is useful for adding additional axes. """) hidpi = Bool(default=True, help=""" Whether to use HiDPI mode when available. """) title = TitleProp(default=DEFAULT_TITLE, help=""" A title for the plot. Can be a text string or a Title annotation. Default is Title(text=""). """) title_location = Enum(Location, default="above", help=""" Where the title will be located. Titles on the left or right side will be rotated. """) outline_props = Include(LineProps, help=""" The %s for the plot border outline. """) outline_line_color = Override(default="#e5e5e5") renderers = List(Instance(Renderer), help=""" A list of all renderers for this plot, including guides and annotations in addition to glyphs and markers. This property can be manipulated by hand, but the ``add_glyph`` and ``add_layout`` methods are recommended to help make sure all necessary setup is performed. """) toolbar = Instance(Toolbar, help=""" The toolbar associated with this plot which holds all the tools. The toolbar is automatically created with the plot. """) toolbar_location = Enum(Location, default="right", help=""" Where the toolbar will be located. If set to None, no toolbar will be attached to the plot. """) toolbar_sticky = Bool(default=True, help=""" Stick the toolbar to the edge of the plot. Default: True. If False, the toolbar will be outside of the axes, titles etc. """) tool_events = Instance(ToolEvents, help=""" A ToolEvents object to share and report tool events. """) left = List(Instance(Renderer), help=""" A list of renderers to occupy the area to the left of the plot. """) right = List(Instance(Renderer), help=""" A list of renderers to occupy the area to the right of the plot. """) above = List(Instance(Renderer), help=""" A list of renderers to occupy the area above of the plot. """) below = List(Instance(Renderer), help=""" A list of renderers to occupy the area below of the plot. """) plot_height = Int(600, help=""" Total height of the entire plot (including any axes, titles, border padding, etc.) .. note:: This corresponds directly to the height of the HTML canvas that will be used. """) plot_width = Int(600, help=""" Total width of the entire plot (including any axes, titles, border padding, etc.) .. note:: This corresponds directly to the width of the HTML canvas that will be used. """) background_props = Include(FillProps, help=""" The %s for the plot background style. """) background_fill_color = Override(default='#ffffff') border_props = Include(FillProps, help=""" The %s for the plot border style. """) border_fill_color = Override(default='#ffffff') min_border_top = Int(help=""" Minimum size in pixels of the padding region above the top of the central plot region. .. note:: This is a *minimum*. 
The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_bottom = Int(help=""" Minimum size in pixels of the padding region below the bottom of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_left = Int(help=""" Minimum size in pixels of the padding region to the left of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_right = Int(help=""" Minimum size in pixels of the padding region to the right of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border = Int(5, help=""" A convenience property to set all all the ``min_border_X`` properties to the same value. If an individual border property is explicitly set, it will override ``min_border``. """) h_symmetry = Bool(True, help=""" Whether the total horizontal padding on both sides of the plot will be made equal (the left or right padding amount, whichever is larger). """) v_symmetry = Bool(False, help=""" Whether the total vertical padding on both sides of the plot will be made equal (the top or bottom padding amount, whichever is larger). """) lod_factor = Int(10, help=""" Decimation factor to use when applying level-of-detail decimation. """) lod_threshold = Int(2000, help=""" A number of data points, above which level-of-detail downsampling may be performed by glyph renderers. Set to ``None`` to disable any level-of-detail downsampling. """) lod_interval = Int(300, help=""" Interval (in ms) during which an interactive tool event will enable level-of-detail downsampling. """) lod_timeout = Int(500, help=""" Timeout (in ms) for checking whether interactive tool events are still occurring. Once level-of-detail mode is enabled, a check is made every ``lod_timeout`` ms. If no interactive tool events have happened, level-of-detail mode is disabled. """) webgl = Bool(False, help=""" Whether WebGL is enabled for this plot. If True, the glyphs that support this will render via WebGL instead of the 2D canvas. """) # # DEPRECATED PROPERTIES # @property def responsive(self): warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode')) return self.sizing_mode != "fixed" @responsive.setter def responsive(self, value): warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode')) warnings.warn(""" The 'responsive' property has been deprecated in 0.12.0. It has been replaced by 'sizing_mode' which accepts one of five modes: fixed, scale_width, scale_height, scale_both, stretch_both 'responsive = False' is the equivalent of 'sizing_mode = "fixed"' 'responsive = True' is the equivalent of 'sizing_mode = "scale_width"' """) if value is True: self.sizing_mode = "scale_width" elif value is False: self.sizing_mode = "fixed" else: raise ValueError("Plot.responsive only accepts True or False, got: %r" % value) @property def background_fill(self): warnings.warn( """ Plot property 'background_fill' was deprecated in Bokeh 0.11.0 and will be removed. Use 'background_fill_color' instead. """) return self.background_fill_color @background_fill.setter def background_fill(self, color): warnings.warn( """ Plot property 'background_fill' was deprecated in Bokeh 0.11.0 and will be removed. Use 'background_fill_color' instead. 
""") self.background_fill_color = color @property def border_fill(self): warnings.warn( """ Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and will be removed. Use 'border_fill_color' instead. """) return self.border_fill_color @border_fill.setter def border_fill(self, color): warnings.warn( """ Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and will be removed. Use 'border_fill_color' instead. """) self.border_fill_color = color @property def logo(self): warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo')) return self.toolbar.logo @logo.setter def logo(self, value): warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo')) self.toolbar.logo = value @property def title_standoff(self): warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset')) return self.title.offset @title_standoff.setter def title_standoff(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset')) self.title.offset = value @property def title_text_font(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font')) return self.title.text_font @title_text_font.setter def title_text_font(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font')) self.title.text_font = value @property def title_text_font_size(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size')) return self.title.text_font_size @title_text_font_size.setter def title_text_font_size(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size')) self.title.text_font_size = value @property def title_text_font_style(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style')) return self.title.text_font_style @title_text_font_style.setter def title_text_font_style(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style')) self.title.text_font_style = value @property def title_text_color(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color')) return self.title.text_color @title_text_color.setter def title_text_color(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color')) self.title.text_color = value @property def title_text_alpha(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha')) return self.title.text_alpha @title_text_alpha.setter def title_text_alpha(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha')) self.title.text_alpha = value @property def title_text_align(self): warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align')) warnings.warn("""``title_text_align`` was deprecated in 0.12.0 and is no longer available on the new Title object. There is a new ``plot.title.title_align`` which is similar but not exactly the same. The new ``title_align`` both positions and aligns the title. If you need the exact ``title_text_align`` behavior, please add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``. """) return self.title.align @title_text_align.setter def title_text_align(self, value): warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align')) warnings.warn("""``title_text_align`` was deprecated in 0.12.0 and is no longer available on the new Title object. 
There is a new ``plot.title.title_align`` which is similar but not exactly the same. The new ``title_align`` both positions and aligns the title. If you need the exact ``title_text_align`` behavior, please add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``. """) self.title.align = value @property def title_text_baseline(self): warnings.warn("""title_text_baseline was deprecated in 0.12.0 and is no longer available on the new Title object. If you need to alter the text_baseline, please add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``. """) return None @title_text_baseline.setter def title_text_baseline(self, value): warnings.warn("""title_text_baseline was deprecated in 0.12.0 and is no longer available on the new Title object. If you need to alter the text_baseline, please add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``. """)
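# --- Usage sketch (not part of the module above) ---
# A minimal illustration of the flexible ``add_glyph`` signature defined
# earlier: it may be called as ``add_glyph(glyph)``, in which case an empty
# ColumnDataSource is created internally, or as ``add_glyph(source, glyph)``.
# The ``Plot``/``Circle`` setup below is an assumed example, not from the source.
from bokeh.models import Circle, ColumnDataSource, Plot, Range1d

plot = Plot(x_range=Range1d(0, 10), y_range=Range1d(0, 10))

# explicit source + glyph
source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
renderer = plot.add_glyph(source, Circle(x='x', y='y'))

# glyph only: an empty ColumnDataSource is created for the renderer
renderer2 = plot.add_glyph(Circle(x='x', y='y'))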
import sys import time import datetime import os import os.path import io import json import pprint import ssl if hasattr(ssl, '_create_unverified_context'): ssl._create_default_https_context = ssl._create_unverified_context sys.path.insert(0,os.path.abspath("venv/lib/python2.7/site-packages/")) from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor import requests import magic import blessings import dateutil.parser import dateutil.tz import biokbase.Transform.Client import biokbase.Transform.script_utils import biokbase.userandjobstate.client import biokbase.workspace.client logger = biokbase.Transform.script_utils.stderrlogger(__file__) def show_workspace_object_list(workspace_url, workspace_name, object_name, token): print term.blue("\tYour KBase data objects:") c = biokbase.workspace.client.Workspace(workspace_url, token=token) object_list = c.list_objects({"workspaces": [workspace_name]}) object_list = [x for x in object_list if object_name in x[1]] for x in sorted(object_list): elapsed_time = datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc()) - dateutil.parser.parse(x[3]) print "\t\thow_recent: {0}\n\t\tname: {1}\n\t\ttype: {2}\n\t\tsize: {3:d}\n".format(elapsed_time, x[1], x[2], x[-2]) def show_workspace_object_contents(workspace_url, workspace_name, object_name, field_list, token): print term.blue("\tYour KBase data object {0}:".format(object_name)) c = biokbase.workspace.client.Workspace(workspace_url, token=token) object_contents = c.get_objects([{"workspace": workspace_name, "name": object_name}])[0] if field_list is None or len(field_list) == 0: field_list = object_contents.keys() for k in field_list: print "\t\t{0}: \n".format(k) lines = json.dumps(object_contents[k], sort_keys=True, indent=4).split("\n") for n in lines: print "\t\t\t{0}".format(n) def show_job_progress(ujs_url, awe_url, awe_id, ujs_id, token): c = biokbase.userandjobstate.client.UserAndJobState(url=ujs_url, token=token) completed = ["complete", "completed", "success"] error = ["error", "fail"] term = blessings.Terminal() header = dict() header["Authorization"] = "Oauth {0}".format(token) print term.blue("\tUJS Job Status:") # wait for UJS to complete last_status = "" time_limit = 40 start = datetime.datetime.utcnow() while 1: try: status = c.get_job_status(ujs_id) except Exception, e: print term.red("\t\tIssue connecting to UJS!") status[1] = "error" status[2] = "Caught Exception" if (datetime.datetime.utcnow() - start).seconds > time_limit: print "\t\tJob is taking longer than it should, check debugging messages for more information." 
print c.get_job_status(ujs_id) status[1] = "error" status[2] = "Timeout" if last_status != status[2]: print "\t\t{0} status update: {1}".format(status[0], status[2]) last_status = status[2] if status[1] in completed: print term.green("\t\tKBase conversion completed!\n") return status break elif status[1] in error: print term.red("\t\tOur job failed!\n") print term.bold("Additional AWE job details for debugging") # check awe job output awe_details = requests.get("{0}/job/{1}".format(awe_url,awe_id), headers=header, verify=True) job_info = awe_details.json()["data"] print term.red(json.dumps(job_info, sort_keys=True, indent=4)) awe_stdout = requests.get("{0}/work/{1}?report=stdout".format(awe_url,job_info["tasks"][0]["taskid"]+"_0"), headers=header, verify=True) print term.red("STDOUT : " + json.dumps(awe_stdout.json()["data"], sort_keys=True, indent=4)) awe_stderr = requests.get("{0}/work/{1}?report=stderr".format(awe_url,job_info["tasks"][0]["taskid"]+"_0"), headers=header, verify=True) print term.red("STDERR : " + json.dumps(awe_stderr.json()["data"], sort_keys=True, indent=4)) break def convert(transform_url, options, token): c = biokbase.Transform.Client.Transform(url=transform_url, token=token) response = c.convert(options) return response if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='KBase Upload demo and client') parser.add_argument('--demo', action="store_true") parser.add_argument('--ujs_service_url', nargs='?', help='UserandJobState service for monitoring progress', const="", default="https://kbase.us/services/userandjobstate/") parser.add_argument('--workspace_service_url', nargs='?', help='Workspace service for KBase objects', const="", default="https://kbase.us/services/ws/") parser.add_argument('--awe_service_url', nargs='?', help='AWE service for additional job monitoring', const="", default="http://140.221.67.242:7080") parser.add_argument('--transform_service_url', nargs='?', help='Transform service that handles the data conversion to KBase', const="", default="http://140.221.67.242:7778/") parser.add_argument('--source_kbase_type', nargs='?', help='the source type of the data') parser.add_argument('--source_workspace_name', nargs='?', help='name of the source workspace', const="", default="gavinws") parser.add_argument('--source_object_name', nargs='?', help='name of the workspace object', const="", default="") parser.add_argument('--destination_kbase_type', nargs='?', help='the kbase object type to create') parser.add_argument('--destination_workspace_name', nargs='?', help='name of the workspace where your objects should be created', const="", default="upload_testing") parser.add_argument('--destination_object_name', nargs='?', help='name of the workspace object to create') args = parser.parse_args() token = os.environ.get("KB_AUTH_TOKEN") if token is None: if os.path.exists(os.path.expanduser("~/.kbase_config")): f = open(os.path.expanduser("~/.kbase_config"), 'r') config = f.read() if "token=" in config: token = config.split("token=")[1].split("\n",1)[0] else: raise Exception("Unable to find KBase token!") else: raise Exception("Unable to find KBase token!") if not args.demo: user_inputs = {"source_kbase_type": args.source_kbase_type, "source_workspace_name": args.source_workspace_name, "source_object_name": args.source_object_name, "destination_kbase_type": args.destination_kbase_type, "destination_workspace_name": args.destination_workspace_name, "destination_object_name": args.destination_object_name, "optional_arguments": 
{"transform": {}}} demos = [user_inputs] else: bigyakattack = {"source_kbase_type": "KBaseFile.AssemblyFile", "source_workspace_name": "gavinws", "source_object_name": "final.assembly.fasta", "destination_kbase_type": "KBaseGenomes.ContigSet", "destination_workspace_name": "upload_testing", "destination_object_name": "tastythunderyak", "optional_arguments": {"transform": {}}} snacksonyaks = {"source_kbase_type": "KBaseFile.AssemblyFile", "source_workspace_name": "gavinws", "source_object_name": "kayak", "destination_kbase_type": "KBaseGenomes.ContigSet", "destination_workspace_name": "upload_testing", "destination_object_name": "bitterthunderyak", "optional_arguments": {"transform": {}}} demos = [bigyakattack, snacksonyaks] services = {"ujs": args.ujs_service_url, "workspace": args.workspace_service_url, "awe": args.awe_service_url, "transform": args.transform_service_url} stamp = datetime.datetime.now().isoformat() os.mkdir(stamp) term = blessings.Terminal() for demo_inputs in demos: source_kbase_type = demo_inputs["source_kbase_type"] source_workspace_name = demo_inputs["source_workspace_name"] source_object_name = demo_inputs["source_object_name"] destination_kbase_type = demo_inputs["destination_kbase_type"] destination_workspace_name = demo_inputs["destination_workspace_name"] destination_object_name = demo_inputs["destination_object_name"] print "\n\n" print term.bold("#"*80) print term.white_on_black("Converting {0} => {1}".format(source_kbase_type,destination_kbase_type)) print term.bold("#"*80) conversionDownloadPath = os.path.join(stamp, source_kbase_type + "_to_" + destination_kbase_type) try: os.mkdir(conversionDownloadPath) except: pass downloadPath = os.path.join(conversionDownloadPath) try: print term.bold("Step 1: Make KBase type conversion request") convert_response = convert(services["transform"], {"source_kbase_type": source_kbase_type, "source_workspace_name": source_workspace_name, "source_object_name": source_object_name, "destination_kbase_type": destination_kbase_type, "destination_workspace_name": destination_workspace_name, "destination_object_name": destination_object_name, "optional_arguments": {"transform": {}} }, token) print term.blue("\tTransform service conversion requested:") print "\t\tConverting from {0} => {1}".format(source_kbase_type, destination_kbase_type) print "\t\tSaving to workspace {0} with object name {1}".format(destination_workspace_name,destination_object_name) print term.blue("\tTransform service responded with job ids:") print "\t\tAWE job id {0}\n\t\tUJS job id {1}".format(convert_response[0], convert_response[1]) show_job_progress(services["ujs"], services["awe"], convert_response[0], convert_response[1], token) print term.green("\tConversion successful. Yaks win.\n\n") show_workspace_object_list(services["workspace"], destination_workspace_name, destination_object_name, token) show_workspace_object_contents(services["workspace"], destination_workspace_name, destination_object_name, ["creator","info"], token) except Exception, e: print e.message raise
"""Job controller utils.""" import logging import os import subprocess import sys from reana_db.database import Session from reana_db.models import Workflow def singleton(cls): """Singelton decorator.""" instances = {} def getinstance(**kwargs): if cls not in instances: instances[cls] = cls(**kwargs) return instances[cls] return getinstance def update_workflow_logs(workflow_uuid, log_message): """Update workflow logs.""" try: logging.info("Storing workflow logs: {}".format(workflow_uuid)) workflow = Session.query(Workflow).filter_by(id_=workflow_uuid).one_or_none() workflow.logs += "\n" + log_message Session.commit() except Exception as e: logging.error("Exception while saving logs: {}".format(str(e)), exc_info=True) def initialize_krb5_token(workflow_uuid): """Create kerberos ticket from mounted keytab_file.""" cern_user = os.environ.get("CERN_USER") keytab_file = os.environ.get("CERN_KEYTAB") cmd = "kinit -kt /etc/reana/secrets/{} {}@CERN.CH".format(keytab_file, cern_user) if cern_user: try: subprocess.check_output(cmd, shell=True) except subprocess.CalledProcessError as err: msg = "Executing: {} \n Authentication failed: {}".format(cmd, err) Workflow.update_workflow_status( db_session=Session, workflow_uuid=workflow_uuid, status=None, new_logs=msg, ) logging.error(msg, exc_info=True) sys.exit(1) else: msg = "CERN_USER is not set." logging.error(msg, exc_info=True) Workflow.update_workflow_status( db_session=Session, workflow_uuid=workflow_uuid, status=None, new_logs=msg ) logging.error(msg, exc_info=True) @singleton class SSHClient: """SSH Client.""" import paramiko def __init__(self, hostname=None, port=None): """Initialize ssh client.""" self.hostname = hostname self.port = port self.establish_connection() def establish_connection(self): """Establish the connection.""" self.ssh_client = self.paramiko.SSHClient() self.ssh_client.set_missing_host_key_policy(self.paramiko.AutoAddPolicy()) self.ssh_client.connect(hostname=self.hostname, port=self.port, gss_auth=True) def exec_command(self, command): """Execute command and return exit code.""" if not self.ssh_client.get_transport().active: self.establish_connection() try: stdin, stdout, stderr = self.ssh_client.exec_command(command) if stdout.channel.recv_exit_status() != 0: raise Exception(stderr.read().decode("utf-8")) return stdout.read().decode("utf-8") except Exception as e: logging.error( "Exception while executing cmd: {} \n{}".format(command, str(e)), exc_info=True, )
import argparse import logging import os import pytest import subprocess import tempfile import time from mock import patch import teuthology from teuthology import misc from teuthology.config import set_config_attr from teuthology.openstack import TeuthologyOpenStack, OpenStack, OpenStackInstance from teuthology.openstack import NoFlavorException import scripts.openstack class TestOpenStackBase(object): def setup(self): OpenStack.token = None OpenStack.token_expires = None self.environ = {} for k in os.environ.keys(): if k.startswith('OS_'): self.environ[k] = os.environ[k] def teardown(self): OpenStack.token = None OpenStack.token_expires = None for k in os.environ.keys(): if k.startswith('OS_'): if k in self.environ: os.environ[k] = self.environ[k] else: del os.environ[k] class TestOpenStackInstance(TestOpenStackBase): teuthology_instance = """ { "OS-EXT-STS:task_state": null, "addresses": "Ext-Net=167.114.233.32", "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)", "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000", "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)", "id": "f3ca32d7-212b-458b-a0d4-57d1085af953", "security_groups": [ { "name": "default" } ], "user_id": "3a075820e5d24fda96cd340b87fd94e9", "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "project_id": "62cf1be03cec403c8ed8e64df55732ea", "config_drive": "", "status": "ACTIVE", "updated": "2015-11-03T13:48:53Z", "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d", "OS-SRV-USG:terminated_at": null, "key_name": "loic", "properties": "", "OS-EXT-AZ:availability_zone": "nova", "name": "mrdarkdragon", "created": "2015-08-17T12:21:31Z", "os-extended-volumes:volumes_attached": [{"id": "627e2631-fbb3-48cd-b801-d29cd2a76f74"}, {"id": "09837649-0881-4ee2-a560-adabefc28764"}, {"id": "44e5175b-6044-40be-885a-c9ddfb6f75bb"}] } """ teuthology_instance_no_addresses = """ { "OS-EXT-STS:task_state": null, "addresses": "", "image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)", "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000", "flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)", "id": "f3ca32d7-212b-458b-a0d4-57d1085af953", "security_groups": [ { "name": "default" } ], "user_id": "3a075820e5d24fda96cd340b87fd94e9", "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "project_id": "62cf1be03cec403c8ed8e64df55732ea", "config_drive": "", "status": "ACTIVE", "updated": "2015-11-03T13:48:53Z", "hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d", "OS-SRV-USG:terminated_at": null, "key_name": "loic", "properties": "", "OS-EXT-AZ:availability_zone": "nova", "name": "mrdarkdragon", "created": "2015-08-17T12:21:31Z", "os-extended-volumes:volumes_attached": [] } """ @classmethod def setup_class(self): if 'OS_AUTH_URL' not in os.environ: pytest.skip('no OS_AUTH_URL environment variable') def test_init(self): with patch.multiple( misc, sh=lambda cmd: self.teuthology_instance, ): o = OpenStackInstance('NAME') assert o['id'] == 'f3ca32d7-212b-458b-a0d4-57d1085af953' o = OpenStackInstance('NAME', {"id": "OTHER"}) assert o['id'] == "OTHER" def test_get_created(self): with patch.multiple( misc, sh=lambda cmd: self.teuthology_instance, ): o = OpenStackInstance('NAME') assert o.get_created() > 0 def test_exists(self): with patch.multiple( misc, sh=lambda cmd: self.teuthology_instance, ): o = 
OpenStackInstance('NAME') assert o.exists() def sh_raises(cmd): raise subprocess.CalledProcessError('FAIL', 'BAD') with patch.multiple( misc, sh=sh_raises, ): o = OpenStackInstance('NAME') assert not o.exists() def test_volumes(self): with patch.multiple( misc, sh=lambda cmd: self.teuthology_instance, ): o = OpenStackInstance('NAME') assert len(o.get_volumes()) == 3 def test_get_addresses(self): answers = [ self.teuthology_instance_no_addresses, self.teuthology_instance, ] def sh(self): return answers.pop(0) with patch.multiple( misc, sh=sh, ): o = OpenStackInstance('NAME') assert o.get_addresses() == 'Ext-Net=167.114.233.32' def test_get_ip_neutron(self): instance_id = '8e1fd70a-3065-46f8-9c30-84dc028c1834' ip = '10.10.10.4' def sh(cmd): if 'neutron subnet-list' in cmd: return """ [ { "ip_version": 6, "id": "c45b9661-b2ba-4817-9e3a-f8f63bf32989" }, { "ip_version": 4, "id": "e03a3dbc-afc8-4b52-952e-7bf755397b50" } ] """ elif 'neutron port-list' in cmd: return (""" [ { "device_id": "915504ad-368b-4cce-be7c-4f8a83902e28", "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.1\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc::1\\"}" }, { "device_id": "{instance_id}", "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"{ip}\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe07:76c1\\"}" }, { "device_id": "17e4a968-4caa-4cee-8e4b-f950683a02bd", "fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.5\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe9c:37f0\\"}" } ] """.replace('{instance_id}', instance_id). 
replace('{ip}', ip)) else: raise Exception("unexpected " + cmd) with patch.multiple( misc, sh=sh, ): assert ip == OpenStackInstance( instance_id, { 'id': instance_id }, ).get_ip_neutron() class TestOpenStack(TestOpenStackBase): flavors = """[ { "Name": "eg-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 800, "ID": "008f75de-c467-4d15-8f70-79c8fbe19538" }, { "Name": "hg-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 1600, "ID": "0297d7ac-fe6f-4ff1-b6e7-0b8b0908c94f" }, { "Name": "win-sp-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "039e31f2-6541-46c8-85cf-7f47fab7ad78" }, { "Name": "win-sp-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "0417a0e6-f68a-4b8b-a642-ca5ecb9652f7" }, { "Name": "hg-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 800, "ID": "042aefc6-b713-4a7e-ada5-3ff81daa1960" }, { "Name": "win-sp-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "0609290c-ad2a-40f0-8c66-c755dd38fe3f" }, { "Name": "win-eg-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 800, "ID": "0651080f-5d07-44b1-a759-7ea4594b669e" }, { "Name": "win-sp-240", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 1600, "ID": "07885848-8831-486d-8525-91484c09cc7e" }, { "Name": "win-hg-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "079aa0a2-5e48-4e58-8205-719bc962736e" }, { "Name": "eg-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 1600, "ID": "090f8b8c-673c-4ab8-9a07-6e54a8776e7b" }, { "Name": "win-hg-15-ssd-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "10e10c58-d29f-4ff6-a1fd-085c35a3bd9b" }, { "Name": "eg-15-ssd", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "1340a920-0f2f-4c1b-8d74-e2502258da73" }, { "Name": "win-eg-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "13e54752-fbd0-47a6-aa93-e5a67dfbc743" }, { "Name": "eg-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "15c07a54-2dfb-41d9-aa73-6989fd8cafc2" }, { "Name": "win-eg-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "15e0dfcc-10f4-4e70-8ac1-30bc323273e2" }, { "Name": "vps-ssd-1", "RAM": 2000, "Ephemeral": 0, "VCPUs": 1, "Is Public": true, "Disk": 10, "ID": "164fcc7e-7771-414f-a607-b388cb7b7aa0" }, { "Name": "win-sp-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "169415e1-0979-4527-94fb-638c885bbd8c" }, { "Name": "win-hg-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "16f13d5b-be27-4b8b-88da-959d3904d3ba" }, { "Name": "win-sp-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "1788102b-ab80-4a0c-b819-541deaca7515" }, { "Name": "win-sp-240-flex", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "17bcfa14-135f-442f-9397-a4dc25265560" }, { "Name": "win-eg-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "194ca9ba-04af-4d86-ba37-d7da883a7eab" }, { "Name": "win-eg-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "19ff8837-4751-4f6c-a82b-290bc53c83c1" }, { "Name": 
"win-eg-30-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "1aaef5e5-4df9-4462-80d3-701683ab9ff0" }, { "Name": "eg-15", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "1cd85b81-5e4d-477a-a127-eb496b1d75de" }, { "Name": "hg-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 1600, "ID": "1f1efedf-ec91-4a42-acd7-f5cf64b02d3c" }, { "Name": "hg-15-ssd-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "20347a07-a289-4c07-a645-93cb5e8e2d30" }, { "Name": "win-eg-7-ssd", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "20689394-bd77-4f4d-900e-52cc8a86aeb4" }, { "Name": "win-sp-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "21104d99-ba7b-47a0-9133-7e884710089b" }, { "Name": "win-sp-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "23c21ecc-9ee8-4ad3-bd9f-aa17a3faf84e" }, { "Name": "win-hg-15-ssd", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "24e293ed-bc54-4f26-8fb7-7b9457d08e66" }, { "Name": "eg-15-ssd-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "25f3534a-89e5-489d-aa8b-63f62e76875b" }, { "Name": "win-eg-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "291173f1-ea1d-410b-8045-667361a4addb" }, { "Name": "sp-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "2b646463-2efa-428b-94ed-4059923c3636" }, { "Name": "win-eg-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "2c74df82-29d2-4b1a-a32c-d5633e7359b4" }, { "Name": "win-eg-15-ssd", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "2fe4344f-d701-4bc4-8dcd-6d0b5d83fa13" }, { "Name": "sp-30-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "31487b30-eeb6-472f-a9b6-38ace6587ebc" }, { "Name": "win-sp-240-ssd", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "325b602f-ecc4-4444-90bd-5a2cf4e0da53" }, { "Name": "win-hg-7", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "377ded36-491f-4ad7-9eb4-876798b2aea9" }, { "Name": "sp-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "382f2831-4dba-40c4-bb7a-6fadff71c4db" }, { "Name": "hg-30", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "3c1d6170-0097-4b5c-a3b3-adff1b7a86e0" }, { "Name": "hg-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "3c669730-b5cd-4e44-8bd2-bc8d9f984ab2" }, { "Name": "sp-240-ssd-flex", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "3d66fea3-26f2-4195-97ab-fdea3b836099" }, { "Name": "sp-240-flex", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "40c781f7-d7a7-4b0d-bcca-5304aeabbcd9" }, { "Name": "hg-7-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "42730e52-147d-46b8-9546-18e31e5ac8a9" }, { "Name": "eg-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "463f30e9-7d7a-4693-944f-142067cf553b" }, { "Name": "hg-15-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "534f07c6-91af-44c8-9e62-156360fe8359" }, { "Name": "win-sp-30-flex", 
"RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "55533fdf-ad57-4aa7-a2c6-ee31bb94e77b" }, { "Name": "win-hg-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "58b24234-3804-4c4f-9eb6-5406a3a13758" }, { "Name": "hg-7-ssd-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "596c1276-8e53-40a0-b183-cdd9e9b1907d" }, { "Name": "win-hg-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "5c54dc08-28b9-4860-9f24-a2451b2a28ec" }, { "Name": "eg-7", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "5e409dbc-3f4b-46e8-a629-a418c8497922" }, { "Name": "hg-30-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "656423ea-0551-48c6-9e0f-ec6e15952029" }, { "Name": "hg-15", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "675558ea-04fe-47a2-83de-40be9b2eacd4" }, { "Name": "eg-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "68a8e4e1-d291-46e8-a724-fbb1c4b9b051" }, { "Name": "hg-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "6ab72807-e0a5-4e9f-bbb9-7cbbf0038b26" }, { "Name": "win-hg-30", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "6e12cae3-0492-483c-aa39-54a0dcaf86dd" }, { "Name": "win-hg-7-ssd", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "6ead771c-e8b9-424c-afa0-671280416422" }, { "Name": "win-hg-30-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "70ded741-8f58-4bb9-8cfd-5e838b66b5f3" }, { "Name": "win-sp-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "7284d104-a260-421d-8cee-6dc905107b25" }, { "Name": "win-eg-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 800, "ID": "72c0b262-855d-40bb-a3e9-fd989a1bc466" }, { "Name": "win-hg-7-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "73961591-c5f1-436f-b641-1a506eddaef4" }, { "Name": "sp-240-ssd", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "7568d834-3b16-42ce-a2c1-0654e0781160" }, { "Name": "win-eg-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "75f7fe5c-f87a-41d8-a961-a0169d02c268" }, { "Name": "eg-7-ssd-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "77e1db73-0b36-4e37-8e47-32c2d2437ca9" }, { "Name": "eg-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "78df4e30-98ca-4362-af68-037d958edaf0" }, { "Name": "vps-ssd-2", "RAM": 4000, "Ephemeral": 0, "VCPUs": 1, "Is Public": true, "Disk": 20, "ID": "7939cc5c-79b1-45c0-be2d-aa935d92faa1" }, { "Name": "sp-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "80d8510a-79cc-4307-8db7-d1965c9e8ddb" }, { "Name": "win-hg-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "835e734a-46b6-4cb2-be68-e8678fd71059" }, { "Name": "win-eg-7", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "84869b00-b43a-4523-babd-d47d206694e9" }, { "Name": "win-eg-7-ssd-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "852308f8-b8bf-44a4-af41-cbc27437b275" }, { "Name": "win-sp-30", "RAM": 30000, 
"Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "8be9dc29-3eca-499b-ae2d-e3c99699131a" }, { "Name": "win-hg-7-ssd-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "8d704cfd-05b2-4d4a-add2-e2868bcc081f" }, { "Name": "eg-30", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "901f77c2-73f6-4fae-b28a-18b829b55a17" }, { "Name": "sp-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "944b92fb-9a0c-406d-bb9f-a1d93cda9f01" }, { "Name": "eg-30-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "965472c7-eb54-4d4d-bd6e-01ebb694a631" }, { "Name": "sp-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "97824a8c-e683-49a8-a70a-ead64240395c" }, { "Name": "hg-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "9831d7f1-3e79-483d-8958-88e3952c7ea2" }, { "Name": "eg-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 1600, "ID": "9e1f13d0-4fcc-4abc-a9e6-9c76d662c92d" }, { "Name": "win-eg-30-ssd", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 400, "ID": "9e6b85fa-6f37-45ce-a3d6-11ab40a28fad" }, { "Name": "hg-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "9ed787cc-a0db-400b-8cc1-49b6384a1000" }, { "Name": "sp-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "9f3cfdf7-b850-47cc-92be-33aefbfd2b05" }, { "Name": "hg-60-ssd-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "a37bdf17-e1b1-41cc-a67f-ed665a120446" }, { "Name": "win-hg-120-ssd", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 800, "ID": "aa753e73-dadb-4528-9c4a-24e36fc41bf4" }, { "Name": "win-sp-240-ssd-flex", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 50, "ID": "abc007b8-cc44-4b6b-9606-fd647b03e101" }, { "Name": "sp-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "ac74cb45-d895-47dd-b9cf-c17778033d83" }, { "Name": "win-hg-15", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "ae900175-72bd-4fbc-8ab2-4673b468aa5b" }, { "Name": "win-eg-15-ssd-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "aeb37dbf-d7c9-4fd7-93f1-f3818e488ede" }, { "Name": "hg-7-ssd", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "b1dc776c-b6e3-4a96-b230-850f570db3d5" }, { "Name": "sp-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "b24df495-10f3-466e-95ab-26f0f6839a2f" }, { "Name": "win-hg-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 1600, "ID": "b798e44e-bf71-488c-9335-f20bf5976547" }, { "Name": "eg-7-ssd", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 100, "ID": "b94e6623-913d-4147-b2a3-34ccf6fe7a5e" }, { "Name": "eg-15-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "bb5fdda8-34ec-40c8-a4e3-308b9e2c9ee2" }, { "Name": "win-eg-7-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "c65384f6-4665-461a-a292-2f3f5a016244" }, { "Name": "eg-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 800, "ID": "c678f1a8-6542-4f9d-89af-ffc98715d674" }, { "Name": "hg-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, 
"VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "d147a094-b653-41e7-9250-8d4da3044334" }, { "Name": "sp-30", "RAM": 30000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "d1acf88d-6f55-4c5c-a914-4ecbdbd50d6b" }, { "Name": "sp-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "d2d33e8e-58b1-4661-8141-826c47f82166" }, { "Name": "hg-120-ssd-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "d7322c37-9881-4a57-9b40-2499fe2e8f42" }, { "Name": "win-hg-15-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "daf597ea-fbbc-4c71-a35e-5b41d33ccc6c" }, { "Name": "win-hg-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "dcfd834c-3932-47a3-8b4b-cdfeecdfde2c" }, { "Name": "win-hg-60", "RAM": 60000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 1600, "ID": "def75cbd-a4b1-4f82-9152-90c65df9587b" }, { "Name": "eg-30-ssd-flex", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 50, "ID": "e04c7ad6-a5de-45f5-93c9-f3343bdfe8d1" }, { "Name": "vps-ssd-3", "RAM": 8000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 40, "ID": "e43d7458-6b82-4a78-a712-3a4dc6748cf4" }, { "Name": "win-eg-15-flex", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "e8bd3402-7310-4a0f-8b99-d9212359c957" }, { "Name": "win-eg-30", "RAM": 30000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "ebf7a997-e2f8-42f4-84f7-33a3d53d1af9" }, { "Name": "eg-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "ec852ed3-1e42-4c59-abc3-12bcd26abec8" }, { "Name": "sp-240", "RAM": 240000, "Ephemeral": 0, "VCPUs": 16, "Is Public": true, "Disk": 1600, "ID": "ed286e2c-769f-4c47-ac52-b8de7a4891f6" }, { "Name": "win-sp-60-ssd", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "ed835a73-d9a0-43ee-bd89-999c51d8426d" }, { "Name": "win-eg-15", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 400, "ID": "f06056c1-a2d4-40e7-a7d8-e5bfabada72e" }, { "Name": "win-sp-120", "RAM": 120000, "Ephemeral": 0, "VCPUs": 8, "Is Public": true, "Disk": 800, "ID": "f247dc56-395b-49de-9a62-93ccc4fff4ed" }, { "Name": "eg-7-flex", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 50, "ID": "f476f959-ffa6-46f2-94d8-72293570604d" }, { "Name": "sp-60-flex", "RAM": 60000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 50, "ID": "f52db47a-315f-49d4-bc5c-67dd118e7ac0" }, { "Name": "win-hg-120-flex", "RAM": 120000, "Ephemeral": 0, "VCPUs": 32, "Is Public": true, "Disk": 50, "ID": "f6cb8144-5d98-4057-b44f-46da342fb571" }, { "Name": "hg-7", "RAM": 7000, "Ephemeral": 0, "VCPUs": 2, "Is Public": true, "Disk": 200, "ID": "fa3cc551-0358-4170-be64-56ea432b064c" }, { "Name": "hg-15-ssd", "RAM": 15000, "Ephemeral": 0, "VCPUs": 4, "Is Public": true, "Disk": 200, "ID": "ff48c2cf-c17f-4682-aaf6-31d66786f808" } ]""" @classmethod def setup_class(self): if 'OS_AUTH_URL' not in os.environ: pytest.skip('no OS_AUTH_URL environment variable') @patch('teuthology.misc.sh') def test_sorted_flavors(self, m_sh): o = OpenStack() select = '^(vps|hg)-.*ssd' m_sh.return_value = TestOpenStack.flavors flavors = o.get_sorted_flavors('arch', select) assert [u'vps-ssd-1', u'vps-ssd-2', u'hg-7-ssd-flex', u'hg-7-ssd', u'vps-ssd-3', u'hg-15-ssd-flex', u'hg-15-ssd', u'hg-30-ssd-flex', u'hg-30-ssd', u'hg-60-ssd-flex', u'hg-60-ssd', u'hg-120-ssd-flex', 
u'hg-120-ssd', ] == [ f['Name'] for f in flavors ] m_sh.assert_called_with("openstack --quiet flavor list -f json") def test_flavor(self): def get_sorted_flavors(self, arch, select): return [ { 'Name': 'too_small', 'RAM': 2048, 'Disk': 50, 'VCPUs': 1, }, ] with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): with pytest.raises(NoFlavorException): hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 } OpenStack().flavor(hint, 'arch') flavor = 'good-flavor' def get_sorted_flavors(self, arch, select): return [ { 'Name': flavor, 'RAM': 2048, 'Disk': 50, 'VCPUs': 2, }, ] with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 } assert flavor == OpenStack().flavor(hint, 'arch') def test_flavor_range(self): flavors = [ { 'Name': 'too_small', 'RAM': 2048, 'Disk': 50, 'VCPUs': 1, }, ] def get_sorted_flavors(self, arch, select): return flavors min = { 'ram': 1000, 'disk': 40, 'cpus': 2 } good = { 'ram': 4000, 'disk': 40, 'cpus': 2 } # # there are no flavors in the required range # with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): with pytest.raises(NoFlavorException): OpenStack().flavor_range(min, good, 'arch') # # there is one flavor in the required range # flavors.append({ 'Name': 'min', 'RAM': 2048, 'Disk': 40, 'VCPUs': 2, }) with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): assert 'min' == OpenStack().flavor_range(min, good, 'arch') # # out of the two flavors in the required range, get the bigger one # flavors.append({ 'Name': 'good', 'RAM': 3000, 'Disk': 40, 'VCPUs': 2, }) with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): assert 'good' == OpenStack().flavor_range(min, good, 'arch') # # there is one flavor bigger or equal to good, get this one # flavors.append({ 'Name': 'best', 'RAM': 4000, 'Disk': 40, 'VCPUs': 2, }) with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): assert 'best' == OpenStack().flavor_range(min, good, 'arch') # # there are two flavors bigger or equal to good, get the smallest one # flavors.append({ 'Name': 'too_big', 'RAM': 30000, 'Disk': 400, 'VCPUs': 20, }) with patch.multiple( OpenStack, get_sorted_flavors=get_sorted_flavors, ): assert 'best' == OpenStack().flavor_range(min, good, 'arch') def test_interpret_hints(self): defaults = { 'machine': { 'ram': 0, 'disk': 0, 'cpus': 0, }, 'volumes': { 'count': 0, 'size': 0, }, } expected_disk = 10 # first hint larger than the second expected_ram = 20 # second hint larger than the first expected_cpus = 0 # not set, hence zero by default expected_count = 30 # second hint larger than the first expected_size = 40 # does not exist in the first hint hints = [ { 'machine': { 'ram': 2, 'disk': expected_disk, }, 'volumes': { 'count': 9, 'size': expected_size, }, }, { 'machine': { 'ram': expected_ram, 'disk': 3, }, 'volumes': { 'count': expected_count, }, }, ] hint = OpenStack().interpret_hints(defaults, hints) assert hint == { 'machine': { 'ram': expected_ram, 'disk': expected_disk, 'cpus': expected_cpus, }, 'volumes': { 'count': expected_count, 'size': expected_size, } } assert defaults == OpenStack().interpret_hints(defaults, None) def test_get_provider(self): auth = os.environ.get('OS_AUTH_URL', None) os.environ['OS_AUTH_URL'] = 'cloud.ovh.net' assert OpenStack().get_provider() == 'ovh' if auth != None: os.environ['OS_AUTH_URL'] = auth else: del os.environ['OS_AUTH_URL'] def test_get_os_url(self): o = OpenStack() # # Only for OVH # o.provider = 'something' assert "" == 
o.get_os_url("server ") o.provider = 'ovh' assert "" == o.get_os_url("unknown ") type2cmd = { 'compute': ('server', 'flavor'), 'network': ('ip', 'security', 'network'), 'image': ('image',), 'volume': ('volume',), } os.environ['OS_REGION_NAME'] = 'REGION' os.environ['OS_TENANT_ID'] = 'TENANT' for (type, cmds) in type2cmd.items(): for cmd in cmds: assert ("//" + type) in o.get_os_url(cmd + " ") for type in type2cmd.keys(): assert ("//" + type) in o.get_os_url("whatever ", type=type) @patch('teuthology.misc.sh') def test_cache_token(self, m_sh): token = 'TOKEN VALUE' m_sh.return_value = token OpenStack.token = None o = OpenStack() # # Only for OVH # o.provider = 'something' assert False == o.cache_token() o.provider = 'ovh' # # Set the environment with the token # assert 'OS_TOKEN_VALUE' not in os.environ assert 'OS_TOKEN_EXPIRES' not in os.environ assert True == o.cache_token() m_sh.assert_called_with('openstack -q token issue -c id -f value') assert token == os.environ['OS_TOKEN_VALUE'] assert token == OpenStack.token assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) assert time.time() < OpenStack.token_expires # # Reset after it expires # token_expires = int(time.time()) - 2000 OpenStack.token_expires = token_expires assert True == o.cache_token() assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) assert time.time() < OpenStack.token_expires @patch('teuthology.misc.sh') def test_cache_token_from_environment(self, m_sh): OpenStack.token = None o = OpenStack() o.provider = 'ovh' token = 'TOKEN VALUE' os.environ['OS_TOKEN_VALUE'] = token token_expires = int(time.time()) + OpenStack.token_cache_duration os.environ['OS_TOKEN_EXPIRES'] = str(token_expires) assert True == o.cache_token() assert token == OpenStack.token assert token_expires == OpenStack.token_expires m_sh.assert_not_called() @patch('teuthology.misc.sh') def test_cache_token_expired_environment(self, m_sh): token = 'TOKEN VALUE' m_sh.return_value = token OpenStack.token = None o = OpenStack() o.provider = 'ovh' os.environ['OS_TOKEN_VALUE'] = token token_expires = int(time.time()) - 2000 os.environ['OS_TOKEN_EXPIRES'] = str(token_expires) assert True == o.cache_token() m_sh.assert_called_with('openstack -q token issue -c id -f value') assert token == os.environ['OS_TOKEN_VALUE'] assert token == OpenStack.token assert time.time() < int(os.environ['OS_TOKEN_EXPIRES']) assert time.time() < OpenStack.token_expires class TestTeuthologyOpenStack(TestOpenStackBase): @classmethod def setup_class(self): if 'OS_AUTH_URL' not in os.environ: pytest.skip('no OS_AUTH_URL environment variable') teuthology.log.setLevel(logging.DEBUG) set_config_attr(argparse.Namespace()) ip = TeuthologyOpenStack.create_floating_ip() if ip: ip_id = TeuthologyOpenStack.get_floating_ip_id(ip) OpenStack().run("ip floating delete " + ip_id) self.can_create_floating_ips = True else: self.can_create_floating_ips = False def setup(self): super(TestTeuthologyOpenStack, self).setup() self.key_filename = tempfile.mktemp() self.key_name = 'teuthology-test' self.name = 'teuthology-test' self.clobber() misc.sh(""" openstack keypair create {key_name} > {key_filename} chmod 600 {key_filename} """.format(key_filename=self.key_filename, key_name=self.key_name)) self.options = ['--key-name', self.key_name, '--key-filename', self.key_filename, '--name', self.name, '--verbose'] def teardown(self): super(TestTeuthologyOpenStack, self).teardown() self.clobber() os.unlink(self.key_filename) def clobber(self): misc.sh(""" openstack server delete {name} --wait || true openstack 
keypair delete {key_name} || true """.format(key_name=self.key_name, name=self.name)) def test_create(self, caplog): teuthology_argv = [ '--suite', 'upgrade/hammer', '--dry-run', '--ceph', 'master', '--kernel', 'distro', '--flavor', 'gcov', '--distro', 'ubuntu', '--suite-branch', 'hammer', '--email', 'loic@dachary.org', '--num', '10', '--limit', '23', '--subset', '1/2', '--priority', '101', '--timeout', '234', '--filter', 'trasher', '--filter-out', 'erasure-code', '--throttle', '3', ] archive_upload = 'user@archive:/tmp' argv = (self.options + ['--teuthology-git-url', 'TEUTHOLOGY_URL', '--teuthology-branch', 'TEUTHOLOGY_BRANCH', '--ceph-workbench-git-url', 'CEPH_WORKBENCH_URL', '--ceph-workbench-branch', 'CEPH_WORKBENCH_BRANCH', '--upload', '--archive-upload', archive_upload] + teuthology_argv) args = scripts.openstack.parse_args(argv) teuthology_argv.extend([ '--archive-upload', archive_upload, '--archive-upload-url', args.archive_upload_url, ]) teuthology = TeuthologyOpenStack(args, None, argv) teuthology.user_data = 'teuthology/openstack/test/user-data-test1.txt' teuthology.teuthology_suite = 'echo --' teuthology.main() assert 0 == teuthology.ssh("lsb_release -a") assert 0 == teuthology.ssh("grep 'substituded variables' /var/log/cloud-init.log") l = caplog.text assert 'Ubuntu 14.04' in l assert "nworkers=" + str(args.simultaneous_jobs) in l assert "username=" + teuthology.username in l assert "upload=--archive-upload user@archive:/tmp" in l assert ("ceph_workbench=" " --ceph-workbench-branch CEPH_WORKBENCH_BRANCH" " --ceph-workbench-git-url CEPH_WORKBENCH_URL") in l assert "clone=git clone -b TEUTHOLOGY_BRANCH TEUTHOLOGY_URL" in l assert os.environ['OS_AUTH_URL'] in l assert " ".join(teuthology_argv) in l if self.can_create_floating_ips: ip = teuthology.get_floating_ip(self.name) teuthology.teardown() if self.can_create_floating_ips: assert teuthology.get_floating_ip_id(ip) == None def test_floating_ip(self): if not self.can_create_floating_ips: pytest.skip('unable to create floating ips') expected = TeuthologyOpenStack.create_floating_ip() ip = TeuthologyOpenStack.get_unassociated_floating_ip() assert expected == ip ip_id = TeuthologyOpenStack.get_floating_ip_id(ip) OpenStack().run("ip floating delete " + ip_id)
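# --- Illustration of the hint-merging semantics exercised in
# ``test_interpret_hints`` above (a standalone re-implementation sketch,
# not teuthology's actual code): each field resolves to the maximum value
# seen across the defaults and all hints, and ``None`` hints return the
# defaults unchanged.

def merge_hints(defaults, hints):
    if not hints:
        return defaults
    merged = {section: dict(fields) for section, fields in defaults.items()}
    for hint in hints:
        for section, fields in hint.items():
            for field, value in fields.items():
                merged[section][field] = max(merged[section].get(field, 0), value)
    return merged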
""" Press 's' to take a picture and start matching image real-time """ import cv2 import numpy as np from cam import MyCam from fmatch import draw_match print __doc__ MIN_MATCH_COUNT = 10 orb = cv2.ORB() cam = MyCam() cam.size = (640, 480) img1 = img1 = cv2.imread('box.png', 0) cv2.imshow('source', img1) while True: img2 = cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY) k = cv2.waitKey(5) if k == ord('s'): img1 = img2.copy() cv2.imwrite('campic.png', img1) elif k== 27: break # find the keypoints and descriptors with ORB if k is not None: cv2.destroyWindow('preview') kp1, des1 = orb.detectAndCompute(img1,None) kp2, des2 = orb.detectAndCompute(img2,None) # If nothing match then continue if des2 is None: img3 = img3 = draw_match(img1,kp1,img2,kp2,[]) continue des1 = des1.astype(np.uint8, copy=False) # Fix the data type des2 = des2.astype(np.uint8, copy=False) # Now match describers bf = cv2.BFMatcher(cv2.NORM_HAMMING) # matches = bf.match(des1,des2) matches = bf.knnMatch(des1,des2, k=2) # m = matches[0][0] # p1, p2 = np.float32(kp1[m.queryIdx].pt), np.float32(kp2[m.trainIdx].pt) # print m.distance, p1, p2 # Apply ratio test good = [] try: for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) except ValueError: good = [] if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) area = cv2.contourArea(dst) cv2.polylines(img2,[np.int32(dst)],True,255,3) else: # print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT) matchesMask = None good = [] img3 = draw_match(img1,kp1,img2,kp2,good, matchesMask=matchesMask) cv2.imshow('matches', img3) print 'press any key to continue'
""" homeassistant.components.sensor.mysensors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for MySensors sensors. Configuration: To use the MySensors sensor you will need to add something like the following to your config/configuration.yaml sensor: platform: mysensors port: '/dev/ttyACM0' Variables: port *Required Port of your connection to your MySensors device. """ import logging from homeassistant.helpers.entity import Entity from homeassistant.const import ( ATTR_BATTERY_LEVEL, EVENT_HOMEASSISTANT_STOP, TEMP_CELCIUS, TEMP_FAHRENHEIT, STATE_ON, STATE_OFF) CONF_PORT = "port" CONF_DEBUG = "debug" CONF_PERSISTENCE = "persistence" ATTR_NODE_ID = "node_id" ATTR_CHILD_ID = "child_id" _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['https://github.com/theolind/pymysensors/archive/master.zip' '#egg=pymysensors-0.1'] def setup_platform(hass, config, add_devices, discovery_info=None): """ Setup the mysensors platform. """ import mysensors.mysensors as mysensors import mysensors.const_14 as const devices = {} # keep track of devices added to HA # Just assume celcius means that the user wants metric for now. # It may make more sense to make this a global config option in the future. is_metric = (hass.config.temperature_unit == TEMP_CELCIUS) def sensor_update(update_type, nid): """ Callback for sensor updates from the MySensors gateway. """ _LOGGER.info("sensor_update %s: node %s", update_type, nid) sensor = gateway.sensors[nid] if sensor.sketch_name is None: return if nid not in devices: devices[nid] = {} node = devices[nid] new_devices = [] for child_id, child in sensor.children.items(): if child_id not in node: node[child_id] = {} for value_type, value in child.values.items(): if value_type not in node[child_id]: name = '{} {}.{}'.format(sensor.sketch_name, nid, child.id) node[child_id][value_type] = \ MySensorsNodeValue( nid, child_id, name, value_type, is_metric, const) new_devices.append(node[child_id][value_type]) else: node[child_id][value_type].update_sensor( value, sensor.battery_level) if new_devices: _LOGGER.info("adding new devices: %s", new_devices) add_devices(new_devices) port = config.get(CONF_PORT) if port is None: _LOGGER.error("Missing required key 'port'") return False persistence = config.get(CONF_PERSISTENCE, True) gateway = mysensors.SerialGateway(port, sensor_update, persistence=persistence) gateway.metric = is_metric gateway.debug = config.get(CONF_DEBUG, False) gateway.start() if persistence: for nid in gateway.sensors: sensor_update('sensor_update', nid) hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: gateway.stop()) class MySensorsNodeValue(Entity): """ Represents the value of a MySensors child node. """ # pylint: disable=too-many-arguments, too-many-instance-attributes def __init__(self, node_id, child_id, name, value_type, metric, const): self._name = name self.node_id = node_id self.child_id = child_id self.battery_level = 0 self.value_type = value_type self.metric = metric self._value = '' self.const = const @property def should_poll(self): """ MySensor gateway pushes its state to HA. """ return False @property def name(self): """ The name of this sensor. """ return self._name @property def state(self): """ Returns the state of the device. """ return self._value @property def unit_of_measurement(self): """ Unit of measurement of this entity. 
""" if self.value_type == self.const.SetReq.V_TEMP: return TEMP_CELCIUS if self.metric else TEMP_FAHRENHEIT elif self.value_type == self.const.SetReq.V_HUM or \ self.value_type == self.const.SetReq.V_DIMMER or \ self.value_type == self.const.SetReq.V_LIGHT_LEVEL: return '%' return None @property def state_attributes(self): """ Returns the state attributes. """ return { ATTR_NODE_ID: self.node_id, ATTR_CHILD_ID: self.child_id, ATTR_BATTERY_LEVEL: self.battery_level, } def update_sensor(self, value, battery_level): """ Update a sensor with the latest value from the controller. """ _LOGGER.info("%s value = %s", self._name, value) if self.value_type == self.const.SetReq.V_TRIPPED or \ self.value_type == self.const.SetReq.V_ARMED: self._value = STATE_ON if int(value) == 1 else STATE_OFF else: self._value = value self.battery_level = battery_level self.update_ha_state()
import requests


class DataViewRestClient():

    def __init__(self, endpoint, authtoken, certificate=None):
        self.ENDPOINT = endpoint
        self.AUTHTOKEN = authtoken
        self.CERTIFICATE = certificate
        if not self.ENDPOINT.endswith('/'):
            self.ENDPOINT = self.ENDPOINT + '/'

    def get_headers(self):
        headers = {'Authorization': 'Token: ' + self.AUTHTOKEN,
                   'Accept': 'application/json'}
        return headers

    def list_models(self, name):
        r = requests.get(self.ENDPOINT + name,
                         headers=self.get_headers(),
                         verify=self.CERTIFICATE)
        if r.status_code == 200:
            return r.json()
        else:
            raise Exception(r.status_code, r.text)

    def create_model(self, name, values):
        r = requests.post(self.ENDPOINT + name + '/',
                          headers=self.get_headers(),
                          verify=self.CERTIFICATE,
                          data=values)
        if r.status_code == 201:
            return r.json()
        else:
            raise Exception(r.status_code, r.text)

    def get_model(self, name, key):
        r = requests.get(self.ENDPOINT + name + '/' + str(key),
                         headers=self.get_headers(),
                         verify=self.CERTIFICATE)
        if r.status_code == 200:
            return r.json()
        else:
            raise Exception(r.status_code, r.text)

    def update_model(self, name, key, values):
        # assume the API updates via PUT; the original used requests.get,
        # which cannot submit the payload
        r = requests.put(self.ENDPOINT + name + '/' + str(key),
                         headers=self.get_headers(),
                         verify=self.CERTIFICATE,
                         data=values)
        if r.status_code == 200:
            return r.json()
        else:
            raise Exception(r.status_code, r.text)

    def delete_model(self, name, key):
        r = requests.delete(self.ENDPOINT + name + '/' + str(key),
                            headers=self.get_headers(),
                            verify=self.CERTIFICATE)
        # DELETE endpoints commonly answer 204 No Content as well as 200
        if r.status_code in (200, 204):
            return None
        else:
            raise Exception(r.status_code, r.text)
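# --- illustrative usage ---
# Example calls against the client above; the endpoint, token, model name
# and the 'id' field of the responses are placeholders/assumptions about
# the server, not part of the client itself.
client = DataViewRestClient('https://dataview.example.com/api', 's3cr3t-token')
created = client.create_model('widgets', {'name': 'demo'})
print(client.get_model('widgets', created['id']))  # 'id' assumed in response
client.update_model('widgets', created['id'], {'name': 'renamed'})
for model in client.list_models('widgets'):
    print(model)
client.delete_model('widgets', created['id'])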
import cython cython.declare(Nodes=object, ExprNodes=object, EncodedString=object, BytesLiteral=object, StringEncoding=object, FileSourceDescriptor=object, lookup_unicodechar=object, Future=object, Options=object, error=object, warning=object, Builtin=object, ModuleNode=object, Utils=object, re=object, _unicode=object, _bytes=object) import re from unicodedata import lookup as lookup_unicodechar from Cython.Compiler.Scanning import PyrexScanner, FileSourceDescriptor import Nodes import ExprNodes import Builtin import StringEncoding from StringEncoding import EncodedString, BytesLiteral, _unicode, _bytes from ModuleNode import ModuleNode from Errors import error, warning from Cython import Utils import Future import Options class Ctx(object): # Parsing context level = 'other' visibility = 'private' cdef_flag = 0 typedef_flag = 0 api = 0 overridable = 0 nogil = 0 namespace = None templates = None allow_struct_enum_decorator = False def __init__(self, **kwds): self.__dict__.update(kwds) def __call__(self, **kwds): ctx = Ctx() d = ctx.__dict__ d.update(self.__dict__) d.update(kwds) return ctx def p_ident(s, message = "Expected an identifier"): if s.sy == 'IDENT': name = s.systring s.next() return name else: s.error(message) def p_ident_list(s): names = [] while s.sy == 'IDENT': names.append(s.systring) s.next() if s.sy != ',': break s.next() return names def p_binop_operator(s): pos = s.position() op = s.sy s.next() return op, pos def p_binop_expr(s, ops, p_sub_expr): n1 = p_sub_expr(s) while s.sy in ops: op, pos = p_binop_operator(s) n2 = p_sub_expr(s) n1 = ExprNodes.binop_node(pos, op, n1, n2) if op == '/': if Future.division in s.context.future_directives: n1.truedivision = True else: n1.truedivision = None # unknown return n1 def p_lambdef(s, allow_conditional=True): # s.sy == 'lambda' pos = s.position() s.next() if s.sy == ':': args = [] star_arg = starstar_arg = None else: args, star_arg, starstar_arg = p_varargslist( s, terminator=':', annotated=False) s.expect(':') if allow_conditional: expr = p_test(s) else: expr = p_test_nocond(s) return ExprNodes.LambdaNode( pos, args = args, star_arg = star_arg, starstar_arg = starstar_arg, result_expr = expr) def p_lambdef_nocond(s): return p_lambdef(s, allow_conditional=False) def p_test(s): if s.sy == 'lambda': return p_lambdef(s) pos = s.position() expr = p_or_test(s) if s.sy == 'if': s.next() test = p_or_test(s) s.expect('else') other = p_test(s) return ExprNodes.CondExprNode(pos, test=test, true_val=expr, false_val=other) else: return expr def p_test_nocond(s): if s.sy == 'lambda': return p_lambdef_nocond(s) else: return p_or_test(s) def p_or_test(s): return p_rassoc_binop_expr(s, ('or',), p_and_test) def p_rassoc_binop_expr(s, ops, p_subexpr): n1 = p_subexpr(s) if s.sy in ops: pos = s.position() op = s.sy s.next() n2 = p_rassoc_binop_expr(s, ops, p_subexpr) n1 = ExprNodes.binop_node(pos, op, n1, n2) return n1 def p_and_test(s): #return p_binop_expr(s, ('and',), p_not_test) return p_rassoc_binop_expr(s, ('and',), p_not_test) def p_not_test(s): if s.sy == 'not': pos = s.position() s.next() return ExprNodes.NotNode(pos, operand = p_not_test(s)) else: return p_comparison(s) def p_comparison(s): n1 = p_starred_expr(s) if s.sy in comparison_ops: pos = s.position() op = p_cmp_op(s) n2 = p_starred_expr(s) n1 = ExprNodes.PrimaryCmpNode(pos, operator = op, operand1 = n1, operand2 = n2) if s.sy in comparison_ops: n1.cascade = p_cascaded_cmp(s) return n1 def p_test_or_starred_expr(s): if s.sy == '*': return p_starred_expr(s) else: return p_test(s) def 
p_starred_expr(s): pos = s.position() if s.sy == '*': starred = True s.next() else: starred = False expr = p_bit_expr(s) if starred: expr = ExprNodes.StarredTargetNode(pos, expr) return expr def p_cascaded_cmp(s): pos = s.position() op = p_cmp_op(s) n2 = p_starred_expr(s) result = ExprNodes.CascadedCmpNode(pos, operator = op, operand2 = n2) if s.sy in comparison_ops: result.cascade = p_cascaded_cmp(s) return result def p_cmp_op(s): if s.sy == 'not': s.next() s.expect('in') op = 'not_in' elif s.sy == 'is': s.next() if s.sy == 'not': s.next() op = 'is_not' else: op = 'is' else: op = s.sy s.next() if op == '<>': op = '!=' return op comparison_ops = cython.declare(set, set([ '<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is', 'not' ])) def p_bit_expr(s): return p_binop_expr(s, ('|',), p_xor_expr) def p_xor_expr(s): return p_binop_expr(s, ('^',), p_and_expr) def p_and_expr(s): return p_binop_expr(s, ('&',), p_shift_expr) def p_shift_expr(s): return p_binop_expr(s, ('<<', '>>'), p_arith_expr) def p_arith_expr(s): return p_binop_expr(s, ('+', '-'), p_term) def p_term(s): return p_binop_expr(s, ('*', '/', '%', '//'), p_factor) def p_factor(s): # little indirection for C-ification purposes return _p_factor(s) def _p_factor(s): sy = s.sy if sy in ('+', '-', '~'): op = s.sy pos = s.position() s.next() return ExprNodes.unop_node(pos, op, p_factor(s)) elif not s.in_python_file: if sy == '&': pos = s.position() s.next() arg = p_factor(s) return ExprNodes.AmpersandNode(pos, operand = arg) elif sy == "<": return p_typecast(s) elif sy == 'IDENT' and s.systring == "sizeof": return p_sizeof(s) return p_power(s) def p_typecast(s): # s.sy == "<" pos = s.position() s.next() base_type = p_c_base_type(s) is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode) is_template = isinstance(base_type, Nodes.TemplatedTypeNode) is_const = isinstance(base_type, Nodes.CConstTypeNode) if (not is_memslice and not is_template and not is_const and base_type.name is None): s.error("Unknown type") declarator = p_c_declarator(s, empty = 1) if s.sy == '?': s.next() typecheck = 1 else: typecheck = 0 s.expect(">") operand = p_factor(s) if is_memslice: return ExprNodes.CythonArrayNode(pos, base_type_node=base_type, operand=operand) return ExprNodes.TypecastNode(pos, base_type = base_type, declarator = declarator, operand = operand, typecheck = typecheck) def p_sizeof(s): # s.sy == ident "sizeof" pos = s.position() s.next() s.expect('(') # Here we decide if we are looking at an expression or type # If it is actually a type, but parsable as an expression, # we treat it as an expression here. 
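    # e.g. "sizeof(int)" and "sizeof(foo *)" go down the type branch below,
    # while a bare "sizeof(foo)" is parsed as an expression even if foo
    # happens to name a type.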
if looking_at_expr(s): operand = p_test(s) node = ExprNodes.SizeofVarNode(pos, operand = operand) else: base_type = p_c_base_type(s) declarator = p_c_declarator(s, empty = 1) node = ExprNodes.SizeofTypeNode(pos, base_type = base_type, declarator = declarator) s.expect(')') return node def p_yield_expression(s): # s.sy == "yield" pos = s.position() s.next() is_yield_from = False if s.sy == 'from': is_yield_from = True s.next() if s.sy != ')' and s.sy not in statement_terminators: arg = p_testlist(s) else: if is_yield_from: s.error("'yield from' requires a source argument", pos=pos, fatal=False) arg = None if is_yield_from: return ExprNodes.YieldFromExprNode(pos, arg=arg) else: return ExprNodes.YieldExprNode(pos, arg=arg) def p_yield_statement(s): # s.sy == "yield" yield_expr = p_yield_expression(s) return Nodes.ExprStatNode(yield_expr.pos, expr=yield_expr) def p_power(s): if s.systring == 'new' and s.peek()[0] == 'IDENT': return p_new_expr(s) n1 = p_atom(s) while s.sy in ('(', '[', '.'): n1 = p_trailer(s, n1) if s.sy == '**': pos = s.position() s.next() n2 = p_factor(s) n1 = ExprNodes.binop_node(pos, '**', n1, n2) return n1 def p_new_expr(s): # s.systring == 'new'. pos = s.position() s.next() cppclass = p_c_base_type(s) return p_call(s, ExprNodes.NewExprNode(pos, cppclass = cppclass)) def p_trailer(s, node1): pos = s.position() if s.sy == '(': return p_call(s, node1) elif s.sy == '[': return p_index(s, node1) else: # s.sy == '.' s.next() name = EncodedString( p_ident(s) ) return ExprNodes.AttributeNode(pos, obj = node1, attribute = name) def p_call_parse_args(s, allow_genexp = True): # s.sy == '(' pos = s.position() s.next() positional_args = [] keyword_args = [] star_arg = None starstar_arg = None while s.sy not in ('**', ')'): if s.sy == '*': if star_arg: s.error("only one star-arg parameter allowed", pos=s.position()) s.next() star_arg = p_test(s) else: arg = p_test(s) if s.sy == '=': s.next() if not arg.is_name: s.error("Expected an identifier before '='", pos=arg.pos) encoded_name = EncodedString(arg.name) keyword = ExprNodes.IdentifierStringNode( arg.pos, value=encoded_name) arg = p_test(s) keyword_args.append((keyword, arg)) else: if keyword_args: s.error("Non-keyword arg following keyword arg", pos=arg.pos) if star_arg: s.error("Non-keyword arg following star-arg", pos=arg.pos) positional_args.append(arg) if s.sy != ',': break s.next() if s.sy == 'for': if len(positional_args) == 1 and not star_arg: positional_args = [ p_genexp(s, positional_args[0]) ] elif s.sy == '**': s.next() starstar_arg = p_test(s) if s.sy == ',': s.next() s.expect(')') return positional_args, keyword_args, star_arg, starstar_arg def p_call_build_packed_args(pos, positional_args, keyword_args, star_arg, starstar_arg): arg_tuple = None keyword_dict = None if positional_args or not star_arg: arg_tuple = ExprNodes.TupleNode(pos, args = positional_args) if star_arg: star_arg_tuple = ExprNodes.AsTupleNode(pos, arg = star_arg) if arg_tuple: arg_tuple = ExprNodes.binop_node(pos, operator = '+', operand1 = arg_tuple, operand2 = star_arg_tuple) else: arg_tuple = star_arg_tuple if keyword_args or starstar_arg: keyword_args = [ExprNodes.DictItemNode(pos=key.pos, key=key, value=value) for key, value in keyword_args] if starstar_arg: keyword_dict = ExprNodes.KeywordArgsNode( pos, starstar_arg = starstar_arg, keyword_args = keyword_args) else: keyword_dict = ExprNodes.DictNode( pos, key_value_pairs = keyword_args) return arg_tuple, keyword_dict def p_call(s, function): # s.sy == '(' pos = s.position() positional_args, 
keyword_args, star_arg, starstar_arg = \ p_call_parse_args(s) if not (keyword_args or star_arg or starstar_arg): return ExprNodes.SimpleCallNode(pos, function = function, args = positional_args) else: arg_tuple, keyword_dict = p_call_build_packed_args( pos, positional_args, keyword_args, star_arg, starstar_arg) return ExprNodes.GeneralCallNode(pos, function = function, positional_args = arg_tuple, keyword_args = keyword_dict) def p_index(s, base): # s.sy == '[' pos = s.position() s.next() subscripts, is_single_value = p_subscript_list(s) if is_single_value and len(subscripts[0]) == 2: start, stop = subscripts[0] result = ExprNodes.SliceIndexNode(pos, base = base, start = start, stop = stop) else: indexes = make_slice_nodes(pos, subscripts) if is_single_value: index = indexes[0] else: index = ExprNodes.TupleNode(pos, args = indexes) result = ExprNodes.IndexNode(pos, base = base, index = index) s.expect(']') return result def p_subscript_list(s): is_single_value = True items = [p_subscript(s)] while s.sy == ',': is_single_value = False s.next() if s.sy == ']': break items.append(p_subscript(s)) return items, is_single_value def p_subscript(s): # Parse a subscript and return a list of # 1, 2 or 3 ExprNodes, depending on how # many slice elements were encountered. pos = s.position() start = p_slice_element(s, (':',)) if s.sy != ':': return [start] s.next() stop = p_slice_element(s, (':', ',', ']')) if s.sy != ':': return [start, stop] s.next() step = p_slice_element(s, (':', ',', ']')) return [start, stop, step] def p_slice_element(s, follow_set): # Simple expression which may be missing iff # it is followed by something in follow_set. if s.sy not in follow_set: return p_test(s) else: return None def expect_ellipsis(s): s.expect('.') s.expect('.') s.expect('.') def make_slice_nodes(pos, subscripts): # Convert a list of subscripts as returned # by p_subscript_list into a list of ExprNodes, # creating SliceNodes for elements with 2 or # more components. 
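    # e.g. the subscripts of "x[i, 1:2, ::3]" arrive here as
    # [[i], [1, 2], [None, None, 3]]; the single-element entry passes
    # through unchanged, the others become SliceNodes with NoneNode
    # standing in for any missing start/stop/step.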
result = [] for subscript in subscripts: if len(subscript) == 1: result.append(subscript[0]) else: result.append(make_slice_node(pos, *subscript)) return result def make_slice_node(pos, start, stop = None, step = None): if not start: start = ExprNodes.NoneNode(pos) if not stop: stop = ExprNodes.NoneNode(pos) if not step: step = ExprNodes.NoneNode(pos) return ExprNodes.SliceNode(pos, start = start, stop = stop, step = step) def p_atom(s): pos = s.position() sy = s.sy if sy == '(': s.next() if s.sy == ')': result = ExprNodes.TupleNode(pos, args = []) elif s.sy == 'yield': result = p_yield_expression(s) else: result = p_testlist_comp(s) s.expect(')') return result elif sy == '[': return p_list_maker(s) elif sy == '{': return p_dict_or_set_maker(s) elif sy == '`': return p_backquote_expr(s) elif sy == '.': expect_ellipsis(s) return ExprNodes.EllipsisNode(pos) elif sy == 'INT': return p_int_literal(s) elif sy == 'FLOAT': value = s.systring s.next() return ExprNodes.FloatNode(pos, value = value) elif sy == 'IMAG': value = s.systring[:-1] s.next() return ExprNodes.ImagNode(pos, value = value) elif sy == 'BEGIN_STRING': kind, bytes_value, unicode_value = p_cat_string_literal(s) if kind == 'c': return ExprNodes.CharNode(pos, value = bytes_value) elif kind == 'u': return ExprNodes.UnicodeNode(pos, value = unicode_value, bytes_value = bytes_value) elif kind == 'b': return ExprNodes.BytesNode(pos, value = bytes_value) else: return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value) elif sy == 'IDENT': name = EncodedString( s.systring ) s.next() if name == "None": return ExprNodes.NoneNode(pos) elif name == "True": return ExprNodes.BoolNode(pos, value=True) elif name == "False": return ExprNodes.BoolNode(pos, value=False) elif name == "NULL" and not s.in_python_file: return ExprNodes.NullNode(pos) else: return p_name(s, name) else: s.error("Expected an identifier or literal") def p_int_literal(s): pos = s.position() value = s.systring s.next() unsigned = "" longness = "" while value[-1] in u"UuLl": if value[-1] in u"Ll": longness += "L" else: unsigned += "U" value = value[:-1] # '3L' is ambiguous in Py2 but not in Py3. '3U' and '3LL' are # illegal in Py2 Python files. All suffixes are illegal in Py3 # Python files. 
is_c_literal = None if unsigned: is_c_literal = True elif longness: if longness == 'LL' or s.context.language_level >= 3: is_c_literal = True if s.in_python_file: if is_c_literal: error(pos, "illegal integer literal syntax in Python source file") is_c_literal = False return ExprNodes.IntNode(pos, is_c_literal = is_c_literal, value = value, unsigned = unsigned, longness = longness) def p_name(s, name): pos = s.position() if not s.compile_time_expr and name in s.compile_time_env: value = s.compile_time_env.lookup_here(name) node = wrap_compile_time_constant(pos, value) if node is not None: return node return ExprNodes.NameNode(pos, name=name) def wrap_compile_time_constant(pos, value): rep = repr(value) if value is None: return ExprNodes.NoneNode(pos) elif value is Ellipsis: return ExprNodes.EllipsisNode(pos) elif isinstance(value, bool): return ExprNodes.BoolNode(pos, value=value) elif isinstance(value, int): return ExprNodes.IntNode(pos, value=rep) elif isinstance(value, long): return ExprNodes.IntNode(pos, value=rep, longness="L") elif isinstance(value, float): return ExprNodes.FloatNode(pos, value=rep) elif isinstance(value, _unicode): return ExprNodes.UnicodeNode(pos, value=EncodedString(value)) elif isinstance(value, _bytes): return ExprNodes.BytesNode(pos, value=BytesLiteral(value)) elif isinstance(value, tuple): args = [wrap_compile_time_constant(pos, arg) for arg in value] if None not in args: return ExprNodes.TupleNode(pos, args=args) else: # error already reported return None error(pos, "Invalid type for compile-time constant: %r (type %s)" % (value, value.__class__.__name__)) return None def p_cat_string_literal(s): # A sequence of one or more adjacent string literals. # Returns (kind, bytes_value, unicode_value) # where kind in ('b', 'c', 'u', '') kind, bytes_value, unicode_value = p_string_literal(s) if kind == 'c' or s.sy != 'BEGIN_STRING': return kind, bytes_value, unicode_value bstrings, ustrings = [bytes_value], [unicode_value] bytes_value = unicode_value = None while s.sy == 'BEGIN_STRING': pos = s.position() next_kind, next_bytes_value, next_unicode_value = p_string_literal(s) if next_kind == 'c': error(pos, "Cannot concatenate char literal with another string or char literal") elif next_kind != kind: error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" % (kind, next_kind)) else: bstrings.append(next_bytes_value) ustrings.append(next_unicode_value) # join and rewrap the partial literals if kind in ('b', 'c', '') or kind == 'u' and None not in bstrings: # Py3 enforced unicode literals are parsed as bytes/unicode combination bytes_value = BytesLiteral( StringEncoding.join_bytes(bstrings) ) bytes_value.encoding = s.source_encoding if kind in ('u', ''): unicode_value = EncodedString( u''.join([ u for u in ustrings if u is not None ]) ) return kind, bytes_value, unicode_value def p_opt_string_literal(s, required_type='u'): if s.sy == 'BEGIN_STRING': kind, bytes_value, unicode_value = p_string_literal(s, required_type) if required_type == 'u': return unicode_value elif required_type == 'b': return bytes_value else: s.error("internal parser configuration error") else: return None def check_for_non_ascii_characters(string): for c in string: if c >= u'\x80': return True return False def p_string_literal(s, kind_override=None): # A single string or char literal. Returns (kind, bvalue, uvalue) # where kind in ('b', 'c', 'u', ''). The 'bvalue' is the source # code byte sequence of the string literal, 'uvalue' is the # decoded Unicode string. 
Either of the two may be None depending # on the 'kind' of string, only unprefixed strings have both # representations. # s.sy == 'BEGIN_STRING' pos = s.position() is_raw = False is_python3_source = s.context.language_level >= 3 has_non_ASCII_literal_characters = False kind = s.systring[:1].lower() if kind == 'r': # Py3 allows both 'br' and 'rb' as prefix if s.systring[1:2].lower() == 'b': kind = 'b' else: kind = '' is_raw = True elif kind in 'ub': is_raw = s.systring[1:2].lower() == 'r' elif kind != 'c': kind = '' if kind == '' and kind_override is None and Future.unicode_literals in s.context.future_directives: chars = StringEncoding.StrLiteralBuilder(s.source_encoding) kind = 'u' else: if kind_override is not None and kind_override in 'ub': kind = kind_override if kind == 'u': chars = StringEncoding.UnicodeLiteralBuilder() elif kind == '': chars = StringEncoding.StrLiteralBuilder(s.source_encoding) else: chars = StringEncoding.BytesLiteralBuilder(s.source_encoding) while 1: s.next() sy = s.sy systr = s.systring #print "p_string_literal: sy =", sy, repr(s.systring) ### if sy == 'CHARS': chars.append(systr) if is_python3_source and not has_non_ASCII_literal_characters and check_for_non_ascii_characters(systr): has_non_ASCII_literal_characters = True elif sy == 'ESCAPE': if is_raw: chars.append(systr) if is_python3_source and not has_non_ASCII_literal_characters \ and check_for_non_ascii_characters(systr): has_non_ASCII_literal_characters = True else: c = systr[1] if c in u"01234567": chars.append_charval( int(systr[1:], 8) ) elif c in u"'\"\\": chars.append(c) elif c in u"abfnrtv": chars.append( StringEncoding.char_from_escape_sequence(systr)) elif c == u'\n': pass elif c == u'x': # \xXX if len(systr) == 4: chars.append_charval( int(systr[2:], 16) ) else: s.error("Invalid hex escape '%s'" % systr, fatal=False) elif c in u'NUu' and kind in ('u', ''): # \uxxxx, \Uxxxxxxxx, \N{...} chrval = -1 if c == u'N': try: chrval = ord(lookup_unicodechar(systr[3:-1])) except KeyError: s.error("Unknown Unicode character name %s" % repr(systr[3:-1]).lstrip('u')) elif len(systr) in (6,10): chrval = int(systr[2:], 16) if chrval > 1114111: # sys.maxunicode: s.error("Invalid unicode escape '%s'" % systr) chrval = -1 else: s.error("Invalid unicode escape '%s'" % systr, fatal=False) if chrval >= 0: chars.append_uescape(chrval, systr) else: chars.append(u'\\' + systr[1:]) if is_python3_source and not has_non_ASCII_literal_characters \ and check_for_non_ascii_characters(systr): has_non_ASCII_literal_characters = True elif sy == 'NEWLINE': chars.append(u'\n') elif sy == 'END_STRING': break elif sy == 'EOF': s.error("Unclosed string literal", pos=pos) else: s.error("Unexpected token %r:%r in string literal" % (sy, s.systring)) if kind == 'c': unicode_value = None bytes_value = chars.getchar() if len(bytes_value) != 1: error(pos, u"invalid character literal: %r" % bytes_value) else: bytes_value, unicode_value = chars.getstrings() if is_python3_source and has_non_ASCII_literal_characters: # Python 3 forbids literal non-ASCII characters in byte strings if kind != 'u': s.error("bytes can only contain ASCII literal characters.", pos=pos, fatal=False) bytes_value = None s.next() return (kind, bytes_value, unicode_value) def p_list_maker(s): # s.sy == '[' pos = s.position() s.next() if s.sy == ']': s.expect(']') return ExprNodes.ListNode(pos, args = []) expr = p_test(s) if s.sy == 'for': append = ExprNodes.ComprehensionAppendNode(pos, expr=expr) loop = p_comp_for(s, append) s.expect(']') return 
ExprNodes.ComprehensionNode( pos, loop=loop, append=append, type = Builtin.list_type, # list comprehensions leak their loop variable in Py2 has_local_scope = s.context.language_level >= 3) else: if s.sy == ',': s.next() exprs = p_simple_expr_list(s, expr) else: exprs = [expr] s.expect(']') return ExprNodes.ListNode(pos, args = exprs) def p_comp_iter(s, body): if s.sy == 'for': return p_comp_for(s, body) elif s.sy == 'if': return p_comp_if(s, body) else: # insert the 'append' operation into the loop return body def p_comp_for(s, body): # s.sy == 'for' pos = s.position() s.next() kw = p_for_bounds(s, allow_testlist=False) kw.update(else_clause = None, body = p_comp_iter(s, body)) return Nodes.ForStatNode(pos, **kw) def p_comp_if(s, body): # s.sy == 'if' pos = s.position() s.next() test = p_test_nocond(s) return Nodes.IfStatNode(pos, if_clauses = [Nodes.IfClauseNode(pos, condition = test, body = p_comp_iter(s, body))], else_clause = None ) def p_dict_or_set_maker(s): # s.sy == '{' pos = s.position() s.next() if s.sy == '}': s.next() return ExprNodes.DictNode(pos, key_value_pairs = []) item = p_test(s) if s.sy == ',' or s.sy == '}': # set literal values = [item] while s.sy == ',': s.next() if s.sy == '}': break values.append( p_test(s) ) s.expect('}') return ExprNodes.SetNode(pos, args=values) elif s.sy == 'for': # set comprehension append = ExprNodes.ComprehensionAppendNode( item.pos, expr=item) loop = p_comp_for(s, append) s.expect('}') return ExprNodes.ComprehensionNode( pos, loop=loop, append=append, type=Builtin.set_type) elif s.sy == ':': # dict literal or comprehension key = item s.next() value = p_test(s) if s.sy == 'for': # dict comprehension append = ExprNodes.DictComprehensionAppendNode( item.pos, key_expr=key, value_expr=value) loop = p_comp_for(s, append) s.expect('}') return ExprNodes.ComprehensionNode( pos, loop=loop, append=append, type=Builtin.dict_type) else: # dict literal items = [ExprNodes.DictItemNode(key.pos, key=key, value=value)] while s.sy == ',': s.next() if s.sy == '}': break key = p_test(s) s.expect(':') value = p_test(s) items.append( ExprNodes.DictItemNode(key.pos, key=key, value=value)) s.expect('}') return ExprNodes.DictNode(pos, key_value_pairs=items) else: # raise an error s.expect('}') return ExprNodes.DictNode(pos, key_value_pairs = []) def p_backquote_expr(s): # s.sy == '`' pos = s.position() s.next() args = [p_test(s)] while s.sy == ',': s.next() args.append(p_test(s)) s.expect('`') if len(args) == 1: arg = args[0] else: arg = ExprNodes.TupleNode(pos, args = args) return ExprNodes.BackquoteNode(pos, arg = arg) def p_simple_expr_list(s, expr=None): exprs = expr is not None and [expr] or [] while s.sy not in expr_terminators: exprs.append( p_test(s) ) if s.sy != ',': break s.next() return exprs def p_test_or_starred_expr_list(s, expr=None): exprs = expr is not None and [expr] or [] while s.sy not in expr_terminators: exprs.append( p_test_or_starred_expr(s) ) if s.sy != ',': break s.next() return exprs def p_testlist(s): pos = s.position() expr = p_test(s) if s.sy == ',': s.next() exprs = p_simple_expr_list(s, expr) return ExprNodes.TupleNode(pos, args = exprs) else: return expr def p_testlist_star_expr(s): pos = s.position() expr = p_test_or_starred_expr(s) if s.sy == ',': s.next() exprs = p_test_or_starred_expr_list(s, expr) return ExprNodes.TupleNode(pos, args = exprs) else: return expr def p_testlist_comp(s): pos = s.position() expr = p_test_or_starred_expr(s) if s.sy == ',': s.next() exprs = p_test_or_starred_expr_list(s, expr) return 
ExprNodes.TupleNode(pos, args = exprs) elif s.sy == 'for': return p_genexp(s, expr) else: return expr def p_genexp(s, expr): # s.sy == 'for' loop = p_comp_for(s, Nodes.ExprStatNode( expr.pos, expr = ExprNodes.YieldExprNode(expr.pos, arg=expr))) return ExprNodes.GeneratorExpressionNode(expr.pos, loop=loop) expr_terminators = cython.declare(set, set([ ')', ']', '}', ':', '=', 'NEWLINE'])) def p_global_statement(s): # assume s.sy == 'global' pos = s.position() s.next() names = p_ident_list(s) return Nodes.GlobalNode(pos, names = names) def p_nonlocal_statement(s): pos = s.position() s.next() names = p_ident_list(s) return Nodes.NonlocalNode(pos, names = names) def p_expression_or_assignment(s): expr_list = [p_testlist_star_expr(s)] if s.sy == '=' and expr_list[0].is_starred: # This is a common enough error to make when learning Cython to let # it fail as early as possible and give a very clear error message. s.error("a starred assignment target must be in a list or tuple" " - maybe you meant to use an index assignment: var[0] = ...", pos=expr_list[0].pos) while s.sy == '=': s.next() if s.sy == 'yield': expr = p_yield_expression(s) else: expr = p_testlist_star_expr(s) expr_list.append(expr) if len(expr_list) == 1: if re.match(r"([+*/\%^\&|-]|<<|>>|\*\*|//)=", s.sy): lhs = expr_list[0] if isinstance(lhs, ExprNodes.SliceIndexNode): # implementation requires IndexNode lhs = ExprNodes.IndexNode( lhs.pos, base=lhs.base, index=make_slice_node(lhs.pos, lhs.start, lhs.stop)) elif not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode) ): error(lhs.pos, "Illegal operand for inplace operation.") operator = s.sy[:-1] s.next() if s.sy == 'yield': rhs = p_yield_expression(s) else: rhs = p_testlist(s) return Nodes.InPlaceAssignmentNode(lhs.pos, operator = operator, lhs = lhs, rhs = rhs) expr = expr_list[0] return Nodes.ExprStatNode(expr.pos, expr=expr) rhs = expr_list[-1] if len(expr_list) == 2: return Nodes.SingleAssignmentNode(rhs.pos, lhs = expr_list[0], rhs = rhs) else: return Nodes.CascadedAssignmentNode(rhs.pos, lhs_list = expr_list[:-1], rhs = rhs) def p_print_statement(s): # s.sy == 'print' pos = s.position() ends_with_comma = 0 s.next() if s.sy == '>>': s.next() stream = p_test(s) if s.sy == ',': s.next() ends_with_comma = s.sy in ('NEWLINE', 'EOF') else: stream = None args = [] if s.sy not in ('NEWLINE', 'EOF'): args.append(p_test(s)) while s.sy == ',': s.next() if s.sy in ('NEWLINE', 'EOF'): ends_with_comma = 1 break args.append(p_test(s)) arg_tuple = ExprNodes.TupleNode(pos, args = args) return Nodes.PrintStatNode(pos, arg_tuple = arg_tuple, stream = stream, append_newline = not ends_with_comma) def p_exec_statement(s): # s.sy == 'exec' pos = s.position() s.next() code = p_bit_expr(s) if isinstance(code, ExprNodes.TupleNode): # Py3 compatibility syntax tuple_variant = True args = code.args if len(args) not in (2, 3): s.error("expected tuple of length 2 or 3, got length %d" % len(args), pos=pos, fatal=False) args = [code] else: tuple_variant = False args = [code] if s.sy == 'in': if tuple_variant: s.error("tuple variant of exec does not support additional 'in' arguments", fatal=False) s.next() args.append(p_test(s)) if s.sy == ',': s.next() args.append(p_test(s)) return Nodes.ExecStatNode(pos, args=args) def p_del_statement(s): # s.sy == 'del' pos = s.position() s.next() # FIXME: 'exprlist' in Python args = p_simple_expr_list(s) return Nodes.DelStatNode(pos, args = args) def p_pass_statement(s, with_newline = 0): pos = s.position() s.expect('pass') if with_newline: 
s.expect_newline("Expected a newline") return Nodes.PassStatNode(pos) def p_break_statement(s): # s.sy == 'break' pos = s.position() s.next() return Nodes.BreakStatNode(pos) def p_continue_statement(s): # s.sy == 'continue' pos = s.position() s.next() return Nodes.ContinueStatNode(pos) def p_return_statement(s): # s.sy == 'return' pos = s.position() s.next() if s.sy not in statement_terminators: value = p_testlist(s) else: value = None return Nodes.ReturnStatNode(pos, value = value) def p_raise_statement(s): # s.sy == 'raise' pos = s.position() s.next() exc_type = None exc_value = None exc_tb = None cause = None if s.sy not in statement_terminators: exc_type = p_test(s) if s.sy == ',': s.next() exc_value = p_test(s) if s.sy == ',': s.next() exc_tb = p_test(s) elif s.sy == 'from': s.next() cause = p_test(s) if exc_type or exc_value or exc_tb: return Nodes.RaiseStatNode(pos, exc_type = exc_type, exc_value = exc_value, exc_tb = exc_tb, cause = cause) else: return Nodes.ReraiseStatNode(pos) def p_import_statement(s): # s.sy in ('import', 'cimport') pos = s.position() kind = s.sy s.next() items = [p_dotted_name(s, as_allowed = 1)] while s.sy == ',': s.next() items.append(p_dotted_name(s, as_allowed = 1)) stats = [] for pos, target_name, dotted_name, as_name in items: dotted_name = EncodedString(dotted_name) if kind == 'cimport': stat = Nodes.CImportStatNode(pos, module_name = dotted_name, as_name = as_name) else: if as_name and "." in dotted_name: name_list = ExprNodes.ListNode(pos, args = [ ExprNodes.IdentifierStringNode(pos, value = EncodedString("*"))]) else: name_list = None stat = Nodes.SingleAssignmentNode(pos, lhs = ExprNodes.NameNode(pos, name = as_name or target_name), rhs = ExprNodes.ImportNode(pos, module_name = ExprNodes.IdentifierStringNode( pos, value = dotted_name), level = None, name_list = name_list)) stats.append(stat) return Nodes.StatListNode(pos, stats = stats) def p_from_import_statement(s, first_statement = 0): # s.sy == 'from' pos = s.position() s.next() if s.sy == '.': # count relative import level level = 0 while s.sy == '.': level += 1 s.next() if s.sy == 'cimport': s.error("Relative cimport is not supported yet") else: level = None if level is not None and s.sy == 'import': # we are dealing with "from .. import foo, bar" dotted_name_pos, dotted_name = s.position(), '' elif level is not None and s.sy == 'cimport': # "from .. 
cimport" s.error("Relative cimport is not supported yet") else: (dotted_name_pos, _, dotted_name, _) = \ p_dotted_name(s, as_allowed = 0) if s.sy in ('import', 'cimport'): kind = s.sy s.next() else: s.error("Expected 'import' or 'cimport'") is_cimport = kind == 'cimport' is_parenthesized = False if s.sy == '*': imported_names = [(s.position(), "*", None, None)] s.next() else: if s.sy == '(': is_parenthesized = True s.next() imported_names = [p_imported_name(s, is_cimport)] while s.sy == ',': s.next() if is_parenthesized and s.sy == ')': break imported_names.append(p_imported_name(s, is_cimport)) if is_parenthesized: s.expect(')') dotted_name = EncodedString(dotted_name) if dotted_name == '__future__': if not first_statement: s.error("from __future__ imports must occur at the beginning of the file") elif level is not None: s.error("invalid syntax") else: for (name_pos, name, as_name, kind) in imported_names: if name == "braces": s.error("not a chance", name_pos) break try: directive = getattr(Future, name) except AttributeError: s.error("future feature %s is not defined" % name, name_pos) break s.context.future_directives.add(directive) return Nodes.PassStatNode(pos) elif kind == 'cimport': return Nodes.FromCImportStatNode(pos, module_name = dotted_name, imported_names = imported_names) else: imported_name_strings = [] items = [] for (name_pos, name, as_name, kind) in imported_names: encoded_name = EncodedString(name) imported_name_strings.append( ExprNodes.IdentifierStringNode(name_pos, value = encoded_name)) items.append( (name, ExprNodes.NameNode(name_pos, name = as_name or name))) import_list = ExprNodes.ListNode( imported_names[0][0], args = imported_name_strings) dotted_name = EncodedString(dotted_name) return Nodes.FromImportStatNode(pos, module = ExprNodes.ImportNode(dotted_name_pos, module_name = ExprNodes.IdentifierStringNode(pos, value = dotted_name), level = level, name_list = import_list), items = items) imported_name_kinds = cython.declare( set, set(['class', 'struct', 'union'])) def p_imported_name(s, is_cimport): pos = s.position() kind = None if is_cimport and s.systring in imported_name_kinds: kind = s.systring s.next() name = p_ident(s) as_name = p_as_name(s) return (pos, name, as_name, kind) def p_dotted_name(s, as_allowed): pos = s.position() target_name = p_ident(s) as_name = None names = [target_name] while s.sy == '.': s.next() names.append(p_ident(s)) if as_allowed: as_name = p_as_name(s) return (pos, target_name, u'.'.join(names), as_name) def p_as_name(s): if s.sy == 'IDENT' and s.systring == 'as': s.next() return p_ident(s) else: return None def p_assert_statement(s): # s.sy == 'assert' pos = s.position() s.next() cond = p_test(s) if s.sy == ',': s.next() value = p_test(s) else: value = None return Nodes.AssertStatNode(pos, cond = cond, value = value) statement_terminators = cython.declare(set, set([';', 'NEWLINE', 'EOF'])) def p_if_statement(s): # s.sy == 'if' pos = s.position() s.next() if_clauses = [p_if_clause(s)] while s.sy == 'elif': s.next() if_clauses.append(p_if_clause(s)) else_clause = p_else_clause(s) return Nodes.IfStatNode(pos, if_clauses = if_clauses, else_clause = else_clause) def p_if_clause(s): pos = s.position() test = p_test(s) body = p_suite(s) return Nodes.IfClauseNode(pos, condition = test, body = body) def p_else_clause(s): if s.sy == 'else': s.next() return p_suite(s) else: return None def p_while_statement(s): # s.sy == 'while' pos = s.position() s.next() test = p_test(s) body = p_suite(s) else_clause = p_else_clause(s) return 
Nodes.WhileStatNode(pos, condition = test, body = body, else_clause = else_clause) def p_for_statement(s): # s.sy == 'for' pos = s.position() s.next() kw = p_for_bounds(s, allow_testlist=True) body = p_suite(s) else_clause = p_else_clause(s) kw.update(body = body, else_clause = else_clause) return Nodes.ForStatNode(pos, **kw) def p_for_bounds(s, allow_testlist=True): target = p_for_target(s) if s.sy == 'in': s.next() iterator = p_for_iterator(s, allow_testlist) return dict( target = target, iterator = iterator ) elif not s.in_python_file: if s.sy == 'from': s.next() bound1 = p_bit_expr(s) else: # Support shorter "for a <= x < b" syntax bound1, target = target, None rel1 = p_for_from_relation(s) name2_pos = s.position() name2 = p_ident(s) rel2_pos = s.position() rel2 = p_for_from_relation(s) bound2 = p_bit_expr(s) step = p_for_from_step(s) if target is None: target = ExprNodes.NameNode(name2_pos, name = name2) else: if not target.is_name: error(target.pos, "Target of for-from statement must be a variable name") elif name2 != target.name: error(name2_pos, "Variable name in for-from range does not match target") if rel1[0] != rel2[0]: error(rel2_pos, "Relation directions in for-from do not match") return dict(target = target, bound1 = bound1, relation1 = rel1, relation2 = rel2, bound2 = bound2, step = step, ) else: s.expect('in') return {} def p_for_from_relation(s): if s.sy in inequality_relations: op = s.sy s.next() return op else: s.error("Expected one of '<', '<=', '>' '>='") def p_for_from_step(s): if s.sy == 'IDENT' and s.systring == 'by': s.next() step = p_bit_expr(s) return step else: return None inequality_relations = cython.declare(set, set(['<', '<=', '>', '>='])) def p_target(s, terminator): pos = s.position() expr = p_starred_expr(s) if s.sy == ',': s.next() exprs = [expr] while s.sy != terminator: exprs.append(p_starred_expr(s)) if s.sy != ',': break s.next() return ExprNodes.TupleNode(pos, args = exprs) else: return expr def p_for_target(s): return p_target(s, 'in') def p_for_iterator(s, allow_testlist=True): pos = s.position() if allow_testlist: expr = p_testlist(s) else: expr = p_or_test(s) return ExprNodes.IteratorNode(pos, sequence = expr) def p_try_statement(s): # s.sy == 'try' pos = s.position() s.next() body = p_suite(s) except_clauses = [] else_clause = None if s.sy in ('except', 'else'): while s.sy == 'except': except_clauses.append(p_except_clause(s)) if s.sy == 'else': s.next() else_clause = p_suite(s) body = Nodes.TryExceptStatNode(pos, body = body, except_clauses = except_clauses, else_clause = else_clause) if s.sy != 'finally': return body # try-except-finally is equivalent to nested try-except/try-finally if s.sy == 'finally': s.next() finally_clause = p_suite(s) return Nodes.TryFinallyStatNode(pos, body = body, finally_clause = finally_clause) else: s.error("Expected 'except' or 'finally'") def p_except_clause(s): # s.sy == 'except' pos = s.position() s.next() exc_type = None exc_value = None is_except_as = False if s.sy != ':': exc_type = p_test(s) # normalise into list of single exception tests if isinstance(exc_type, ExprNodes.TupleNode): exc_type = exc_type.args else: exc_type = [exc_type] if s.sy == ',' or (s.sy == 'IDENT' and s.systring == 'as' and s.context.language_level == 2): s.next() exc_value = p_test(s) elif s.sy == 'IDENT' and s.systring == 'as': # Py3 syntax requires a name here s.next() pos2 = s.position() name = p_ident(s) exc_value = ExprNodes.NameNode(pos2, name = name) is_except_as = True body = p_suite(s) return Nodes.ExceptClauseNode(pos, 
pattern = exc_type, target = exc_value, body = body, is_except_as=is_except_as) def p_include_statement(s, ctx): pos = s.position() s.next() # 'include' unicode_include_file_name = p_string_literal(s, 'u')[2] s.expect_newline("Syntax error in include statement") if s.compile_time_eval: include_file_name = unicode_include_file_name include_file_path = s.context.find_include_file(include_file_name, pos) if include_file_path: s.included_files.append(include_file_name) f = Utils.open_source_file(include_file_path, mode="rU") source_desc = FileSourceDescriptor(include_file_path) s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments) try: tree = p_statement_list(s2, ctx) finally: f.close() return tree else: return None else: return Nodes.PassStatNode(pos) def p_with_statement(s): s.next() # 'with' if s.systring == 'template' and not s.in_python_file: node = p_with_template(s) else: node = p_with_items(s) return node def p_with_items(s): pos = s.position() if not s.in_python_file and s.sy == 'IDENT' and s.systring in ('nogil', 'gil'): state = s.systring s.next() if s.sy == ',': s.next() body = p_with_items(s) else: body = p_suite(s) return Nodes.GILStatNode(pos, state = state, body = body) else: manager = p_test(s) target = None if s.sy == 'IDENT' and s.systring == 'as': s.next() target = p_starred_expr(s) if s.sy == ',': s.next() body = p_with_items(s) else: body = p_suite(s) return Nodes.WithStatNode(pos, manager = manager, target = target, body = body) def p_with_template(s): pos = s.position() templates = [] s.next() s.expect('[') templates.append(s.systring) s.next() while s.systring == ',': s.next() templates.append(s.systring) s.next() s.expect(']') if s.sy == ':': s.next() s.expect_newline("Syntax error in template function declaration") s.expect_indent() body_ctx = Ctx() body_ctx.templates = templates func_or_var = p_c_func_or_var_declaration(s, pos, body_ctx) s.expect_dedent() return func_or_var else: error(pos, "Syntax error in template function declaration") def p_simple_statement(s, first_statement = 0): #print "p_simple_statement:", s.sy, s.systring ### if s.sy == 'global': node = p_global_statement(s) elif s.sy == 'nonlocal': node = p_nonlocal_statement(s) elif s.sy == 'print': node = p_print_statement(s) elif s.sy == 'exec': node = p_exec_statement(s) elif s.sy == 'del': node = p_del_statement(s) elif s.sy == 'break': node = p_break_statement(s) elif s.sy == 'continue': node = p_continue_statement(s) elif s.sy == 'return': node = p_return_statement(s) elif s.sy == 'raise': node = p_raise_statement(s) elif s.sy in ('import', 'cimport'): node = p_import_statement(s) elif s.sy == 'from': node = p_from_import_statement(s, first_statement = first_statement) elif s.sy == 'yield': node = p_yield_statement(s) elif s.sy == 'assert': node = p_assert_statement(s) elif s.sy == 'pass': node = p_pass_statement(s) else: node = p_expression_or_assignment(s) return node def p_simple_statement_list(s, ctx, first_statement = 0): # Parse a series of simple statements on one line # separated by semicolons. 
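    # e.g. "a = 1; pass; b = 2" collects two statements below and drops the
    # PassStatNode, yielding a StatListNode of the two assignments.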
stat = p_simple_statement(s, first_statement = first_statement) pos = stat.pos stats = [] if not isinstance(stat, Nodes.PassStatNode): stats.append(stat) while s.sy == ';': #print "p_simple_statement_list: maybe more to follow" ### s.next() if s.sy in ('NEWLINE', 'EOF'): break stat = p_simple_statement(s, first_statement = first_statement) if isinstance(stat, Nodes.PassStatNode): continue stats.append(stat) first_statement = False if not stats: stat = Nodes.PassStatNode(pos) elif len(stats) == 1: stat = stats[0] else: stat = Nodes.StatListNode(pos, stats = stats) s.expect_newline("Syntax error in simple statement list") return stat def p_compile_time_expr(s): old = s.compile_time_expr s.compile_time_expr = 1 expr = p_testlist(s) s.compile_time_expr = old return expr def p_DEF_statement(s): pos = s.position() denv = s.compile_time_env s.next() # 'DEF' name = p_ident(s) s.expect('=') expr = p_compile_time_expr(s) value = expr.compile_time_value(denv) #print "p_DEF_statement: %s = %r" % (name, value) ### denv.declare(name, value) s.expect_newline() return Nodes.PassStatNode(pos) def p_IF_statement(s, ctx): pos = s.position() saved_eval = s.compile_time_eval current_eval = saved_eval denv = s.compile_time_env result = None while 1: s.next() # 'IF' or 'ELIF' expr = p_compile_time_expr(s) s.compile_time_eval = current_eval and bool(expr.compile_time_value(denv)) body = p_suite(s, ctx) if s.compile_time_eval: result = body current_eval = 0 if s.sy != 'ELIF': break if s.sy == 'ELSE': s.next() s.compile_time_eval = current_eval body = p_suite(s, ctx) if current_eval: result = body if not result: result = Nodes.PassStatNode(pos) s.compile_time_eval = saved_eval return result def p_statement(s, ctx, first_statement = 0): cdef_flag = ctx.cdef_flag decorators = None if s.sy == 'ctypedef': if ctx.level not in ('module', 'module_pxd'): s.error("ctypedef statement not allowed here") #if ctx.api: # error(s.position(), "'api' not allowed with 'ctypedef'") return p_ctypedef_statement(s, ctx) elif s.sy == 'DEF': return p_DEF_statement(s) elif s.sy == 'IF': return p_IF_statement(s, ctx) elif s.sy == 'DECORATOR': if ctx.level not in ('module', 'class', 'c_class', 'function', 'property', 'module_pxd', 'c_class_pxd', 'other'): s.error('decorator not allowed here') s.level = ctx.level decorators = p_decorators(s) bad_toks = 'def', 'cdef', 'cpdef', 'class' if not ctx.allow_struct_enum_decorator and s.sy not in bad_toks: s.error("Decorators can only be followed by functions or classes") elif s.sy == 'pass' and cdef_flag: # empty cdef block return p_pass_statement(s, with_newline = 1) overridable = 0 if s.sy == 'cdef': cdef_flag = 1 s.next() elif s.sy == 'cpdef': cdef_flag = 1 overridable = 1 s.next() if cdef_flag: if ctx.level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'): s.error('cdef statement not allowed here') s.level = ctx.level node = p_cdef_statement(s, ctx(overridable = overridable)) if decorators is not None: tup = Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode if ctx.allow_struct_enum_decorator: tup += Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode if not isinstance(node, tup): s.error("Decorators can only be followed by functions or classes") node.decorators = decorators return node else: if ctx.api: s.error("'api' not allowed with this statement", fatal=False) elif s.sy == 'def': # def statements aren't allowed in pxd files, except # as part of a cdef class if ('pxd' in ctx.level) and (ctx.level != 'c_class_pxd'): s.error('def statement not allowed here') s.level = 
ctx.level return p_def_statement(s, decorators) elif s.sy == 'class': if ctx.level not in ('module', 'function', 'class', 'other'): s.error("class definition not allowed here") return p_class_statement(s, decorators) elif s.sy == 'include': if ctx.level not in ('module', 'module_pxd'): s.error("include statement not allowed here") return p_include_statement(s, ctx) elif ctx.level == 'c_class' and s.sy == 'IDENT' and s.systring == 'property': return p_property_decl(s) elif s.sy == 'pass' and ctx.level != 'property': return p_pass_statement(s, with_newline=True) else: if ctx.level in ('c_class_pxd', 'property'): node = p_ignorable_statement(s) if node is not None: return node s.error("Executable statement not allowed here") if s.sy == 'if': return p_if_statement(s) elif s.sy == 'while': return p_while_statement(s) elif s.sy == 'for': return p_for_statement(s) elif s.sy == 'try': return p_try_statement(s) elif s.sy == 'with': return p_with_statement(s) else: return p_simple_statement_list( s, ctx, first_statement = first_statement) def p_statement_list(s, ctx, first_statement = 0): # Parse a series of statements separated by newlines. pos = s.position() stats = [] while s.sy not in ('DEDENT', 'EOF'): stat = p_statement(s, ctx, first_statement = first_statement) if isinstance(stat, Nodes.PassStatNode): continue stats.append(stat) first_statement = False if not stats: return Nodes.PassStatNode(pos) elif len(stats) == 1: return stats[0] else: return Nodes.StatListNode(pos, stats = stats) def p_suite(s, ctx=Ctx()): return p_suite_with_docstring(s, ctx, with_doc_only=False)[1] def p_suite_with_docstring(s, ctx, with_doc_only=False): s.expect(':') doc = None if s.sy == 'NEWLINE': s.next() s.expect_indent() if with_doc_only: doc = p_doc_string(s) body = p_statement_list(s, ctx) s.expect_dedent() else: if ctx.api: s.error("'api' not allowed with this statement", fatal=False) if ctx.level in ('module', 'class', 'function', 'other'): body = p_simple_statement_list(s, ctx) else: body = p_pass_statement(s) s.expect_newline("Syntax error in declarations") if not with_doc_only: doc, body = _extract_docstring(body) return doc, body def p_positional_and_keyword_args(s, end_sy_set, templates = None): """ Parses positional and keyword arguments. end_sy_set should contain any s.sy that terminate the argument list. Argument expansion (* and **) are not allowed. 
Returns: (positional_args, keyword_args) """ positional_args = [] keyword_args = [] pos_idx = 0 while s.sy not in end_sy_set: if s.sy == '*' or s.sy == '**': s.error('Argument expansion not allowed here.', fatal=False) parsed_type = False if s.sy == 'IDENT' and s.peek()[0] == '=': ident = s.systring s.next() # s.sy is '=' s.next() if looking_at_expr(s): arg = p_test(s) else: base_type = p_c_base_type(s, templates = templates) declarator = p_c_declarator(s, empty = 1) arg = Nodes.CComplexBaseTypeNode(base_type.pos, base_type = base_type, declarator = declarator) parsed_type = True keyword_node = ExprNodes.IdentifierStringNode( arg.pos, value = EncodedString(ident)) keyword_args.append((keyword_node, arg)) was_keyword = True else: if looking_at_expr(s): arg = p_test(s) else: base_type = p_c_base_type(s, templates = templates) declarator = p_c_declarator(s, empty = 1) arg = Nodes.CComplexBaseTypeNode(base_type.pos, base_type = base_type, declarator = declarator) parsed_type = True positional_args.append(arg) pos_idx += 1 if len(keyword_args) > 0: s.error("Non-keyword arg following keyword arg", pos=arg.pos) if s.sy != ',': if s.sy not in end_sy_set: if parsed_type: s.error("Unmatched %s" % " or ".join(end_sy_set)) break s.next() return positional_args, keyword_args def p_c_base_type(s, self_flag = 0, nonempty = 0, templates = None): # If self_flag is true, this is the base type for the # self argument of a C method of an extension type. if s.sy == '(': return p_c_complex_base_type(s, templates = templates) else: return p_c_simple_base_type(s, self_flag, nonempty = nonempty, templates = templates) def p_calling_convention(s): if s.sy == 'IDENT' and s.systring in calling_convention_words: result = s.systring s.next() return result else: return "" calling_convention_words = cython.declare( set, set(["__stdcall", "__cdecl", "__fastcall"])) def p_c_complex_base_type(s, templates = None): # s.sy == '(' pos = s.position() s.next() base_type = p_c_base_type(s, templates = templates) declarator = p_c_declarator(s, empty = 1) s.expect(')') type_node = Nodes.CComplexBaseTypeNode(pos, base_type = base_type, declarator = declarator) if s.sy == '[': if is_memoryviewslice_access(s): type_node = p_memoryviewslice_access(s, type_node) else: type_node = p_buffer_or_template(s, type_node, templates) return type_node def p_c_simple_base_type(s, self_flag, nonempty, templates = None): #print "p_c_simple_base_type: self_flag =", self_flag, nonempty is_basic = 0 signed = 1 longness = 0 complex = 0 module_path = [] pos = s.position() if not s.sy == 'IDENT': error(pos, "Expected an identifier, found '%s'" % s.sy) if s.systring == 'const': s.next() base_type = p_c_base_type(s, self_flag = self_flag, nonempty = nonempty, templates = templates) return Nodes.CConstTypeNode(pos, base_type = base_type) if looking_at_base_type(s): #print "p_c_simple_base_type: looking_at_base_type at", s.position() is_basic = 1 if s.sy == 'IDENT' and s.systring in special_basic_c_types: signed, longness = special_basic_c_types[s.systring] name = s.systring s.next() else: signed, longness = p_sign_and_longness(s) if s.sy == 'IDENT' and s.systring in basic_c_type_names: name = s.systring s.next() else: name = 'int' # long [int], short [int], long [int] complex, etc. 
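        # e.g. "unsigned long" reaches this point with signed == 0,
        # longness == 1 and the implicit name 'int'; a trailing "complex"
        # is picked up below.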
if s.sy == 'IDENT' and s.systring == 'complex': complex = 1 s.next() elif looking_at_dotted_name(s): #print "p_c_simple_base_type: looking_at_type_name at", s.position() name = s.systring s.next() while s.sy == '.': module_path.append(name) s.next() name = p_ident(s) else: name = s.systring s.next() if nonempty and s.sy != 'IDENT': # Make sure this is not a declaration of a variable or function. if s.sy == '(': s.next() if (s.sy == '*' or s.sy == '**' or s.sy == '&' or (s.sy == 'IDENT' and s.systring in calling_convention_words)): s.put_back('(', '(') else: s.put_back('(', '(') s.put_back('IDENT', name) name = None elif s.sy not in ('*', '**', '[', '&'): s.put_back('IDENT', name) name = None type_node = Nodes.CSimpleBaseTypeNode(pos, name = name, module_path = module_path, is_basic_c_type = is_basic, signed = signed, complex = complex, longness = longness, is_self_arg = self_flag, templates = templates) # declarations here. if s.sy == '[': if is_memoryviewslice_access(s): type_node = p_memoryviewslice_access(s, type_node) else: type_node = p_buffer_or_template(s, type_node, templates) if s.sy == '.': s.next() name = p_ident(s) type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name) return type_node def p_buffer_or_template(s, base_type_node, templates): # s.sy == '[' pos = s.position() s.next() # Note that buffer_positional_options_count=1, so the only positional argument is dtype. # For templated types, all parameters are types. positional_args, keyword_args = ( p_positional_and_keyword_args(s, (']',), templates) ) s.expect(']') if s.sy == '[': base_type_node = p_buffer_or_template(s, base_type_node, templates) keyword_dict = ExprNodes.DictNode(pos, key_value_pairs = [ ExprNodes.DictItemNode(pos=key.pos, key=key, value=value) for key, value in keyword_args ]) result = Nodes.TemplatedTypeNode(pos, positional_args = positional_args, keyword_args = keyword_dict, base_type_node = base_type_node) return result def p_bracketed_base_type(s, base_type_node, nonempty, empty): # s.sy == '[' if empty and not nonempty: # sizeof-like thing. Only anonymous C arrays allowed (int[SIZE]). return base_type_node elif not empty and nonempty: # declaration of either memoryview slice or buffer. if is_memoryviewslice_access(s): return p_memoryviewslice_access(s, base_type_node) else: return p_buffer_or_template(s, base_type_node, None) # return p_buffer_access(s, base_type_node) elif not empty and not nonempty: # only anonymous C arrays and memoryview slice arrays here. We # disallow buffer declarations for now, due to ambiguity with anonymous # C arrays. if is_memoryviewslice_access(s): return p_memoryviewslice_access(s, base_type_node) else: return base_type_node def is_memoryviewslice_access(s): # s.sy == '[' # a memoryview slice declaration is distinguishable from a buffer access # declaration by the first entry in the bracketed list. The buffer will # not have an unnested colon in the first entry; the memoryview slice will. 
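    # e.g. "int[:, ::1]" is a memoryview slice (its first entry starts with
    # a ':'), while "np.ndarray[int, ndim=2]" or "int[SIZE]" is not.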
saved = [(s.sy, s.systring)] s.next() retval = False if s.systring == ':': retval = True elif s.sy == 'INT': saved.append((s.sy, s.systring)) s.next() if s.sy == ':': retval = True for sv in saved[::-1]: s.put_back(*sv) return retval def p_memoryviewslice_access(s, base_type_node): # s.sy == '[' pos = s.position() s.next() subscripts, _ = p_subscript_list(s) # make sure each entry in subscripts is a slice for subscript in subscripts: if len(subscript) < 2: s.error("An axis specification in memoryview declaration does not have a ':'.") s.expect(']') indexes = make_slice_nodes(pos, subscripts) result = Nodes.MemoryViewSliceTypeNode(pos, base_type_node = base_type_node, axes = indexes) return result def looking_at_name(s): return s.sy == 'IDENT' and not s.systring in calling_convention_words def looking_at_expr(s): if s.systring in base_type_start_words: return False elif s.sy == 'IDENT': is_type = False name = s.systring dotted_path = [] s.next() while s.sy == '.': s.next() dotted_path.append(s.systring) s.expect('IDENT') saved = s.sy, s.systring if s.sy == 'IDENT': is_type = True elif s.sy == '*' or s.sy == '**': s.next() is_type = s.sy in (')', ']') s.put_back(*saved) elif s.sy == '(': s.next() is_type = s.sy == '*' s.put_back(*saved) elif s.sy == '[': s.next() is_type = s.sy == ']' s.put_back(*saved) dotted_path.reverse() for p in dotted_path: s.put_back('IDENT', p) s.put_back('.', '.') s.put_back('IDENT', name) return not is_type and saved[0] else: return True def looking_at_base_type(s): #print "looking_at_base_type?", s.sy, s.systring, s.position() return s.sy == 'IDENT' and s.systring in base_type_start_words def looking_at_dotted_name(s): if s.sy == 'IDENT': name = s.systring s.next() result = s.sy == '.' s.put_back('IDENT', name) return result else: return 0 def looking_at_call(s): "See if we're looking at a.b.c(" # Don't mess up the original position, so save and restore it. # Unfortunately there's no good way to handle this, as a subsequent call # to next() will not advance the position until it reads a new token. position = s.start_line, s.start_col result = looking_at_expr(s) == u'(' if not result: s.start_line, s.start_col = position return result basic_c_type_names = cython.declare( set, set(["void", "char", "int", "float", "double", "bint"])) special_basic_c_types = cython.declare(dict, { # name : (signed, longness) "Py_UNICODE" : (0, 0), "Py_UCS4" : (0, 0), "Py_ssize_t" : (2, 0), "ssize_t" : (2, 0), "size_t" : (0, 0), "ptrdiff_t" : (2, 0), }) sign_and_longness_words = cython.declare( set, set(["short", "long", "signed", "unsigned"])) base_type_start_words = cython.declare( set, basic_c_type_names | sign_and_longness_words | set(special_basic_c_types)) struct_enum_union = cython.declare( set, set(["struct", "union", "enum", "packed"])) def p_sign_and_longness(s): signed = 1 longness = 0 while s.sy == 'IDENT' and s.systring in sign_and_longness_words: if s.systring == 'unsigned': signed = 0 elif s.systring == 'signed': signed = 2 elif s.systring == 'short': longness = -1 elif s.systring == 'long': longness += 1 s.next() return signed, longness def p_opt_cname(s): literal = p_opt_string_literal(s, 'u') if literal is not None: cname = EncodedString(literal) cname.encoding = s.source_encoding else: cname = None return cname def p_c_declarator(s, ctx = Ctx(), empty = 0, is_type = 0, cmethod_flag = 0, assignable = 0, nonempty = 0, calling_convention_allowed = 0): # If empty is true, the declarator must be empty. If nonempty is true, # the declarator must be nonempty. 
Otherwise we don't care. # If cmethod_flag is true, then if this declarator declares # a function, it's a C method of an extension type. pos = s.position() if s.sy == '(': s.next() if s.sy == ')' or looking_at_name(s): base = Nodes.CNameDeclaratorNode(pos, name = EncodedString(u""), cname = None) result = p_c_func_declarator(s, pos, ctx, base, cmethod_flag) else: result = p_c_declarator(s, ctx, empty = empty, is_type = is_type, cmethod_flag = cmethod_flag, nonempty = nonempty, calling_convention_allowed = 1) s.expect(')') else: result = p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, assignable, nonempty) if not calling_convention_allowed and result.calling_convention and s.sy != '(': error(s.position(), "%s on something that is not a function" % result.calling_convention) while s.sy in ('[', '('): pos = s.position() if s.sy == '[': result = p_c_array_declarator(s, result) else: # sy == '(' s.next() result = p_c_func_declarator(s, pos, ctx, result, cmethod_flag) cmethod_flag = 0 return result def p_c_array_declarator(s, base): pos = s.position() s.next() # '[' if s.sy != ']': dim = p_testlist(s) else: dim = None s.expect(']') return Nodes.CArrayDeclaratorNode(pos, base = base, dimension = dim) def p_c_func_declarator(s, pos, ctx, base, cmethod_flag): # Opening paren has already been skipped args = p_c_arg_list(s, ctx, cmethod_flag = cmethod_flag, nonempty_declarators = 0) ellipsis = p_optional_ellipsis(s) s.expect(')') nogil = p_nogil(s) exc_val, exc_check = p_exception_value_clause(s) with_gil = p_with_gil(s) return Nodes.CFuncDeclaratorNode(pos, base = base, args = args, has_varargs = ellipsis, exception_value = exc_val, exception_check = exc_check, nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil) supported_overloaded_operators = cython.declare(set, set([ '+', '-', '*', '/', '%', '++', '--', '~', '|', '&', '^', '<<', '>>', ',', '==', '!=', '>=', '>', '<=', '<', '[]', '()', '!', ])) def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, assignable, nonempty): pos = s.position() calling_convention = p_calling_convention(s) if s.sy == '*': s.next() if s.systring == 'const': const_pos = s.position() s.next() const_base = p_c_declarator(s, ctx, empty = empty, is_type = is_type, cmethod_flag = cmethod_flag, assignable = assignable, nonempty = nonempty) base = Nodes.CConstDeclaratorNode(const_pos, base = const_base) else: base = p_c_declarator(s, ctx, empty = empty, is_type = is_type, cmethod_flag = cmethod_flag, assignable = assignable, nonempty = nonempty) result = Nodes.CPtrDeclaratorNode(pos, base = base) elif s.sy == '**': # scanner returns this as a single token s.next() base = p_c_declarator(s, ctx, empty = empty, is_type = is_type, cmethod_flag = cmethod_flag, assignable = assignable, nonempty = nonempty) result = Nodes.CPtrDeclaratorNode(pos, base = Nodes.CPtrDeclaratorNode(pos, base = base)) elif s.sy == '&': s.next() base = p_c_declarator(s, ctx, empty = empty, is_type = is_type, cmethod_flag = cmethod_flag, assignable = assignable, nonempty = nonempty) result = Nodes.CReferenceDeclaratorNode(pos, base = base) else: rhs = None if s.sy == 'IDENT': name = EncodedString(s.systring) if empty: error(s.position(), "Declarator should be empty") s.next() cname = p_opt_cname(s) if name != 'operator' and s.sy == '=' and assignable: s.next() rhs = p_test(s) else: if nonempty: error(s.position(), "Empty declarator") name = "" cname = None if cname is None and ctx.namespace is not None and nonempty: cname = ctx.namespace + "::" + name if name == 'operator' and 
ctx.visibility == 'extern' and nonempty: op = s.sy if [1 for c in op if c in '+-*/<=>!%&|([^~,']: s.next() # Handle diphthong operators. if op == '(': s.expect(')') op = '()' elif op == '[': s.expect(']') op = '[]' elif op in ('-', '+', '|', '&') and s.sy == op: op *= 2 # ++, --, ... s.next() elif s.sy == '=': op += s.sy # +=, -=, ... s.next() if op not in supported_overloaded_operators: s.error("Overloading operator '%s' not yet supported." % op, fatal=False) name += op result = Nodes.CNameDeclaratorNode(pos, name = name, cname = cname, default = rhs) result.calling_convention = calling_convention return result def p_nogil(s): if s.sy == 'IDENT' and s.systring == 'nogil': s.next() return 1 else: return 0 def p_with_gil(s): if s.sy == 'with': s.next() s.expect_keyword('gil') return 1 else: return 0 def p_exception_value_clause(s): exc_val = None exc_check = 0 if s.sy == 'except': s.next() if s.sy == '*': exc_check = 1 s.next() elif s.sy == '+': exc_check = '+' s.next() if s.sy == 'IDENT': name = s.systring s.next() exc_val = p_name(s, name) else: if s.sy == '?': exc_check = 1 s.next() exc_val = p_test(s) return exc_val, exc_check c_arg_list_terminators = cython.declare(set, set(['*', '**', '.', ')'])) def p_c_arg_list(s, ctx = Ctx(), in_pyfunc = 0, cmethod_flag = 0, nonempty_declarators = 0, kw_only = 0, annotated = 1): # Comma-separated list of C argument declarations, possibly empty. # May have a trailing comma. args = [] is_self_arg = cmethod_flag while s.sy not in c_arg_list_terminators: args.append(p_c_arg_decl(s, ctx, in_pyfunc, is_self_arg, nonempty = nonempty_declarators, kw_only = kw_only, annotated = annotated)) if s.sy != ',': break s.next() is_self_arg = 0 return args def p_optional_ellipsis(s): if s.sy == '.': expect_ellipsis(s) return 1 else: return 0 def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0, kw_only = 0, annotated = 1): pos = s.position() not_none = or_none = 0 default = None annotation = None if s.in_python_file: # empty type declaration base_type = Nodes.CSimpleBaseTypeNode(pos, name = None, module_path = [], is_basic_c_type = 0, signed = 0, complex = 0, longness = 0, is_self_arg = cmethod_flag, templates = None) else: base_type = p_c_base_type(s, cmethod_flag, nonempty = nonempty) declarator = p_c_declarator(s, ctx, nonempty = nonempty) if s.sy in ('not', 'or') and not s.in_python_file: kind = s.sy s.next() if s.sy == 'IDENT' and s.systring == 'None': s.next() else: s.error("Expected 'None'") if not in_pyfunc: error(pos, "'%s None' only allowed in Python functions" % kind) or_none = kind == 'or' not_none = kind == 'not' if annotated and s.sy == ':': s.next() annotation = p_test(s) if s.sy == '=': s.next() if 'pxd' in ctx.level: if s.sy not in ['*', '?']: error(pos, "default values cannot be specified in pxd files, use ? 
or *") default = ExprNodes.BoolNode(1) s.next() else: default = p_test(s) return Nodes.CArgDeclNode(pos, base_type = base_type, declarator = declarator, not_none = not_none, or_none = or_none, default = default, annotation = annotation, kw_only = kw_only) def p_api(s): if s.sy == 'IDENT' and s.systring == 'api': s.next() return 1 else: return 0 def p_cdef_statement(s, ctx): pos = s.position() ctx.visibility = p_visibility(s, ctx.visibility) ctx.api = ctx.api or p_api(s) if ctx.api: if ctx.visibility not in ('private', 'public'): error(pos, "Cannot combine 'api' with '%s'" % ctx.visibility) if (ctx.visibility == 'extern') and s.sy == 'from': return p_cdef_extern_block(s, pos, ctx) elif s.sy == 'import': s.next() return p_cdef_extern_block(s, pos, ctx) elif p_nogil(s): ctx.nogil = 1 if ctx.overridable: error(pos, "cdef blocks cannot be declared cpdef") return p_cdef_block(s, ctx) elif s.sy == ':': if ctx.overridable: error(pos, "cdef blocks cannot be declared cpdef") return p_cdef_block(s, ctx) elif s.sy == 'class': if ctx.level not in ('module', 'module_pxd'): error(pos, "Extension type definition not allowed here") if ctx.overridable: error(pos, "Extension types cannot be declared cpdef") return p_c_class_definition(s, pos, ctx) elif s.sy == 'IDENT' and s.systring == 'cppclass': return p_cpp_class_definition(s, pos, ctx) elif s.sy == 'IDENT' and s.systring in struct_enum_union: if ctx.level not in ('module', 'module_pxd'): error(pos, "C struct/union/enum definition not allowed here") if ctx.overridable: error(pos, "C struct/union/enum cannot be declared cpdef") return p_struct_enum(s, pos, ctx) elif s.sy == 'IDENT' and s.systring == 'fused': return p_fused_definition(s, pos, ctx) else: return p_c_func_or_var_declaration(s, pos, ctx) def p_cdef_block(s, ctx): return p_suite(s, ctx(cdef_flag = 1)) def p_cdef_extern_block(s, pos, ctx): if ctx.overridable: error(pos, "cdef extern blocks cannot be declared cpdef") include_file = None s.expect('from') if s.sy == '*': s.next() else: include_file = p_string_literal(s, 'u')[2] ctx = ctx(cdef_flag = 1, visibility = 'extern') if s.systring == "namespace": s.next() ctx.namespace = p_string_literal(s, 'u')[2] if p_nogil(s): ctx.nogil = 1 body = p_suite(s, ctx) return Nodes.CDefExternNode(pos, include_file = include_file, body = body, namespace = ctx.namespace) def p_c_enum_definition(s, pos, ctx): # s.sy == ident 'enum' s.next() if s.sy == 'IDENT': name = s.systring s.next() cname = p_opt_cname(s) if cname is None and ctx.namespace is not None: cname = ctx.namespace + "::" + name else: name = None cname = None items = None s.expect(':') items = [] if s.sy != 'NEWLINE': p_c_enum_line(s, ctx, items) else: s.next() # 'NEWLINE' s.expect_indent() while s.sy not in ('DEDENT', 'EOF'): p_c_enum_line(s, ctx, items) s.expect_dedent() return Nodes.CEnumDefNode( pos, name = name, cname = cname, items = items, typedef_flag = ctx.typedef_flag, visibility = ctx.visibility, api = ctx.api, in_pxd = ctx.level == 'module_pxd') def p_c_enum_line(s, ctx, items): if s.sy != 'pass': p_c_enum_item(s, ctx, items) while s.sy == ',': s.next() if s.sy in ('NEWLINE', 'EOF'): break p_c_enum_item(s, ctx, items) else: s.next() s.expect_newline("Syntax error in enum item list") def p_c_enum_item(s, ctx, items): pos = s.position() name = p_ident(s) cname = p_opt_cname(s) if cname is None and ctx.namespace is not None: cname = ctx.namespace + "::" + name value = None if s.sy == '=': s.next() value = p_test(s) items.append(Nodes.CEnumDefItemNode(pos, name = name, cname = cname, value = 
value)) def p_c_struct_or_union_definition(s, pos, ctx): packed = False if s.systring == 'packed': packed = True s.next() if s.sy != 'IDENT' or s.systring != 'struct': s.expected('struct') # s.sy == ident 'struct' or 'union' kind = s.systring s.next() name = p_ident(s) cname = p_opt_cname(s) if cname is None and ctx.namespace is not None: cname = ctx.namespace + "::" + name attributes = None if s.sy == ':': s.next() s.expect('NEWLINE') s.expect_indent() attributes = [] body_ctx = Ctx() while s.sy != 'DEDENT': if s.sy != 'pass': attributes.append( p_c_func_or_var_declaration(s, s.position(), body_ctx)) else: s.next() s.expect_newline("Expected a newline") s.expect_dedent() else: s.expect_newline("Syntax error in struct or union definition") return Nodes.CStructOrUnionDefNode(pos, name = name, cname = cname, kind = kind, attributes = attributes, typedef_flag = ctx.typedef_flag, visibility = ctx.visibility, api = ctx.api, in_pxd = ctx.level == 'module_pxd', packed = packed) def p_fused_definition(s, pos, ctx): """ c(type)def fused my_fused_type: ... """ # s.systring == 'fused' if ctx.level not in ('module', 'module_pxd'): error(pos, "Fused type definition not allowed here") s.next() name = p_ident(s) s.expect(":") s.expect_newline() s.expect_indent() types = [] while s.sy != 'DEDENT': if s.sy != 'pass': #types.append(p_c_declarator(s)) types.append(p_c_base_type(s)) #, nonempty=1)) else: s.next() s.expect_newline() s.expect_dedent() if not types: error(pos, "Need at least one type") return Nodes.FusedTypeNode(pos, name=name, types=types) def p_struct_enum(s, pos, ctx): if s.systring == 'enum': return p_c_enum_definition(s, pos, ctx) else: return p_c_struct_or_union_definition(s, pos, ctx) def p_visibility(s, prev_visibility): pos = s.position() visibility = prev_visibility if s.sy == 'IDENT' and s.systring in ('extern', 'public', 'readonly'): visibility = s.systring if prev_visibility != 'private' and visibility != prev_visibility: s.error("Conflicting visibility options '%s' and '%s'" % (prev_visibility, visibility), fatal=False) s.next() return visibility def p_c_modifiers(s): if s.sy == 'IDENT' and s.systring in ('inline',): modifier = s.systring s.next() return [modifier] + p_c_modifiers(s) return [] def p_c_func_or_var_declaration(s, pos, ctx): cmethod_flag = ctx.level in ('c_class', 'c_class_pxd') modifiers = p_c_modifiers(s) base_type = p_c_base_type(s, nonempty = 1, templates = ctx.templates) declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag, assignable = 1, nonempty = 1) declarator.overridable = ctx.overridable if s.sy == 'IDENT' and s.systring == 'const' and ctx.level == 'cpp_class': s.next() is_const_method = 1 else: is_const_method = 0 if s.sy == ':': if ctx.level not in ('module', 'c_class', 'module_pxd', 'c_class_pxd', 'cpp_class') and not ctx.templates: s.error("C function definition not allowed here") doc, suite = p_suite_with_docstring(s, Ctx(level='function')) result = Nodes.CFuncDefNode(pos, visibility = ctx.visibility, base_type = base_type, declarator = declarator, body = suite, doc = doc, modifiers = modifiers, api = ctx.api, overridable = ctx.overridable, is_const_method = is_const_method) else: #if api: # s.error("'api' not allowed with variable declaration") if is_const_method: declarator.is_const_method = is_const_method declarators = [declarator] while s.sy == ',': s.next() if s.sy == 'NEWLINE': break declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag, assignable = 1, nonempty = 1) declarators.append(declarator) doc_line = s.start_line + 1 
s.expect_newline("Syntax error in C variable declaration") if ctx.level == 'c_class' and s.start_line == doc_line: doc = p_doc_string(s) else: doc = None result = Nodes.CVarDefNode(pos, visibility = ctx.visibility, base_type = base_type, declarators = declarators, in_pxd = ctx.level in ('module_pxd', 'c_class_pxd'), doc = doc, api = ctx.api, modifiers = modifiers, overridable = ctx.overridable) return result def p_ctypedef_statement(s, ctx): # s.sy == 'ctypedef' pos = s.position() s.next() visibility = p_visibility(s, ctx.visibility) api = p_api(s) ctx = ctx(typedef_flag = 1, visibility = visibility) if api: ctx.api = 1 if s.sy == 'class': return p_c_class_definition(s, pos, ctx) elif s.sy == 'IDENT' and s.systring in struct_enum_union: return p_struct_enum(s, pos, ctx) elif s.sy == 'IDENT' and s.systring == 'fused': return p_fused_definition(s, pos, ctx) else: base_type = p_c_base_type(s, nonempty = 1) declarator = p_c_declarator(s, ctx, is_type = 1, nonempty = 1) s.expect_newline("Syntax error in ctypedef statement") return Nodes.CTypeDefNode( pos, base_type = base_type, declarator = declarator, visibility = visibility, api = api, in_pxd = ctx.level == 'module_pxd') def p_decorators(s): decorators = [] while s.sy == 'DECORATOR': pos = s.position() s.next() decstring = p_dotted_name(s, as_allowed=0)[2] names = decstring.split('.') decorator = ExprNodes.NameNode(pos, name=EncodedString(names[0])) for name in names[1:]: decorator = ExprNodes.AttributeNode(pos, attribute=EncodedString(name), obj=decorator) if s.sy == '(': decorator = p_call(s, decorator) decorators.append(Nodes.DecoratorNode(pos, decorator=decorator)) s.expect_newline("Expected a newline after decorator") return decorators def p_def_statement(s, decorators=None): # s.sy == 'def' pos = s.position() s.next() name = EncodedString( p_ident(s) ) s.expect('(') args, star_arg, starstar_arg = p_varargslist(s, terminator=')') s.expect(')') if p_nogil(s): error(pos, "Python function cannot be declared nogil") return_type_annotation = None if s.sy == '->': s.next() return_type_annotation = p_test(s) doc, body = p_suite_with_docstring(s, Ctx(level='function')) return Nodes.DefNode(pos, name = name, args = args, star_arg = star_arg, starstar_arg = starstar_arg, doc = doc, body = body, decorators = decorators, return_type_annotation = return_type_annotation) def p_varargslist(s, terminator=')', annotated=1): args = p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1, annotated = annotated) star_arg = None starstar_arg = None if s.sy == '*': s.next() if s.sy == 'IDENT': star_arg = p_py_arg_decl(s, annotated=annotated) if s.sy == ',': s.next() args.extend(p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1, kw_only = 1, annotated = annotated)) elif s.sy != terminator: s.error("Syntax error in Python function argument list") if s.sy == '**': s.next() starstar_arg = p_py_arg_decl(s, annotated=annotated) return (args, star_arg, starstar_arg) def p_py_arg_decl(s, annotated = 1): pos = s.position() name = p_ident(s) annotation = None if annotated and s.sy == ':': s.next() annotation = p_test(s) return Nodes.PyArgDeclNode(pos, name = name, annotation = annotation) def p_class_statement(s, decorators): # s.sy == 'class' pos = s.position() s.next() class_name = EncodedString( p_ident(s) ) class_name.encoding = s.source_encoding arg_tuple = None keyword_dict = None starstar_arg = None if s.sy == '(': positional_args, keyword_args, star_arg, starstar_arg = \ p_call_parse_args(s, allow_genexp = False) arg_tuple, keyword_dict = 
p_call_build_packed_args( pos, positional_args, keyword_args, star_arg, None) if arg_tuple is None: # XXX: empty arg_tuple arg_tuple = ExprNodes.TupleNode(pos, args=[]) doc, body = p_suite_with_docstring(s, Ctx(level='class')) return Nodes.PyClassDefNode( pos, name=class_name, bases=arg_tuple, keyword_args=keyword_dict, starstar_arg=starstar_arg, doc=doc, body=body, decorators=decorators, force_py3_semantics=s.context.language_level >= 3) def p_c_class_definition(s, pos, ctx): # s.sy == 'class' s.next() module_path = [] class_name = p_ident(s) while s.sy == '.': s.next() module_path.append(class_name) class_name = p_ident(s) if module_path and ctx.visibility != 'extern': error(pos, "Qualified class name only allowed for 'extern' C class") if module_path and s.sy == 'IDENT' and s.systring == 'as': s.next() as_name = p_ident(s) else: as_name = class_name objstruct_name = None typeobj_name = None base_class_module = None base_class_name = None if s.sy == '(': s.next() base_class_path = [p_ident(s)] while s.sy == '.': s.next() base_class_path.append(p_ident(s)) if s.sy == ',': s.error("C class may only have one base class", fatal=False) s.expect(')') base_class_module = ".".join(base_class_path[:-1]) base_class_name = base_class_path[-1] if s.sy == '[': if ctx.visibility not in ('public', 'extern') and not ctx.api: error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class") objstruct_name, typeobj_name = p_c_class_options(s) if s.sy == ':': if ctx.level == 'module_pxd': body_level = 'c_class_pxd' else: body_level = 'c_class' doc, body = p_suite_with_docstring(s, Ctx(level=body_level)) else: s.expect_newline("Syntax error in C class definition") doc = None body = None if ctx.visibility == 'extern': if not module_path: error(pos, "Module name required for 'extern' C class") if typeobj_name: error(pos, "Type object name specification not allowed for 'extern' C class") elif ctx.visibility == 'public': if not objstruct_name: error(pos, "Object struct name specification required for 'public' C class") if not typeobj_name: error(pos, "Type object name specification required for 'public' C class") elif ctx.visibility == 'private': if ctx.api: if not objstruct_name: error(pos, "Object struct name specification required for 'api' C class") if not typeobj_name: error(pos, "Type object name specification required for 'api' C class") else: error(pos, "Invalid class visibility '%s'" % ctx.visibility) return Nodes.CClassDefNode(pos, visibility = ctx.visibility, typedef_flag = ctx.typedef_flag, api = ctx.api, module_name = ".".join(module_path), class_name = class_name, as_name = as_name, base_class_module = base_class_module, base_class_name = base_class_name, objstruct_name = objstruct_name, typeobj_name = typeobj_name, in_pxd = ctx.level == 'module_pxd', doc = doc, body = body) def p_c_class_options(s): objstruct_name = None typeobj_name = None s.expect('[') while 1: if s.sy != 'IDENT': break if s.systring == 'object': s.next() objstruct_name = p_ident(s) elif s.systring == 'type': s.next() typeobj_name = p_ident(s) if s.sy != ',': break s.next() s.expect(']', "Expected 'object' or 'type'") return objstruct_name, typeobj_name def p_property_decl(s): pos = s.position() s.next() # 'property' name = p_ident(s) doc, body = p_suite_with_docstring( s, Ctx(level='property'), with_doc_only=True) return Nodes.PropertyNode(pos, name=name, doc=doc, body=body) def p_ignorable_statement(s): """ Parses any kind of ignorable statement that is allowed in .pxd files. 
""" if s.sy == 'BEGIN_STRING': pos = s.position() string_node = p_atom(s) if s.sy != 'EOF': s.expect_newline("Syntax error in string") return Nodes.ExprStatNode(pos, expr=string_node) return None def p_doc_string(s): if s.sy == 'BEGIN_STRING': pos = s.position() kind, bytes_result, unicode_result = p_cat_string_literal(s) if s.sy != 'EOF': s.expect_newline("Syntax error in doc string") if kind in ('u', ''): return unicode_result warning(pos, "Python 3 requires docstrings to be unicode strings") return bytes_result else: return None def _extract_docstring(node): """ Extract a docstring from a statement or from the first statement in a list. Remove the statement if found. Return a tuple (plain-docstring or None, node). """ doc_node = None if node is None: pass elif isinstance(node, Nodes.ExprStatNode): if node.expr.is_string_literal: doc_node = node.expr node = Nodes.StatListNode(node.pos, stats=[]) elif isinstance(node, Nodes.StatListNode) and node.stats: stats = node.stats if isinstance(stats[0], Nodes.ExprStatNode): if stats[0].expr.is_string_literal: doc_node = stats[0].expr del stats[0] if doc_node is None: doc = None elif isinstance(doc_node, ExprNodes.BytesNode): warning(node.pos, "Python 3 requires docstrings to be unicode strings") doc = doc_node.value elif isinstance(doc_node, ExprNodes.StringNode): doc = doc_node.unicode_value if doc is None: doc = doc_node.value else: doc = doc_node.value return doc, node def p_code(s, level=None, ctx=Ctx): body = p_statement_list(s, ctx(level = level), first_statement = 1) if s.sy != 'EOF': s.error("Syntax error in statement [%s,%s]" % ( repr(s.sy), repr(s.systring))) return body _match_compiler_directive_comment = cython.declare(object, re.compile( r"^#\s*cython\s*:\s*((\w|[.])+\s*=.*)$").match) def p_compiler_directive_comments(s): result = {} while s.sy == 'commentline': m = _match_compiler_directive_comment(s.systring) if m: directives = m.group(1).strip() try: result.update(Options.parse_directive_list( directives, ignore_unknown=True)) except ValueError, e: s.error(e.args[0], fatal=False) s.next() return result def p_module(s, pxd, full_module_name, ctx=Ctx): pos = s.position() directive_comments = p_compiler_directive_comments(s) s.parse_comments = False if 'language_level' in directive_comments: s.context.set_language_level(directive_comments['language_level']) doc = p_doc_string(s) if pxd: level = 'module_pxd' else: level = 'module' body = p_statement_list(s, ctx(level=level), first_statement = 1) if s.sy != 'EOF': s.error("Syntax error in statement [%s,%s]" % ( repr(s.sy), repr(s.systring))) return ModuleNode(pos, doc = doc, body = body, full_module_name = full_module_name, directive_comments = directive_comments) def p_cpp_class_definition(s, pos, ctx): # s.sy == 'cppclass' s.next() module_path = [] class_name = p_ident(s) cname = p_opt_cname(s) if cname is None and ctx.namespace is not None: cname = ctx.namespace + "::" + class_name if s.sy == '.': error(pos, "Qualified class name not allowed C++ class") if s.sy == '[': s.next() templates = [p_ident(s)] while s.sy == ',': s.next() templates.append(p_ident(s)) s.expect(']') else: templates = None if s.sy == '(': s.next() base_classes = [p_c_base_type(s, templates = templates)] while s.sy == ',': s.next() base_classes.append(p_c_base_type(s, templates = templates)) s.expect(')') else: base_classes = [] if s.sy == '[': error(s.position(), "Name options not allowed for C++ class") nogil = p_nogil(s) if s.sy == ':': s.next() s.expect('NEWLINE') s.expect_indent() attributes = [] body_ctx = 
Ctx(visibility = ctx.visibility, level='cpp_class', nogil=nogil or ctx.nogil) body_ctx.templates = templates while s.sy != 'DEDENT': if s.systring == 'cppclass': attributes.append( p_cpp_class_definition(s, s.position(), body_ctx)) elif s.sy != 'pass': attributes.append( p_c_func_or_var_declaration(s, s.position(), body_ctx)) else: s.next() s.expect_newline("Expected a newline") s.expect_dedent() else: attributes = None s.expect_newline("Syntax error in C++ class definition") return Nodes.CppClassNode(pos, name = class_name, cname = cname, base_classes = base_classes, visibility = ctx.visibility, in_pxd = ctx.level == 'module_pxd', attributes = attributes, templates = templates) def print_parse_tree(f, node, level, key = None): from types import ListType, TupleType from Nodes import Node ind = " " * level if node: f.write(ind) if key: f.write("%s: " % key) t = type(node) if t is tuple: f.write("(%s @ %s\n" % (node[0], node[1])) for i in xrange(2, len(node)): print_parse_tree(f, node[i], level+1) f.write("%s)\n" % ind) return elif isinstance(node, Node): try: tag = node.tag except AttributeError: tag = node.__class__.__name__ f.write("%s @ %s\n" % (tag, node.pos)) for name, value in node.__dict__.items(): if name != 'tag' and name != 'pos': print_parse_tree(f, value, level+1, name) return elif t is list: f.write("[\n") for i in xrange(len(node)): print_parse_tree(f, node[i], level+1) f.write("%s]\n" % ind) return f.write("%s%s\n" % (ind, node))
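# A standalone, hedged sketch (not Cython code) of the put_back/lookahead
# pattern that looking_at_dotted_name() and its siblings above rely on:
# consume a token, inspect the one behind it, then push the consumed token
# back so parsing proceeds as if nothing was read. ToyScanner is a
# hypothetical stand-in for Cython's Scanner.
class ToyScanner(object):
    def __init__(self, tokens):
        self._tokens = list(tokens)   # e.g. [('IDENT', 'a'), ('.', '.')]
        self._pushed = []
        self.sy = self.systring = None
        self.next()

    def next(self):
        # tokens pushed back take priority over fresh input
        source = self._pushed if self._pushed else self._tokens
        self.sy, self.systring = source.pop(0) if source else ('EOF', '')

    def put_back(self, sy, systring):
        # re-queue the current token, then make (sy, systring) current again
        self._pushed.insert(0, (self.sy, self.systring))
        self.sy, self.systring = sy, systring


def looking_at_dotted_name_sketch(s):
    # same shape as the real function: consume IDENT, peek, restore
    if s.sy == 'IDENT':
        name = s.systring
        s.next()
        result = s.sy == '.'
        s.put_back('IDENT', name)
        return result
    return False


if __name__ == '__main__':
    s = ToyScanner([('IDENT', 'a'), ('.', '.'), ('IDENT', 'b')])
    assert looking_at_dotted_name_sketch(s)
    assert (s.sy, s.systring) == ('IDENT', 'a')   # scanner state restored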
class Node(object):
    """Doubly linked list node."""
    def __init__(self, value, keys):
        self.value = value
        self.keys = keys
        self.prev = None
        self.next = None


class LinkedList(object):
    def __init__(self):
        # sentinel head and tail nodes simplify insertion and removal
        self.head, self.tail = Node(0, set()), Node(0, set())
        self.head.next, self.tail.prev = self.tail, self.head

    def insert(self, pos, node):
        # insert node immediately before pos
        node.prev, node.next = pos.prev, pos
        pos.prev.next, pos.prev = node, node
        return node

    def erase(self, node):
        # unlink node; the garbage collector reclaims it once unreferenced
        node.prev.next, node.next.prev = node.next, node.prev

    def empty(self):
        return self.head.next is self.tail

    def begin(self):
        return self.head.next

    def end(self):
        return self.tail

    def front(self):
        return self.head.next

    def back(self):
        return self.tail.prev


class AllOne(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.bucket_of_key = {}
        self.buckets = LinkedList()

    def inc(self, key):
        """
        Inserts a new key <Key> with value 1. Or increments an existing key
        by 1.
        :type key: str
        :rtype: void
        """
        if key not in self.bucket_of_key:
            self.bucket_of_key[key] = self.buckets.insert(
                self.buckets.begin(), Node(0, set([key])))
        bucket = self.bucket_of_key[key]
        next_bucket = bucket.next
        if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:
            next_bucket = self.buckets.insert(next_bucket,
                                              Node(bucket.value + 1, set()))
        next_bucket.keys.add(key)
        self.bucket_of_key[key] = next_bucket
        bucket.keys.remove(key)
        if not bucket.keys:
            self.buckets.erase(bucket)

    def dec(self, key):
        """
        Decrements an existing key by 1. If Key's value is 1, remove it from
        the data structure.
        :type key: str
        :rtype: void
        """
        if key not in self.bucket_of_key:
            return
        bucket = self.bucket_of_key[key]
        prev_bucket = bucket.prev
        self.bucket_of_key.pop(key, None)
        if bucket.value > 1:
            if bucket is self.buckets.begin() or prev_bucket.value < bucket.value - 1:
                prev_bucket = self.buckets.insert(bucket,
                                                  Node(bucket.value - 1, set()))
            prev_bucket.keys.add(key)
            self.bucket_of_key[key] = prev_bucket
        bucket.keys.remove(key)
        if not bucket.keys:
            self.buckets.erase(bucket)

    def getMaxKey(self):
        """
        Returns one of the keys with maximal value.
        :rtype: str
        """
        if self.buckets.empty():
            return ""
        return next(iter(self.buckets.back().keys))

    def getMinKey(self):
        """
        Returns one of the keys with minimal value.
        :rtype: str
        """
        if self.buckets.empty():
            return ""
        return next(iter(self.buckets.front().keys))
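# A quick, hedged usage demo of AllOne: keys with equal counts share a
# bucket in the doubly linked list, which is what keeps every operation
# O(1). The key names are arbitrary.
if __name__ == '__main__':
    ds = AllOne()
    ds.inc("apple")
    ds.inc("apple")
    ds.inc("banana")
    print(ds.getMaxKey())    # "apple" (count 2)
    print(ds.getMinKey())    # "banana" (count 1)
    ds.dec("apple")
    # both keys now have count 1, so either may be returned
    print(ds.getMaxKey() in ("apple", "banana"))    # True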
import Tkinter as tk import ttk import platform def quit(): global tkTop tkTop.destroy() tkTop = tk.Tk() tkTop.geometry('500x300') tkLabelTop = tk.Label(tkTop, text=" http://hello-python.blogspot.com ") tkLabelTop.pack() notebook = ttk.Notebook(tkTop) frame1 = ttk.Frame(notebook) frame2 = ttk.Frame(notebook) notebook.add(frame1, text='Frame One') notebook.add(frame2, text='Frame Two') notebook.pack() tkButtonQuit = tk.Button( tkTop, text="Quit", command=quit) tkButtonQuit.pack() tkDummyButton = tk.Button( frame1, text="Dummy Button") tkDummyButton.pack() tkLabel = tk.Label(frame1, text=" Hello Python!") tkLabel.pack() strVersion = "running Python version " + platform.python_version() tkLabelVersion = tk.Label(frame2, text=strVersion) tkLabelVersion.pack() strPlatform = "Platform: " + platform.platform() tkLabelPlatform = tk.Label(frame2, text=strPlatform) tkLabelPlatform.pack() tk.mainloop()
""" Wrap your code with a time limit to prevent something from taking too long (getting into an infinite loop, etc.) **Examples** >>> from timeout import timeout >>> with timeout(seconds=3): >>> do something Taken and slightly modified from Thomas Ahle at: <http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish> """ import errno import os import signal class TimeoutError(Exception): pass class timeout: def __init__(self, seconds=1, minutes=None, error_message='Timeout'): self.seconds = seconds if minutes is not None: self.seconds = minutes*60 self.error_message = error_message def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) def __exit__(self, type, value, traceback): signal.alarm(0)
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('people', '0004_auto_20150706_1547'), ] operations = [ migrations.AddField( model_name='contributorlistpage', name='people_per_page', field=models.IntegerField(default=20), ), ]
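# Hedged sketch: applying and reversing the migration above
# programmatically. Assumes a configured Django project
# (DJANGO_SETTINGS_MODULE set) that contains the 'people' app.
import django
from django.core.management import call_command

django.setup()
call_command('migrate', 'people')  # apply up to and including this one
call_command('migrate', 'people', '0004_auto_20150706_1547')  # reverse it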
import sys, string if len(sys.argv) >= 4: file = open(sys.argv[1], "r") csid = sys.argv[2] name = sys.argv[3] if len(sys.argv) == 4: dceid = "omniCodeSet::ID_" + csid else: dceid = sys.argv[4] else: sys.stderr.write("Usage: %s <file> <csid> <name> [dce id]\n" % sys.argv[0]) sys.exit(1) to_u = [0] * 256 fr_u = {} bank = [0] * 256 while 1: line = file.readline() if not line: break if line[0] == "#": continue sl = string.split(line) if (len(sl) < 2 or sl[0][:2] != "0x" or sl[1][:2] != "0x"): sys.stderr.write("Don't understand line: " + string.join(sl, "\t") + "\n") continue c = string.atoi(sl[0], 0) u = string.atoi(sl[1], 0) to_u[c] = u fr_u[u] = c bank[u >> 8] = 1 sys.stdout.write("""\ // -*- Mode: C++; -+- // // Code set table automatically generated from: // // %s // OMNI_NAMESPACE_BEGIN(omni) static const omniCodeSet::UniChar toUCS[] = {""" % sys.argv[1]) for i in range(256): if i % 8 == 0: sys.stdout.write("\n ") sys.stdout.write(" 0x%04x," % to_u[i]) print "\n};\n" for i in range(256): if bank[i]: sys.stdout.write("static const _CORBA_Char frUCS%02x[] = {" % i) add = i << 8 for j in range(256): if j % 8 == 0: sys.stdout.write("\n ") sys.stdout.write(" 0x%02x," % fr_u.get(add + j, 0)) print "\n};\n" sys.stdout.write("""\ static const _CORBA_Char* frUCS[] = {""") for i in range(256): if i % 8 == 0: sys.stdout.write("\n ") if bank[i]: sys.stdout.write(" frUCS%02x," % i) else: sys.stdout.write(" E_T,") print "\n};\n" spacer = " " * len(csid) print """\ static omniCodeSet::NCS_C_8bit _NCS_C_%(csid)s(%(dceid)s, %(spacer)s"%(name)s", %(spacer)stoUCS, frUCS); static omniCodeSet::TCS_C_8bit _TCS_C_%(csid)s(%(dceid)s, %(spacer)s"%(name)s", %(spacer)somniCodeSetUtil::GIOP12, %(spacer)stoUCS, frUCS); class CS_%(csid)s_init { public: CS_%(csid)s_init() { omniCodeSet::registerNCS_C(&_NCS_C_%(csid)s); omniCodeSet::registerTCS_C(&_TCS_C_%(csid)s); } }; static CS_%(csid)s_init _CS_%(csid)s_init_; OMNI_NAMESPACE_END(omni) OMNI_EXPORT_LINK_FORCE_SYMBOL(CS_%(csid)s); """ % vars()
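# A hedged sketch of the mapping file the script above expects: a
# Unicode.org-style table where each non-comment line pairs a code set byte
# with a UCS code point. The file name and code set id below are
# illustrative, not real registrations.
def _write_sample_mapping(path="sample-mapping.txt"):
    with open(path, "w") as f:
        f.write("# comments and malformed lines are skipped\n")
        f.write("0x41\t0x0041\n")   # 'A' maps to U+0041
        f.write("0x80\t0x20AC\n")   # byte 0x80 maps to the euro sign
    # then run: python <this script> sample-mapping.txt MY_CS my-charset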
"""Test that the toolchain can build executables. Multiple build tools and languages are supported. If an emulator is available, its ability to run the generated executables is also tested. """ import argparse import glob import os import shutil import subprocess import sys import tempfile def test_none_build_system(build_dir, language, source, linker_flags, exe_suffix): build_cmd = list() if language == 'C': compiler = os.getenv('CC', 'cc') elif language == 'C++': compiler = os.getenv('CXX', 'c++') else: print('Unknown language: ' + language) return 1 build_cmd.append(compiler) if linker_flags: build_cmd.extend(linker_flags) build_cmd.append(source) build_cmd.append('-o') build_cmd.append('a.out' + exe_suffix) print('Building ' + source + ' by calling ' + compiler + '...') print(' '.join(build_cmd)) sys.stdout.flush() return subprocess.call(build_cmd) def test_cmake_build_system(build_dir, language, source, emulator, linker_flags, exe_suffix): shutil.copy(source, build_dir) print('Building ' + source + ' with CMake...') with open('CMakeLists.txt', 'w') as fp: fp.write('cmake_minimum_required(VERSION 3.0)\n') fp.write('project(test-compiler)\n') fp.write('add_executable(a.out ' + os.path.basename(source) + ')\n') if emulator: fp.write('enable_testing()\n') fp.write('add_test(NAME emulator-in-cmake COMMAND a.out)\n') os.mkdir('build') os.chdir('build') cmake_configuration_cmd = ['cmake', '..'] if linker_flags: cmake_configuration_cmd.insert(1, '-DCMAKE_EXE_LINKER_FLAGS="{0}"'.format(' '.join(linker_flags))) print(' '.join(cmake_configuration_cmd)) sys.stdout.flush() if subprocess.call(cmake_configuration_cmd): return 1 if subprocess.call(['make', 'VERBOSE=1']): return 1 if emulator: if subprocess.call(['ctest']): return 1 shutil.copy('a.out' + exe_suffix, build_dir) return 0 def test_source(source, language, build_system, emulator, linker_flags, exe_suffix, debug): result = 0 cwd = os.getcwd() build_dir = tempfile.mkdtemp() os.chdir(build_dir) if build_system == 'None': result += test_none_build_system(build_dir, language, source, linker_flags, exe_suffix) elif build_system == 'CMake': result += test_cmake_build_system(build_dir, language, source, emulator, linker_flags, exe_suffix) else: print('Unknown build system: ' + build_system) result += 1 if emulator: cmd = emulator cmd += ' ' + os.path.join(build_dir, 'a.out' + exe_suffix) print('Running ' + cmd + '...') sys.stdout.flush() result += subprocess.call(cmd, shell=True) os.chdir(cwd) if not debug: print('Deleting temporary build directory ' + build_dir) shutil.rmtree(build_dir) else: print('Keeping temporary build directory ' + build_dir) sys.stdout.flush() return result def test_build_system(test_dir, language, build_system, emulator, linker_flags, exe_suffix, debug): print('\n\n--------------------------------------------------------') print('Testing ' + build_system + ' build system with the ' + language + ' language\n') sys.stdout.flush() result = 0 for source in glob.glob(os.path.join(test_dir, language, '*')): result += test_source(source, language, build_system, emulator, linker_flags, exe_suffix, debug) return result def test_language(test_dir, language, build_systems, emulator, linker_flags, exe_suffix, debug): result = 0 for build_system in build_systems: result += test_build_system(test_dir, language, build_system, emulator, linker_flags, exe_suffix, debug) return result def run_tests(test_dir, languages=('C', 'C++'), build_systems=('None', 'CMake'), emulator=None, linker_flags=None, exe_suffix='', debug=False): """Run the 
tests found in test_dir where each directory corresponds to an entry in languages. Every source within a language directory is built. The output executable is also run with the emulator if provided.""" result = 0 for language in languages: result += test_language(test_dir, language, build_systems, emulator, linker_flags, exe_suffix, debug) return result if __name__ == '__main__': parser = argparse.ArgumentParser( description='Test the cross-compiler toolchain.') parser.add_argument('--languages', '-l', nargs='+', default=['C', 'C++'], help='Languages to test. Options: C C++') parser.add_argument('--build-systems', '-b', nargs='+', default=['None', 'CMake'], help='Build systems to test. Options: None CMake') parser.add_argument('--emulator', '-e', help='Emulator used to test generated executables') parser.add_argument('--linker-flags', '-w', nargs='+', help='Extra compilation linker flags') parser.add_argument('--exe-suffix', '-s', default='', help='Suffix for generated executables') parser.add_argument('--debug', '-d', action='store_true', help='Do not remove temporary build directory') args = parser.parse_args() test_dir = os.path.dirname(os.path.abspath(__file__)) sys.exit(run_tests(test_dir, languages=args.languages, build_systems=args.build_systems, emulator=args.emulator, linker_flags=args.linker_flags, exe_suffix=args.exe_suffix, debug=args.debug) != 0)
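# Hedged sketch: driving run_tests() as a library call instead of via the
# CLI above. 'test_toolchain' is a hypothetical module name for that file,
# and 'qemu-arm' / '-static' are illustrative values, not defaults.
def _example_run():
    import os
    import test_toolchain   # hypothetical: the script saved as test_toolchain.py
    return test_toolchain.run_tests(
        os.path.dirname(os.path.abspath(test_toolchain.__file__)),
        languages=('C',),
        build_systems=('None',),
        emulator='qemu-arm',
        linker_flags=['-static'])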
"""Persistent identifier minters.""" from __future__ import absolute_import import uuid from .providers import DepositProvider def deposit_minter(record_uuid, data): """Mint a deposit identifier. A PID with the following characteristics is created: .. code-block:: python { "object_type": "rec", "object_uuid": record_uuid, "pid_value": "<new-pid-value>", "pid_type": "depid", } The following deposit meta information are updated: .. code-block:: python deposit['_deposit'] = { "id": "<new-pid-value>", "status": "draft", } :param record_uuid: Record UUID. :param data: Record content. :returns: A :class:`invenio_pidstore.models.PersistentIdentifier` object. """ provider = DepositProvider.create( object_type='rec', object_uuid=record_uuid, pid_value=uuid.uuid4().hex, ) data['_deposit'] = { 'id': provider.pid.pid_value, 'status': 'draft', } return provider.pid
import codecs
import collections
import re

"""
This script can generate a dictionary of language names. This dictionary
looks as follows:

language_names = {
    "C": {
        "nl": "Dutch",
        "de": "German",
        "en": "English",
    },
    "nl": {
        "nl": "Nederlands",
        "de": "Duits",
        "en": "Engels",
    },
}

Etcetera.

It can be created from:
- the 'all_languages' file that is part of KDE (currently the only option).

This generate.py script writes the dictionary to a file named data.py.
This script does not need to be installed in order to use the
language_names package.
"""

lang_names = [
    "C", "en", "de", "fr", "es", "nl", "pl", "pt_BR", "cs", "ru", "hu",
    "gl", "it", "tr", "uk", "ja", "zh_CN", "zh_HK", "zh_TW",
]


def generate_kde(fileName="/usr/share/locale/all_languages"):
    """Uses the KDE file to extract language names.

    Returns the dictionary. All strings are in unicode form.

    """
    langs = collections.defaultdict(dict)
    group = None
    with codecs.open(fileName, "r", "utf-8") as langfile:
        for line in langfile:
            line = line.strip()
            m = re.match(r"\[([^]]+)\]", line)
            if m:
                group = m.group(1)
            elif group and group != 'x-test':
                m = re.match(r"Name(?:\[([^]]+)\])?\s*=(.*)$", line)
                if m:
                    lang, name = m.group(1) or "C", m.group(2)
                    langs[lang][group] = name
    # correct KDE mistake
    langs["cs"]["gl"] = "Galicijský"
    langs["zh_HK"]["gl"] = "加利西亞語"
    langs["zh_HK"]["zh_HK"] = "繁體中文(香港)"
    return dict(langs)


def makestring(text):
    """Returns the text wrapped in quotes, usable as Python input
    (expecting unicode_literals)."""
    return '"' + re.sub(r'([\\"])', r'\\\1', text) + '"'


def write_dict(langs):
    """Writes the dictionary file to the 'data.py' file."""
    keys = sorted(filter(lambda k: k in langs, lang_names)
                  if lang_names else langs)
    with codecs.open("data.py", "w", "utf-8") as output:
        output.write("# -*- coding: utf-8;\n\n")
        output.write("# Do not edit, this file is generated. "
                     "See generate.py.\n")
        output.write("\n\n")
        output.write("language_names = {\n")
        for key in keys:
            output.write('{0}: {{\n'.format(makestring(key)))
            for lang in sorted(langs[key]):
                output.write('    {0}:{1},\n'.format(
                    makestring(lang), makestring(langs[key][lang])))
            output.write('},\n')
        output.write("}\n\n# End of data.py\n")


if __name__ == "__main__":
    langs = generate_kde()
    langs['zh'] = langs['zh_CN']
    write_dict(langs)
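# Hedged sketch of consuming the generated data.py (requires generate.py to
# have been run first; exact contents depend on the local all_languages
# file).
def _example_lookup():
    from data import language_names
    # name of German ("de") as written in Dutch ("nl")
    return language_names["nl"]["de"]   # e.g. "Duits"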
import logging _logger = logging.getLogger('read-etexts-activity') supported = True try: import gst gst.element_factory_make('espeak') from speech_gst import * _logger.info('use gst-plugins-espeak') except Exception, e: _logger.info('disable gst-plugins-espeak: %s' % e) try: from speech_dispatcher import * _logger.info('use speech-dispatcher') except Exception, e: supported = False _logger.info('disable speech: %s' % e) voice = 'default' pitch = 0 rate = 0 highlight_cb = None end_text_cb = None reset_cb = None
import sys sys.path.append("/usr/share/rhn/") from up2date_client import rhnserver from up2date_client import up2dateAuth from up2date_client import pkgUtils from actions import packages __rhnexport__ = [ 'update'] ACTION_VERSION = 2 def __getErrataInfo(errata_id): s = rhnserver.RhnServer() return s.errata.getErrataInfo(up2dateAuth.getSystemId(), errata_id) def update(errataidlist, cache_only=None): packagelist = [] if type(errataidlist) not in [type([]), type(())]: errataidlist = [ errataidlist ] for errataid in errataidlist: tmpList = __getErrataInfo(errataid) packagelist = packagelist + tmpList current_packages_with_arch = {} current_packages ={} for p in pkgUtils.getInstalledPackageList(getArch=1): current_packages_with_arch[p['name']+p['arch']] = p current_packages[p['name']] = p u = {} # only update packages that are currently installed # since an "applicable errata" may only contain some packages # that actually apply. aka kernel. Fun fun fun. if len(packagelist[0]) > 4: # Newer sats send down arch, filter using name+arch for p in packagelist: if current_packages_with_arch.has_key(p[0]+p[4]): u[p[0]+p[4]] = p elif current_packages_with_arch.has_key(p[0]+"noarch"): u[p[0]+p[4]] = p elif p[4] == "noarch" and current_packages.has_key(p[0]): u[p[0]] = p else: # 5.2 and older sats + hosted dont send arch for p in packagelist: if current_packages.has_key(p[0]): u[p[0]] = p # XXX: Fix me - once we keep all errata packages around, # this is the WRONG thing to do - we want to keep the specific versions # that the user has asked for. packagelist = map(lambda a: u[a], u.keys()) if packagelist == []: data = {} data['version'] = "0" data['name'] = "errata.update.no_packages" data['erratas'] = errataidlist return (39, "No packages from that errata are available", data) return packages.update(packagelist, cache_only) def main(): print update([23423423]) if __name__ == "__main__": main()
"""ROI and ROIList classes for storing and manipulating regions of interests (ROIs). ROI.ROI objects allow for storing of ROIs as either as a boolean mask of included pixels, or as multiple polygons. Masks need not be continuous and an ROI can be defined by multiple non-adjacent polygons. In addition, each ROI can be assigned any number or 'tags' used to define features of the ROIs, as well as an 'id' which is used for clustering or aligning ROIs across ROILists. ROI.ROIList objects are a list-like container for storing multiple ROIs and includes methods for saving, loading, and sub-grouping. """ from builtins import filter from builtins import str from builtins import zip from builtins import range from builtins import object from scipy.sparse import lil_matrix, issparse import numpy as np import pickle as pickle import itertools as it from itertools import product from datetime import datetime from warnings import warn from shapely.geometry import MultiPolygon, Polygon, Point from skimage.measure import find_contours import sima.misc import sima.misc.imagej import os import glob import re import scipy.io from future import standard_library standard_library.install_aliases() class NonBooleanMask(Exception): pass class ROI(object): """Structure used to store ROIs Parameters ---------- mask : 2D or 3D array or list of 2D arrays or of sparse matrices, optional A boolean mask in which all non-zero values define the region of interest. Masks are assumed to follow a (z, y, x) convention, corresponding to (plane, row, column) in the image. polygons: array_like, optional Either an Nx2 or Nx3 np.array (single polygon), a list of array_like objects (multiple polygons), a single shapely Polygon class instance, or a list of shapely Polygon class instances. Because polygons are stored internally as a shapely MultiPolygon, coordinates in this argument should follow an (x, y) or (x, y, z) convention. label : str, optional A label associated with the ROI for reference tags : list of str, optional A list of tags associated with the ROI. id : str, optional A unique identifier for the ROI. By default, the ROI will not have a unique identifier. im_shape: 2- or 3-tuple, optional The shape of the image on which the ROI is drawn. If initialized with a mask, should be None, since im_shape will default to shape of the mask. Elements should correspond to (z, y, x), equivalent to (nPlanes, nRows, nCols) Raises ------ NonBooleanMask Raised when you try to get a polygon representation of a non-boolean mask. See Also -------- sima.ROI.ROIList Notes ----- ROI class instance must be initialized with either a mask or polygons (not both). If initialized with a polygon, im_shape must be defined before the ROI can be converted to a mask. By convention polygon points are assumed to designate the top-left corner of a pixel (see example). Examples -------- >>> import numpy as np >>> from sima.ROI import ROI >>> roi = ROI(polygons=[[0, 0], [0, 1], [1, 1], [1, 0]], im_shape=(2, 2)) >>> roi.coords [array([[ 0., 0., 0.], [ 0., 1., 0.], [ 1., 1., 0.], [ 1., 0., 0.], [ 0., 0., 0.]])] >>> np.array(roi) array([[[ True, False], [False, False]]], dtype=bool) Attributes ---------- id : string The unique identifier for the ROI. tags : set of str The set of tags associated with the ROI. label : string A label associated with the ROI. mask : array A mask defining the region of interest. polygons : MultiPolygon A MultiPolygon representation of the ROI. 
coords : list of arrays Coordinates of the polygons as a list of Nx3 arrays (x, y, z) im_shape : 3-tuple The shape of the image associated with the ROI (z, y, x). Determines the shape of the mask. size : int The number of non-zero pixel-weights in the ROI mask. """ def __init__(self, mask=None, polygons=None, label=None, tags=None, id=None, im_shape=None): if (mask is None) == (polygons is None): raise TypeError('ROI: ROI must be initialized with either a mask \ or a polygon, not both and not neither') self.im_shape = im_shape if mask is not None: self.mask = mask else: self._mask = None if polygons is not None: self.polygons = polygons else: self._polys = None self.id = id self.tags = tags self.label = label def __str__(self): return '<ROI: label={label}>'.format(label=self.label) def __repr__(self): return '<ROI: ' + \ 'label={label}, id={id}, type={type}, im_shape={im_shape}'.format( label=self.label, id=self.id, type='mask' if self._mask is not None else 'poly', im_shape=self.im_shape) def todict(self, type=None): """Returns the data in the ROI as a dictionary. ROI(**roi.todict()) will return a new ROI equivalent to the original roi Parameters ---------- type : {'mask','polygons'}, optional If specified, convert the type of each ROI in the list prior to saving """ if type == 'mask': self.mask = self.mask elif type == 'polygons': self.polygons = self.polygons polygons = None if self._polys is None else self.coords return {'mask': self._mask, 'polygons': polygons, 'id': self._id, 'label': self._label, 'tags': self._tags, 'im_shape': self._im_shape} @property def id(self): return self._id @id.setter def id(self, i): if i is None: self._id = None else: self._id = str(i) @property def tags(self): return self._tags @tags.setter def tags(self, t): if t is None: self._tags = set() else: self._tags = set(t) @property def label(self): return self._label @label.setter def label(self, l): if l is None: self._label = None else: self._label = str(l) @property def polygons(self): if self._polys is not None: return self._polys for m in self._mask: if not np.all((np.array(m.todense() == 0) | np.array(m.todense() == 1))): raise NonBooleanMask( 'Unable to convert a non-boolean mask to polygons') return mask2poly(self._mask) @polygons.setter def polygons(self, polygons): self._polys = _reformat_polygons(polygons) self._mask = None @property def coords(self): coords = [] for polygon in self.polygons: coords.append(np.array(polygon.exterior.coords)) return coords @property def mask(self): if self._mask is None and self.im_shape is None: raise Exception('Polygon ROIs must have an im_shape set') if self._mask is not None: masks = [] for z_idx, mask in enumerate(self._mask): if mask.shape == self.im_shape[1:]: masks.append(mask) else: m = lil_matrix(self.im_shape[1:], dtype=mask.dtype) values = mask.nonzero() for row, col in zip(*values): if row < self.im_shape[1] and col < self.im_shape[2]: m[row, col] = mask[row, col] masks.append(m) if z_idx + 1 == self.im_shape[0]: break # Note: length of output = self.im_shape[0] while len(masks) < self.im_shape[0]: masks.append(lil_matrix(self.im_shape[1:], dtype=mask.dtype)) return masks return poly2mask(polygons=self.polygons, im_size=self.im_shape) @mask.setter def mask(self, mask): self._mask = _reformat_mask(mask) self._polys = None def __array__(self): """Obtain a numpy.ndarray representation of the ROI mask. Returns ------- mask : numpy.ndarray An array representation of the ROI mask. 
""" return np.array([plane.todense() for plane in self.mask]) @property def size(self): return sum(np.count_nonzero(plane.todense()) for plane in self.mask) @property def im_shape(self): if self._im_shape is not None: return self._im_shape if self._mask is not None: z = len(self._mask) y = np.amax([x.shape[0] for x in self._mask]) x = np.amax([x.shape[1] for x in self._mask]) return (z, y, x) return None @im_shape.setter def im_shape(self, shape): if shape is None: self._im_shape = None else: if len(shape) == 3: self._im_shape = tuple(shape) elif len(shape) == 2: self._im_shape = (1,) + tuple(shape) class ROIList(list): """A list-like container for storing multiple ROIs. This class retains all the functionality inherited from Python's built-in `list <https://docs.python.org/2/library/functions.html#list>`_ class. Parameters ---------- rois : list of sima.ROI.ROI The ROIs in the set. timestamp : , optional The time at which the ROIList was created. Defaults to the current time. See also -------- sima.ROI.ROI Attributes ---------- timestamp : string The timestamp for when the ROIList was created. """ def __init__(self, rois, timestamp=None): def convert(roi): if isinstance(roi, dict): return ROI(**roi) else: return roi list.__init__(self, [convert(roi) for roi in rois]) self.timestamp = timestamp @classmethod def load(cls, path, label=None, fmt='pkl', reassign_label=False): """Initialize an ROIList from either a saved pickle file or an Imagej ROI zip file. Parameters ---------- path : string Path to either a pickled ROIList, an ImageJ ROI zip file, or the path to the direcotry containing the 'IC filter' .mat files for inscopix/mosaic data. label : str, optional The label for selecting the ROIList if multiple ROILists have been saved in the same file. By default, the most recently saved ROIList will be selected. fmt : {'pkl', 'ImageJ', 'inscopix'} The file format being imported. reassign_label: boolean If true, assign ascending integer strings as labels Returns ------- sima.ROI.ROIList Returns an ROIList loaded from the passed in path. """ if fmt == 'pkl': with open(path, 'rb') as f: roi_sets = pickle.load(f) if label is None: label = sima.misc.most_recent_key(roi_sets) try: rois = roi_sets[label] except KeyError: raise Exception( 'No ROIs with were saved with the given label.') roi_list = cls(**rois) elif fmt == 'ImageJ': roi_list = cls(rois=sima.misc.imagej.read_imagej_roi_zip(path)) elif fmt == 'inscopix': dirnames = next(os.walk(path))[1] # this naming convetion for ROI masks is used in Mosiac 1.0.0b files = [glob.glob(os.path.join(path, dirname, '*IC filter*.mat')) for dirname in dirnames] files = filter(lambda f: len(f) > 0, files)[0] rois = [] for filename in files: label = re.findall('\d+', filename)[-1] data = scipy.io.loadmat(filename) # this is the ROI mask index in Mosiac 1.0.0b mask = data['Object'][0][0][11] rois.append(ROI(mask=mask, id=label, im_shape=mask.shape)) roi_list = cls(rois=rois) else: raise ValueError('Unrecognized file format.') if reassign_label: for idx, roi in zip(it.count(), roi_list): roi.label = str(idx) return roi_list def transform(self, transforms, im_shape=None, copy_properties=True): """Apply 2x3 affine transformations to the ROIs Parameters ---------- transforms : list of GeometryTransforms or 2x3 Numpy arrays The affine transformations to be applied to the ROIs. Length of list should equal the number of planes (im_shape[0]). im_shape : 3-element tuple, optional The (zyx) shape of the target image. 
If None, must be set before any ROI can be converted to a mask. copy_properties : bool, optional Copy the label, id, and tags properties from the source ROIs to the transformed ROIs. Returns ------- sima.ROI.ROIList Returns an ROIList consisting of the transformed ROI objects. """ transformed_rois = [] for roi in self: transformed_polygons = [] for coords in roi.coords: z = coords[0][2] # assuming all coords share a z-coordinate if isinstance(transforms[0], np.ndarray): transformed_coords = [np.dot(transforms[int(z)], np.hstack([vert[:2], 1])) for vert in coords] else: transformed_coords = transforms[int(z)](coords[:, :2]) transformed_coords = [np.hstack((coords, z)) for coords in transformed_coords] transformed_polygons.append(transformed_coords) transformed_roi = ROI( polygons=transformed_polygons, im_shape=im_shape) if copy_properties: transformed_roi.label = roi.label transformed_roi.id = roi.id transformed_roi.tags = roi.tags transformed_rois.append(transformed_roi) return ROIList(rois=transformed_rois) def __str__(self): return ("<ROI set: nROIs={nROIs}, timestamp={timestamp}>").format( nROIs=len(self), timestamp=self.timestamp) def __repr__(self): return super(ROIList, self).__repr__().replace( '\n', '\n ') def save(self, path, label=None, save_type=None): """Save an ROI set to a file. The file can contain multiple ROIList objects with different associated labels. If the file already exists, the ROIList will be added without deleting the others. Parameters ---------- path : str The name of the pkl file to which the ROIList will be saved. label : str, optional The label associated with the ROIList. Defaults to using the timestamp as a label. save_type : {'mask','polygons'}, optional If specified, convert the type of each ROI in the list prior to saving """ time_fmt = '%Y-%m-%d-%Hh%Mm%Ss' timestamp = datetime.strftime(datetime.now(), time_fmt) rois = [roi.todict(type=save_type) for roi in self] try: with open(path, 'rb') as f: data = pickle.load(f) except IOError: data = {} if label is None: label = timestamp data[label] = {'rois': rois, 'timestamp': timestamp} with open(path, 'wb') as f: pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) def subset(self, tags=None, neg_tags=None): """Filter the ROIs in the set based on the ROI tags. Parameters ---------- tags : list of strings, optional Only ROIs that contain all of the tags will be included. neg_tags : list of strings, optional Only ROIs that contain none of the neg_tags will be included. Returns ------- sima.ROI.ROIList New ROIList with all filtered ROIs. """ if tags is None: tags = [] if neg_tags is None: neg_tags = [] rois = [r for r in self if all(t in r.tags for t in tags) and not any(t in r.tags for t in neg_tags)] return ROIList(rois) def poly2mask(polygons, im_size): """Converts polygons to a sparse binary mask. >>> from sima.ROI import poly2mask >>> poly1 = [[0,0], [0,1], [1,1], [1,0]] >>> poly2 = [[0,1], [0,2], [2,2], [2,1]] >>> mask = poly2mask([poly1, poly2], (3, 3)) >>> mask[0].todense() matrix([[ True, False, False], [ True, True, False], [False, False, False]], dtype=bool) Parameters ---------- polygons : sequence of coordinates or sequence of Polygons A sequence of polygons where each is either a sequence of (x,y) or (x,y,z) coordinate pairs, an Nx2 or Nx3 numpy array, or a Polygon object. 
    im_size : tuple
        Final size of the resulting mask

    Output
    ------
    mask
        A list of sparse binary masks of the points contained within the
        polygons, one mask per plane

    """
    if len(im_size) == 2:
        im_size = (1,) + im_size
    polygons = _reformat_polygons(polygons)
    mask = np.zeros(im_size, dtype=bool)
    for poly in polygons:
        # assuming all points in the polygon share a z-coordinate
        z = int(np.array(poly.exterior.coords)[0][2])
        # valid plane indices run from 0 to im_size[0] - 1, so skip any
        # polygon whose plane falls outside the image
        if z >= im_size[0]:
            warn('Polygon with z-coordinate {} '.format(z) +
                 'cropped using im_size = {}'.format(im_size))
            continue
        x_min, y_min, x_max, y_max = poly.bounds
        # Shift all points by 0.5 to move coordinates to corner of pixel
        shifted_poly = Polygon(np.array(poly.exterior.coords)[:, :2] - 0.5)
        points = [Point(x, y) for x, y in
                  product(np.arange(int(x_min), np.ceil(x_max)),
                          np.arange(int(y_min), np.ceil(y_max)))]
        points_in_poly = list(filter(shifted_poly.contains, points))
        for point in points_in_poly:
            xx, yy = point.xy
            x = int(xx[0])
            y = int(yy[0])
            if 0 <= y < im_size[1] and 0 <= x < im_size[2]:
                mask[z, y, x] = True
    masks = []
    for z_coord in np.arange(mask.shape[0]):
        masks.append(lil_matrix(mask[z_coord, :, :]))
    return masks


def mask2poly(mask, threshold=0.5):
    """Takes a mask and returns a MultiPolygon

    Parameters
    ----------
    mask : array
        Sparse or dense array to identify polygon contours within.
    threshold : float, optional
        Threshold value used to separate points in and out of resulting
        polygons. 0.5 will partition a boolean mask, for an arbitrary value
        binary mask choose the midpoint of the low and high values.

    Output
    ------
    MultiPolygon
        Returns a MultiPolygon of all masked regions.

    """
    mask = _reformat_mask(mask)
    verts_list = []
    for z, m in enumerate(mask):
        if issparse(m):
            m = np.array(m.astype('byte').todense())
        if (m != 0).sum() == 0:
            # If the plane is empty, just skip it
            continue
        # Add an empty row and column around the mask to make sure edge masks
        # are correctly determined
        expanded_dims = (m.shape[0] + 2, m.shape[1] + 2)
        expanded_mask = np.zeros(expanded_dims, dtype=float)
        expanded_mask[1:m.shape[0] + 1, 1:m.shape[1] + 1] = m
        verts = find_contours(expanded_mask.T, threshold)
        # Subtract off 1 to shift coords back to their real space,
        # but also add 0.5 to move the coordinates back to the corners,
        # so net subtract 0.5 from every coordinate
        verts = [np.subtract(x, 0.5).tolist() for x in verts]
        v = []
        for poly in verts:
            new_poly = [point + [z] for point in poly]
            v.append(new_poly)
        verts_list.extend(v)
    return _reformat_polygons(verts_list)


def _reformat_polygons(polygons):
    """Convert polygons to a MultiPolygon

    Accepts one or more sequences of 2- or 3-element sequences or a sequence
    of shapely Polygon objects.

    Parameters
    ----------
    polygons : sequence of 2- or 3-element coordinates or sequence of
        Polygons
        Polygon(s) to be converted to a MultiPolygon. Coordinates are used
        to initialize a shapely MultiPolygon, and thus should follow a
        (x, y, z) coordinate space convention.
    Returns
    -------
    MultiPolygon

    """
    if isinstance(polygons, Polygon):
        # A single Polygon was passed in; wrap it in a list. This check
        # must come before len(), which Polygon objects do not support.
        polygons = [polygons]
    elif len(polygons) == 0:
        # Just return an empty MultiPolygon
        return MultiPolygon([])
    elif isinstance(polygons[0], Polygon):
        # polygons is already a list of polygons
        pass
    else:
        # We got some sort of sequence of sequences, ensure it has the
        # correct depth and convert to Polygon objects
        try:
            Polygon(polygons[0])
        except (TypeError, AssertionError):
            polygons = [polygons]

    new_polygons = []
    for poly in polygons:
        # Polygon.simplify with tolerance=0 will return the exact same
        # polygon with co-linear points removed
        new_polygons.append(Polygon(poly).simplify(tolerance=0))
    polygons = new_polygons

    # Polygon.exterior.coords is not settable, need to initialize new
    # objects
    z_polygons = []
    for poly in polygons:
        if poly.has_z:
            z_polygons.append(poly)
        else:
            warn('Polygon initialized without z-coordinate. ' +
                 'Assigning to zeroth plane (z = 0)')
            z_polygons.append(
                Polygon([point + (0,) for point in poly.exterior.coords]))
    return MultiPolygon(z_polygons)


def _reformat_mask(mask):
    """Convert mask to a list of sparse matrices (scipy.sparse.lil_matrix)

    Accepts a 2- or 3-dimensional array, a list of 2D arrays, or a
    sequence of sparse matrices.

    Parameters
    ----------
    mask : a 2- or 3-dimensional numpy array, a list of 2D numpy arrays,
        or a sequence of sparse matrices.
        Masks are assumed to follow a (z, y, x) convention. If mask is a
        list of 2D arrays or of sparse matrices, each element is assumed
        to correspond to the mask for a single plane (and is assumed to
        follow a (y, x) convention).

    """
    if isinstance(mask, np.ndarray):
        # user passed in a 2D or 3D np.array
        if mask.ndim == 2:
            mask = [lil_matrix(mask, dtype=mask.dtype)]
        elif mask.ndim == 3:
            new_mask = []
            for s in range(mask.shape[0]):
                new_mask.append(lil_matrix(mask[s, :, :], dtype=mask.dtype))
            mask = new_mask
        else:
            raise ValueError(
                'numpy ndarray must be either 2 or 3 dimensions')
    elif issparse(mask):
        # user passed in a single lil_matrix
        mask = [lil_matrix(mask)]
    else:
        new_mask = []
        for plane in mask:
            new_mask.append(lil_matrix(plane, dtype=plane.dtype))
        mask = new_mask
    return mask
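
# A minimal round-trip sketch for poly2mask() and mask2poly(), assuming the
# module's third-party dependencies (numpy, scipy, shapely, scikit-image)
# are installed. The 'square' polygon and the (1, 8, 8) mask size are
# illustrative values, not part of the API.
if __name__ == '__main__':
    # A 4x4 square on plane z=0, given as (x, y) vertices.
    square = [[0, 0], [0, 4], [4, 4], [4, 0]]

    # Rasterize: one scipy.sparse.lil_matrix per z-plane.
    masks = poly2mask([square], (1, 8, 8))
    assert masks[0].todense()[1, 1]  # an interior pixel is set

    # Vectorize the first plane back into a shapely MultiPolygon whose
    # vertices carry the plane index as their z-coordinate.
    contours = mask2poly(np.asarray(masks[0].todense(), dtype=float))
    print(contours.wkt)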
from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex


class ObjectclassTableModel(QAbstractTableModel):

    def __init__(self, parent=None):
        QAbstractTableModel.__init__(self, parent)
        self.templateObject = None

    def setTemplateObject(self, templateObject=None):
        self.beginResetModel()
        self.templateObject = templateObject
        self.endResetModel()

    def insertRow(self, objectclass):
        if self.templateObject:
            self.beginInsertRows(QModelIndex(), self.rowCount(),
                                 self.rowCount())
            self.templateObject.addObjectclass(objectclass)
            self.endInsertRows()
            return True
        return False

    def removeRows(self, indexes):
        # We must get the objectclasses before we make any of their
        # indexes invalid
        if self.templateObject:
            objectclasses = list(map(self.getObjectclass, indexes))
            for o in objectclasses:
                self.beginRemoveRows(QModelIndex(), self.getIndexRow(o),
                                     self.getIndexRow(o))
                self.templateObject.deleteObjectclass(objectclass=o)
                self.endRemoveRows()
            return True
        return False

    def getObjectclass(self, index):
        if index.isValid():
            return index.internalPointer()
        return QVariant()

    def getIndexRow(self, objectclass):
        return self.templateObject.objectclassIndex(objectclass)

    def rowCount(self, parent=QModelIndex()):
        # Number of objectclasses
        if self.templateObject:
            return self.templateObject.getCountObjectclasses()
        return 0

    def columnCount(self, parent=QModelIndex()):
        return 1

    def flags(self, index):
        return Qt.ItemIsSelectable | Qt.ItemIsEnabled

    def data(self, index, role=Qt.DisplayRole):
        """Handles getting the correct data from the TemplateObject and
        returning it
        """
        if not index.isValid():
            return QVariant()
        row = index.row()
        if role == Qt.DisplayRole and self.templateObject:
            # return the objectclass in the given row
            return self.templateObject.objectclasses[row]
        return QVariant()

    def index(self, row, column, parent):
        if row < 0 or column < 0:
            return QModelIndex()
        if row >= self.rowCount() or column >= self.columnCount():
            return QModelIndex()
        internalPointer = self.templateObject.objectclasses[row]
        return self.createIndex(row, column, internalPointer)
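
# A minimal usage sketch for ObjectclassTableModel. DummyTemplateObject is
# a hypothetical stand-in that only illustrates the interface the model
# calls (objectclasses, getCountObjectclasses, addObjectclass,
# deleteObjectclass, objectclassIndex); the real template object comes
# from elsewhere in the application.
if __name__ == '__main__':
    import sys
    from PyQt4.QtGui import QApplication, QTableView

    class DummyTemplateObject(object):

        def __init__(self):
            self.objectclasses = []

        def getCountObjectclasses(self):
            return len(self.objectclasses)

        def addObjectclass(self, objectclass):
            self.objectclasses.append(objectclass)

        def deleteObjectclass(self, objectclass=None):
            self.objectclasses.remove(objectclass)

        def objectclassIndex(self, objectclass):
            return self.objectclasses.index(objectclass)

    app = QApplication(sys.argv)

    model = ObjectclassTableModel()
    model.setTemplateObject(DummyTemplateObject())
    model.insertRow('inetOrgPerson')
    model.insertRow('posixAccount')

    view = QTableView()
    view.setModel(model)
    view.show()

    sys.exit(app.exec_())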
""" * sVimPy - small Virtual interpreting machine for Python * (c) 2012 by Tim Theede aka Pez2001 <pez2001@voyagerproject.de> / vp * * python arduino api wrapper * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * THIS SOFTWARE IS SUPPLIED AS IT IS WITHOUT ANY WARRANTY! * """ INPUT = 0 OUTPUT = 1 LOW = 0 HIGH = 1 def pinMode(pin,mode):pass def digitalRead(pin):pass def digitalWrite(pin,value):pass def analogRead(pin):pass def analogWrite(pin,value):pass def delay(ms):pass def serialPrintln(message):pass def serialBegin(baudrate):pass
from argparse import Action, ArgumentDefaultsHelpFormatter, ArgumentTypeError
from glob import glob
from importlib import import_module
from os.path import basename, dirname

from reportclient import set_verbosity

from abrtcli import config
from abrtcli.i18n import _


class Command:
    aliases = []
    description = None
    name = None

    def __init__(self, subparsers):
        self._parser = subparsers.add_parser(
            self.name,
            aliases=self.aliases,
            help=self.description,
            description=self.description,
            formatter_class=ArgumentDefaultsHelpFormatter)

        self._parser.set_defaults(func=self.run)

        # Clone of CountAction in argparse with additional bells and
        # whistles.
        class VerbosityAction(Action):
            def __init__(self, option_strings, dest):
                super().__init__(option_strings=option_strings,
                                 dest=dest,
                                 nargs=0,
                                 default=0,
                                 required=False,
                                 help=_('increase output verbosity'))

            def __call__(self, parser, namespace, values,
                         option_string=None):
                verbosity = getattr(namespace, self.dest, 0) + 1
                setattr(namespace, self.dest, verbosity)

                set_verbosity(verbosity)

        self._parser.add_argument('-v', '--verbose',
                                  action=VerbosityAction)

    @property
    def parser(self):
        return self._parser

    def add_filter_arguments(self):
        group = self._parser.add_argument_group()

        def uint(string):
            index = int(string)
            if index < 1:
                raise ArgumentTypeError(
                    _('positive non-zero integer expected'))
            return index

        group.add_argument('-N', type=uint, dest='n_latest',
                           metavar='COUNT',
                           help=_('filter last N problems'))
        group.add_argument('-c', '--component', action='append', type=str,
                           dest='components', metavar='COMPONENT',
                           help=_('filter problems with matching component'))
        group.add_argument('-n', '--not-reported', action='store_true',
                           help=_('filter unreported problems'))
        group.add_argument('-s', '--since', type=int, metavar='TIMESTAMP',
                           help=_('filter problems newer than the '
                                  'specified timestamp'))
        group.add_argument('-u', '--until', type=int, metavar='TIMESTAMP',
                           help=_('filter problems older than the '
                                  'specified timestamp'))
        group.add_argument('-x', '--executable', type=str,
                           dest='executables', metavar='EXECUTABLE',
                           help=_('filter problems with matching '
                                  'executable'))

    def add_format_arguments(self, default_pretty='full'):
        group = self._parser.add_mutually_exclusive_group()

        group.add_argument('--format', type=str,
                           help=_('output format'))
        group.add_argument('--pretty', type=str, choices=config.FORMATS,
                           default=default_pretty,
                           help=_('built-in output format'))

    def add_match_argument(self):
        self._parser.add_argument('patterns', nargs='*', type=str,
                                  metavar='PATTERN',
                                  help=_('path to the problem directory, '
                                         'problem ID or wildcard'))

    def run(self, arguments):
        pass


_commands = {}


def wildcard_assumed(arguments):
    if not hasattr(arguments, 'patterns'):
        return False

    return (getattr(arguments, 'n_latest', 0) or
            getattr(arguments, 'components', None) or
            getattr(arguments, 'not_reported', False) or
            getattr(arguments, 'since', 0) or
            getattr(arguments, 'until', 0))


def run_command(command, arguments):
    _commands[command].run(arguments)


def load_commands(subparsers):
    for command in Command.__subclasses__():
        instance = command(subparsers)
        _commands[instance.name] = instance


# Import every sibling module at package-import time so that the Command
# subclasses they define are registered before load_commands() runs.
module_names = [basename(path[:-3])
                for path in glob('%s/*.py' % (dirname(__file__)))
                if path != __file__]
for name in module_names:
    import_module(".%s" % (name), package=__name__)
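
# Usage sketch (not executed here): how a concrete Command subclass plugs
# into load_commands()/run_command() via argparse subparsers. The import
# path 'abrtcli.commands' and the HelloCommand class are assumptions for
# illustration; real commands live in the sibling modules imported above.
#
#     from argparse import ArgumentParser
#     from abrtcli.commands import Command, load_commands, run_command
#
#     class HelloCommand(Command):
#         name = 'hello'
#         description = 'print a greeting'
#
#         def run(self, arguments):
#             print('verbosity = %d' % arguments.verbose)
#
#     parser = ArgumentParser(prog='abrt-cli')
#     subparsers = parser.add_subparsers(dest='command')
#     load_commands(subparsers)  # registers HelloCommand and the rest
#     args = parser.parse_args(['hello', '-v'])
#     run_command(args.command, args)  # -> verbosity = 1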
CODES = [ ('', ''), ('011111', u'011111 - CULTIVO DE ARROZ'), ('011112', u'011112 - CULTIVO DE TRIGO'), ('011119', u'011119 - CULTIVO DE CEREALES EXCEPTO LOS FORRAJEROS Y LAS SEMILLAS N.C.P.'), ('011121', u'011121 - CULTIVO DE MAIZ'), ('011122', u'011122 - CULTIVO DE SORGO GRANIFERO'), ('011129', u'011129 - CULTIVO DE CEREALES FORRAJEROS N.C.P.'), ('011131', u'011131 - CULTIVO DE SOJA'), ('011132', u'011132 - CULTIVO DE GIRASOL'), ('011139', u'011139 - CULTIVO DE OLEAGINOSAS N.C.P.'), ('011140', u'011140 - CULTIVO DE PASTOS FORRAJEROS'), ('011210', u'011210 - CULTIVO DE PAPA,BATATA Y MANDIOCA'), ('011221', u'011221 - CULTIVO DE TOMATE'), ('011229', u'011229 - CULTIVO DE BULBOS,BROTES,RAICES Y HORTALIZAS DE FRUTOS N.C.P.'), ('011230', u'011230 - CULTIVO DE HORTALIZAS DE HOJA Y DE OTRAS HORTALIZAS FRESCAS'), ('011241', u'011241 - CULTIVO DE LEGUMBRES FRESCAS'), ('011242', u'011242 - CULTIVO DE LEGUMBRES SECAS'), ('011251', u'011251 - CULTIVO DE FLORES'), ('011252', u'011252 - CULTIVO DE PLANTAS ORNAMENTALES'), ('011311', u'011311 - CULTIVO DE MANZANA Y PERA'), ('011319', u'011319 - CULTIVO DE FRUTAS DE PEPITA N.C.P.'), ('011320', u'011320 - CULTIVO DE FRUTAS DE CAROZO'), ('011330', u'011330 - CULTIVO DE FRUTAS CITRICAS'), ('011340', u'011340 - CULTIVO DE NUECES Y FRUTAS SECAS'), ('011390', u'011390 - CULTIVO DE FRUTAS N.C.P.'), ('011411', u'011411 - CULTIVO DE ALGODON'), ('011419', u'011419 - CULTIVO DE PLANTAS PARA LA OBTENCION DE FIBRAS N.C.P.'), ('011421', u'011421 - CULTIVO DE CAÑA DE AZUCAR'), ('011429', u'011429 - CULTIVO DE PLANTAS SACARIFERAS N.C.P.'), ('011430', u'011430 - CULTIVO DE VID PARA VINIFICAR'), ('011440', u'011440 - CULTIVO DE TE,YERBA MATE Y OTRAS PLANTAS CUYAS HOJAS SE UTILIZAN PARA PREPARAR BEBIDAS'), ('011450', u'011450 - CULTIVO DE TABACO'), ('011460', u'011460 - CULTIVO DE ESPECIAS'), ('011490', u'011490 - CULTIVOS INDUSTRIALES N.C.P.'), ('011511', u'011511 - PRODUCCION DE SEMILLAS HIBRIDAS DE CEREALES Y OLEAGINOSAS'), ('011512', u'011512 - PRODUCCION DE SEMILLAS VARIETALES O AUTOFECUNDADAS DE CEREALES,OLEAGINOSAS,Y FORRAJERAS'), ('011513', u'011513 - PRODUCCION DE SEMILLAS DE HORTALIZAS Y LEGUMBRES,FLORES Y PLANTAS ORNAMENTALES Y ARBOLES FRUTALES'), ('011519', u'011519 - PRODUCCION DE SEMILLAS DE CULTIVOS AGRICOLAS N.C.P.'), ('011520', u'011520 - PRODUCCION DE OTRAS FORMAS DE PROPAGACION DE CULTIVOS AGRICOLAS'), ('012111', u'012111 - CRIA DE GANADO BOVINO -EXCEPTO EN CABAÑAS Y PARA LA PRODUCCION DE LECHE-'), ('012112', u'012112 - INVERNADA DE GANADO BOVINO EXCEPTO EL ENGORDE EN CORRALES'), ('012113', u'012113 - ENGORDE EN CORRALES'), ('012120', u'012120 - CRIA DE GANADO OVINO,EXCEPTO EN CABAÑAS Y PARA LA PRODUCCION DE LANA'), ('012130', u'012130 - CRIA DE GANADO PORCINO,EXCEPTO EN CABAÑAS'), ('012140', u'012140 - CRIA DE GANADO EQUINO,EXCEPTO EN HARAS'), ('012150', u'012150 - CRIA DE GANADO CAPRINO,EXCEPTO EN CABAÑAS Y PARA PRODUCCION DE LECHE'), ('012161', u'012161 - CRIA DE GANADO BOVINO EN CABAÑAS'), ('012162', u'012162 - CRIA DE GANADO OVINO,PORCINO Y CAPRINO EN CABAÑAS'), ('012163', u'012163 - CRIA DE GANADO EQUINO EN HARAS'), ('012169', u'012169 - CRIA EN CABAÑAS DE GANADO N.C.P.'), ('012171', u'012171 - PRODUCCION DE LECHE DE GANADO BOVINO'), ('012179', u'012179 - PRODUCCION DE LECHE DE GANADO N.C.P.'), ('012181', u'012181 - PRODUCCION DE LANA'), ('012182', u'012182 - PRODUCCION DE PELOS'), ('012190', u'012190 - CRIA DE GANADO N.C.P.'), ('012211', u'012211 - CRIA DE AVES PARA PRODUCCION DE CARNE'), ('012212', u'012212 - CRIA DE AVES PARA PRODUCCION DE 
HUEVOS'), ('012220', u'012220 - PRODUCCION DE HUEVOS'), ('012230', u'012230 - APICULTURA'), ('012241', u'012241 - CRIA DE ANIMALES PARA LA OBTENCION DE PIELES Y CUEROS'), ('012242', u'012242 - CRIA DE ANIMALES PARA LA OBTENCION DE PELOS'), ('012243', u'012243 - CRIA DE ANIMALES PARA LA OBTENCION DE PLUMAS'), ('012290', u'012290 - CRIA DE ANIMALES Y OBTENCION DE PRODUCTOS DE ORIGEN ANIMAL,N.C.P.'), ('014111', u'014111 - SERVICIOS DE LABRANZA,SIEMBRA,TRANSPLANTE Y CUIDADOS CULTURALES'), ('014112', u'014112 - SERVICIOS DE PULVERIZACION,DESINFECCION Y FUMIGACION AEREA Y TERRESTRE,EXCEPTO LA MANUAL'), ('014119', u'014119 - SERVICIOS DE MAQUINARIA AGRICOLA N.C.P.,EXCEPTO LOS DE COSECHA MECANICA'), ('014120', u'014120 - SERVICIOS DE COSECHA MECANICA'), ('014130', u'014130 - SERVICIOS DE CONTRATISTAS DE MANO DE OBRA AGRICOLA'), ('014190', u'014190 - SERVICIOS AGRICOLAS N.C.P'), ('014210', u'014210 - INSEMINACION ARTIFICIAL Y SERVICIOS N.C.P.PARA MEJORAR LA REPRODUCCION DE LOS ANIMALES Y EL RENDIMIENTO DE SUS PRODUCTOS'), ('014220', u'014220 - SERVICIOS DE CONTRATISTAS DE MANO DE OBRA PECUARIA'), ('014291', u'014291 - SERVICIOS PARA EL CONTROL DE PLAGAS,BAÑOS PARASITICIDAS,ETC.'), ('014292', u'014292 - ALBERGUE Y CUIDADO DE ANIMALES DE TERCEROS'), ('014299', u'014299 - SERVICIOS PECUARIOS N.C.P.,EXCEPTO LOS VETERINARIOS'), ('015010', u'015010 - CAZA Y CAPTURA DE ANIMALES VIVOS Y REPOBLACION DE ANIMALES DE CAZA'), ('015020', u'015020 - SERVICIOS PARA LA CAZA'), ('020110', u'020110 - PLANTACION DE BOSQUES'), ('020120', u'020120 - REPOBLACION Y CONSERVACION DE BOSQUES NATIVOS Y ZONAS FORESTADAS'), ('020130', u'020130 - EXPLOTACION DE VIVEROS FORESTALES'), ('020210', u'020210 - EXTRACCION DE PRODUCTOS FORESTALES DE BOSQUES CULTIVADOS'), ('020220', u'020220 - EXTRACCION DE PRODUCTOS FORESTALES DE BOSQUES NATIVOS'), ('020310', u'020310 - SERVICIOS FORESTALES DE EXTRACCION DE MADERA'), ('020390', u'020390 - SERVICIOS FORESTALES EXCEPTO LOS RELACIONADOS CON LA EXTRACCION DE MADERA'), ('050110', u'050110 - PESCA MARITIMA,COSTERA Y DE ALTURA'), ('050120', u'050120 - PESCA CONTINENTAL,FLUVIAL Y LACUSTRE'), ('050130', u'050130 - RECOLECCION DE PRODUCTOS MARINOS'), ('050200', u'050200 - EXPLOTACION DE CRIADEROS DE PECES,GRANJAS PISCICOLAS Y OTROS FRUTOS ACUATICOS'), ('050300', u'050300 - SERVICIOS PARA LA PESCA'), ('101000', u'101000 - EXTRACCION Y AGLOMERACION DE CARBON'), ('102000', u'102000 - EXTRACCION Y AGLOMERACION DE LIGNITO'), ('103000', u'103000 - EXTRACCION Y AGLOMERACION DE TURBA'), ('111000', u'111000 - EXTRACCION DE PETROLEO CRUDO Y GAS NATURAL'), ('112000', u'112000 - ACTIVIDADES DE SERVICIOS RELACIONADAS CON LA EXTRACCION DE PETROLEO Y GAS,EXCEPTO LAS ACTIVIDADES DE PROSPECCION'), ('120000', u'120000 - EXTRACCION DE MINERALES Y CONCENTRADOS DE URANIO Y TORIO'), ('131000', u'131000 - EXTRACCION DE MINERALES DE HIERRO'), ('132000', u'132000 - EXTRACCION DE MINERALES METALIFEROS NO FERROSOS,EXCEPTO MINERALES DE URANIO Y TORIO'), ('141100', u'141100 - EXTRACCION DE ROCAS ORNAMENTALES'), ('141200', u'141200 - EXTRACCION DE PIEDRA CALIZA Y YESO'), ('141300', u'141300 - EXTRACCION DE ARENAS,CANTO RODADO Y TRITURADOS PETREOS'), ('141400', u'141400 - EXTRACCION DE ARCILLA Y CAOLIN'), ('142110', u'142110 - EXTRACCION DE MINERALES PARA LA FABRIC.DE ABONOS EXCEPTO TURBA.'), ('142120', u'142120 - EXTRACCION DE MINERALES PARA LA FABRIC.DE PRODUCTOS QUIMICOS'), ('142200', u'142200 - EXTRACCION DE SAL EN SALINAS Y DE ROCA'), ('142900', u'142900 - EXPLOTACION DE MINAS Y CANTERAS N.C.P.'), ('151111', u'151111 - 
MATANZA DE GANADO BOVINO'), ('151112', u'151112 - PROCESAMIENTO DE CARNE DE GANADO BOVINO'), ('151113', u'151113 - SALADERO Y PELADERO DE CUEROS DE GANADO BOVINO'), ('151120', u'151120 - MATANZA Y PROCESAMIENTO DE CARNE DE AVES'), ('151130', u'151130 - ELABORACION DE FIAMBRES Y EMBUTIDOS'), ('151140', u'151140 - MATANZA DE GANADO EXCEPTO EL BOVINO Y PROCESAMIENTO DE SU CARNE'), ('151191', u'151191 - FABRICACION DE ACEITES Y GRASAS DE ORIGEN ANIMAL'), ('151199', u'151199 - MATANZA DE ANIMALES N.C.P.Y PROCESAMIENTO DE SU CARNE,ELABORACION DE SUBPRODUCTOS CARNICOS N.C.P.'), ('151201', u'151201 - ELABORACION DE PESCADOS DE MAR,CRUSTACEOS Y PRODUCTOS MARINOS N.C.P.'), ('151202', u'151202 - ELABORACION DE PESCADOS DE RIOS Y LAGUNAS Y OTROS PRODUCTOS FLUVIALES Y LACUSTRES'), ('151203', u'151203 - FABRICACION DE ACEITES,GRASAS,HARINAS Y PRODUCTOS A BASE DE PESCADOS N.C.P.'), ('151310', u'151310 - PREPARACION DE CONSERVAS DE FRUTAS,HORTALIZAS Y LEGUMBRES'), ('151320', u'151320 - ELABORACION DE JUGOS NATURALES Y SUS CONCENTRADOS,DE FRUTAS,HORTALIZAS Y LEGUMBRES'), ('151330', u'151330 - ELABORACION Y ENVASADO DE DULCES,MERMELADAS Y JALEAS'), ('151340', u'151340 - ELABORACION DE FRUTAS,HORTALIZAS Y LEGUMBRES CONGELADAS'), ('151390', u'151390 - ELABORACION DE FRUTAS,HORTALIZAS Y LEGUMBRES DESHIDRATADAS O DESECADAS,PREPARACION N.C.P.DE FRUTAS,HORTALIZAS Y LEGUMBRES'), ('151410', u'151410 - ELABORACION DE ACEITES Y GRASAS VEGETALES SIN REFINAR Y SUS SUBPRODUCTOS,ELABORACION DE ACEITE VIRGEN'), ('151420', u'151420 - ELABORACION DE ACEITES Y GRASAS VEGETALES REFINADAS'), ('151430', u'151430 - ELABORACION DE MARGARINAS Y GRASAS VEGETALES COMESTIBLES SIMILARES'), ('152010', u'152010 - ELABORACION DE LECHES Y PRODUCTOS LACTEOS DESHIDRATADOS'), ('152020', u'152020 - ELABORACION DE QUESOS'), ('152030', u'152030 - ELABORACION INDUSTRIAL DE HELADOS'), ('152090', u'152090 - ELABORACION DE PRODUCTOS LACTEOS N.C.P.'), ('153110', u'153110 - MOLIENDA DE TRIGO'), ('153120', u'153120 - PREPARACION DE ARROZ'), ('153131', u'153131 - ELABORACION DE ALIMENTOS A BASE DE CEREALES'), ('153139', u'153139 - PREPARACION Y MOLIENDA DE LEGUMBRES Y CEREALES N.C.P.'), ('153200', u'153200 - ELABORACION DE ALMIDONES Y PRODUCTOS DERIVADOS DEL ALMIDON'), ('153300', u'153300 - ELABORACION DE ALIMENTOS PREPARADOS PARA ANIMALES'), ('154110', u'154110 - ELABORACION DE GALLETITAS Y BIZCOCHOS'), ('154120', u'154120 - ELABORACION INDUSTRIAL DE PRODUCTOS DE PANADERIA,EXCLUIDO GALLETITAS Y BIZCOCHOS'), ('154191', u'154191 - ELABORACION DE MASAS Y PRODUCTOS DE PASTELERIA'), ('154199', u'154199 - ELABORACION DE PRODUCTOS DE PANADERIA N.C.P.'), ('154200', u'154200 - ELABORACION DE AZUCAR'), ('154301', u'154301 - ELABORACION DE CACAO,CHOCOLATE Y PRODUCTOS A BASE DE CACAO'), ('154309', u'154309 - ELABORACION DE PRODUCTOS DE CONFITERIA N.C.P.'), ('154410', u'154410 - ELABORACION DE PASTAS ALIMENTARIAS FRESCAS'), ('154420', u'154420 - ELABORACION DE PASTAS ALIMENTARIAS SECAS'), ('154911', u'154911 - TOSTADO,TORRADO Y MOLIENDA DE CAFE'), ('154912', u'154912 - ELABORACION Y MOLIENDA DE HIERBAS AROMATICAS Y ESPECIAS'), ('154920', u'154920 - PREPARACION DE HOJAS DE TE'), ('154930', u'154930 - ELABORACION DE YERBA MATE'), ('154991', u'154991 - ELABORACION DE EXTRACTOS,JARABES Y CONCENTRADOS'), ('154992', u'154992 - ELABORACION DE VINAGRES'), ('154999', u'154999 - ELABORACION DE PRODUCTOS ALIMENTICIOS N.C.P.'), ('155110', u'155110 - DESTILACION DE ALCOHOL ETILICO'), ('155120', u'155120 - DESTILACION,RECTIFICACION Y MEZCLA DE BEBIDAS ESPIRITOSAS'), ('155210', 
u'155210 - ELABORACION DE VINOS'), ('155290', u'155290 - ELABORACION DE SIDRA Y OTRAS BEBIDAS ALCOHOLICAS FERMENTADAS A PARTIR DE FRUTAS'), ('155300', u'155300 - ELABORACION DE CERVEZA,BEBIDAS MALTEADAS Y DE MALTA'), ('155411', u'155411 - EMBOTELLADO DE AGUAS NATURALES Y MINERALES'), ('155412', u'155412 - FABRICACION DE SODAS'), ('155420', u'155420 - ELABORACION DE BEBIDAS GASEOSAS,EXCEPTO SODA'), ('155490', u'155490 - ELABORACION DE HIELO,JUGOS ENVASADOS PARA DILUIR Y OTRAS BEBIDAS NO ALCOHOLICAS'), ('160010', u'160010 - PREPARACION DE HOJAS DE TABACO'), ('160091', u'160091 - ELABORACION DE CIGARRILLOS'), ('160099', u'160099 - ELABORACION DE PRODUCTOS DE TABACO N.C.P.'), ('171111', u'171111 - DESMOTADO DE ALGODON,PREPARACION DE FIBRAS DE ALGODON'), ('171112', u'171112 - PREPARACION DE FIBRAS TEXTILES VEGETALES EXCEPTO DE ALGODON'), ('171120', u'171120 - PREPARACION DE FIBRAS ANIMALES DE USO TEXTIL,INCLUSO LAVADO DE LANA'), ('171131', u'171131 - FABRICACION DE HILADOS DE LANA Y SUS MEZCLAS'), ('171132', u'171132 - FABRICACION DE HILADOS DE ALGODON Y SUS MEZCLAS'), ('171139', u'171139 - FABRICACION DE HILADOS TEXTILES EXCEPTO DE LANA Y DE ALGODON'), ('171141', u'171141 - FABRICACION DE TEJIDOS (TELAS) PLANOS DE LANA Y SUS MEZCLAS'), ('171142', u'171142 - FABRICACION DE TEJIDOS (TELAS) PLANOS DE ALGODON Y SUS MEZCLAS'), ('171143', u'171143 - FABRICACION DE TEJIDOS (TELAS) PLANOS DE FIBRAS MANUFACTURADAS Y SEDA'), ('171148', u'171148 - FABRICACION DE TEJIDOS (TELAS) PLANOS DE FIBRAS TEXTILES N.C.P.'), ('171149', u'171149 - FABRICACION DE PRODUCTOS DE TEJEDURIA N.C.P.'), ('171200', u'171200 - ACABADO DE PRODUCTOS TEXTILES'), ('172101', u'172101 - FABRICACION DE FRAZADAS,MANTAS,PONCHOS,COLCHAS,COBERTORES,ETC.'), ('172102', u'172102 - FABRICACION DE ROPA DE CAMA Y MANTELERIA'), ('172103', u'172103 - FABRICACION DE ART. DE LONA Y SUCEDANEOS DE LONA'), ('172104', u'172104 - FABRICACION DE BOLSAS DE MATERIALES TEXTILES PARA PRODUCTOS A GRANEL'), ('172109', u'172109 - FABRICACION DE ART. CONFECCIONADOS DE MATERIALES TEXTILES EXCEPTO PRENDAS DE VESTIR N.C.P.'), ('172200', u'172200 - FABRICACION DE TAPICES Y ALFOMBRAS'), ('172300', u'172300 - FABRICACION DE CUERDAS,CORDELES,BRAMANTES Y REDES'), ('172900', u'172900 - FABRICACION DE PRODUCTOS TEXTILES N.C.P.'), ('173010', u'173010 - FABRICACION DE MEDIAS'), ('173020', u'173020 - FABRICACION DE SUETERES Y ART. SIMILARES DE PUNTO'), ('173090', u'173090 - FABRICACION DE TEJIDOS Y ART. DE PUNTO N.C.P.'), ('181110', u'181110 - CONFECCION DE ROPA INTERIOR,PRENDAS PARA DORMIR Y PARA LA PLAYA'), ('181120', u'181120 - CONFECCION DE INDUMENTARIA DE TRABAJO,UNIFORMES,GUARDAPOLVOS Y SUS ACCESORIOS'), ('181130', u'181130 - CONFECCION DE INDUMENTARIA PARA BEBES Y NIÑOS'), ('181191', u'181191 - CONFECCION DE PILOTOS E IMPERMEABLES'), ('181192', u'181192 - FABRICACION DE ACCESORIOS DE VESTIR EXCEPTO DE CUERO'), ('181199', u'181199 - CONFECCION DE PRENDAS DE VESTIR N.C.P.,EXCEPTO LAS DE PIEL,CUERO Y SUCEDANEOS,PILOTOS E IMPERMEABLES'), ('181201', u'181201 - FABRICACION DE ACCESORIOS DE VESTIR DE CUERO'), ('181202', u'181202 - CONFECCION DE PRENDAS DE VESTIR DE CUERO'), ('182001', u'182001 - CONFECCION DE PRENDAS DE VESTIR DE PIEL Y SUCEDANEOS'), ('182009', u'182009 - TERMINACION Y TEÑIDO DE PIELES,FABRIC.DE ART. DE PIEL N.C.P.'), ('191100', u'191100 - CURTIDO Y TERMINACION DE CUEROS'), ('191200', u'191200 - FABRICACION DE MALETAS,BOLSOS DE MANO Y SIMILARES,ART. DE TALABARTERIA Y ART. 
DE CUERO N.C.P.'), ('192010', u'192010 - FABRICACION DE CALZADO DE CUERO,EXCEPTO EL ORTOPEDICO'), ('192020', u'192020 - FABRICACION DE CALZADO DE TELA,PLASTICO,GOMA,CAUCHO Y OTROS MATERIALES,EXCEPTO CALZADO ORTOPEDICO Y DE ASBESTO'), ('192030', u'192030 - FABRICACION DE PARTES DE CALZADO'), ('201000', u'201000 - ASERRADO Y CEPILLADO DE MADERA'), ('202100', u'202100 - FABRICACION DE HOJAS DE MADERA PARA ENCHAPADO,FABRIC.DE TABLEROS CONTRACHAPADOS,TABLEROS LAMINADOS,TABLEROS DE PARTICULAS Y TABLEROS Y PANELES N.C.P.'), ('202201', u'202201 - FABRICACION DE ABERTURAS Y ESTRUCTURAS DE MADERA PARA LA CONSTRUCCION'), ('202202', u'202202 - FABRICACION DE VIVIENDAS PREFABRICADAS DE MADERA'), ('202300', u'202300 - FABRICACION DE RECIPIENTES DE MADERA'), ('202901', u'202901 - FABRICACION DE ART. DE CESTERIA,CAÑA Y MIMBRE'), ('202902', u'202902 - FABRICACION DE ATAUDES'), ('202903', u'202903 - FABRICACION DE ART. DE MADERA EN TORNERIAS'), ('202904', u'202904 - FABRICACION DE PRODUCTOS DE CORCHO'), ('202909', u'202909 - FABRICACION DE PRODUCTOS DE MADERA N.C.P'), ('210101', u'210101 - FABRICACION DE PULPA DE MADERA'), ('210102', u'210102 - FABRICACION DE PAPEL Y CARTON EXCEPTO ENVASES'), ('210201', u'210201 - FABRICACION DE ENVASES DE PAPEL'), ('210202', u'210202 - FABRICACION DE ENVASES DE CARTON'), ('210910', u'210910 - FABRICACION DE ART. DE PAPEL Y CARTON DE USO DOMESTICO E HIGIENICO SANITARIO'), ('210990', u'210990 - FABRICACION DE ART. DE PAPEL Y CARTON N.C.P.'), ('221100', u'221100 - EDICION DE LIBROS,FOLLETOS,PARTITURAS Y OTRAS PUBLICACIONES'), ('221200', u'221200 - EDICION DE PERIODICOS,REVISTAS Y PUBLICACIONES PERIODICAS'), ('221300', u'221300 - EDICION DE GRABACIONES'), ('221900', u'221900 - EDICION N.C.P.'), ('222101', u'222101 - IMPRESION DE DIARIOS Y REVISTAS'), ('222109', u'222109 - IMPRESION EXCEPTO DE DIARIOS Y REVISTAS'), ('222200', u'222200 - SERVICIOS RELACIONADOS CON LA IMPRESION'), ('223000', u'223000 - REPRODUCCION DE GRABACIONES'), ('231000', u'231000 - FABRICACION DE PRODUCTOS DE HORNOS DE COQUE'), ('232000', u'232000 - FABRICACION DE PRODUCTOS DE LA REFINACION DEL PETROLEO'), ('233000', u'233000 - FABRICACION DE COMBUSTIBLE NUCLEAR'), ('241110', u'241110 - FABRICACION DE GASES COMPRIMIDOS Y LICUADOS.'), ('241120', u'241120 - FABRICACION DE CURTIENTES NATURALES Y SINTETICOS.'), ('241130', u'241130 - FABRICACION DE MATERIAS COLORANTES BASICAS,EXCEPTO PIGMENTOS PREPARADOS.'), ('241180', u'241180 - FABRICACION DE MATERIAS QUIMICAS INORGANICAS BASICAS N.C.P.'), ('241190', u'241190 - FABRICACION DE MATERIAS QUIMICAS ORGANICAS BASICAS N.C.P.'), ('241200', u'241200 - FABRICACION DE ABONOS Y COMPUESTOS DE NITROGENO'), ('241301', u'241301 - FABRICACION DE RESINAS Y CAUCHOS SINTETICOS'), ('241309', u'241309 - FABRICACION DE MATERIAS PLASTICAS EN FORMAS PRIMARIAS N.C.P.'), ('242100', u'242100 - FABRICACION DE PLAGUICIDAS Y PRODUCTOS QUIMICOS DE USO AGROPECUARIO'), ('242200', u'242200 - FABRICACION DE PINTURAS,BARNICES Y PRODUCTOS DE REVESTIMIENTO SIMILARES,TINTAS DE IMPRENTA Y MASILLAS'), ('242310', u'242310 - FABRICACION DE MEDICAMENTOS DE USO HUMANO Y PRODUCTOS FARMACEUTICOS'), ('242320', u'242320 - FABRICACION DE MEDICAMENTOS DE USO VETERINARIO'), ('242390', u'242390 - FABRICACION DE PRODUCTOS DE LABORATORIO, SUSTANCIAS QUIMICAS MEDICINALES Y PRODUCTOS BOTANICOS N.C.P.'), ('242411', u'242411 - FABRICACION DE PREPARADOS PARA LIMPIEZA,PULIDO Y SANEAMIENTO'), ('242412', u'242412 - FABRICACION DE JABONES Y DETERGENTES'), ('242490', u'242490 - FABRICACION DE COSMETICOS,PERFUMES Y PRODUCTOS 
DE HIGIENE Y TOCADOR'), ('242901', u'242901 - FABRICACION DE TINTAS'), ('242902', u'242902 - FABRICACION DE EXPLOSIVOS,MUNICIONES Y PRODUCTOS DE PIROTECNIA'), ('242903', u'242903 - FABRICACION DE COLAS,ADHESIVOS,APRESTOS Y CEMENTOS EXCEPTO LOS ODONTOLOGICOS OBTENIDOS DE SUSTANCIAS MINERALES Y VEGETALES'), ('242909', u'242909 - FABRICACION DE PRODUCTOS QUIMICOS N.C.P.'), ('243000', u'243000 - FABRICACION DE FIBRAS MANUFACTURADAS'), ('251110', u'251110 - FABRICACION DE CUBIERTAS Y CAMARAS'), ('251120', u'251120 - RECAUCHUTADO Y RENOVACION DE CUBIERTAS'), ('251901', u'251901 - FABRICACION DE AUTOPARTES DE CAUCHO EXCEPTO CAMARAS Y CUBIERTAS'), ('251909', u'251909 - FABRICACION DE PRODUCTOS DE CAUCHO N.C.P.'), ('252010', u'252010 - FABRICACION DE ENVASES PLASTICOS'), ('252090', u'252090 - FABRICACION DE PRODUCTOS PLASTICOS EN FORMAS BASICAS Y ART. DE PLASTICO N.C.P.,EXCEPTO MUEBLES'), ('261010', u'261010 - FABRICACION DE ENVASES DE VIDRIO'), ('261020', u'261020 - FABRICACION Y ELABORACION DE VIDRIO PLANO'), ('261091', u'261091 - FABRICACION DE ESPEJOS Y VITRALES'), ('261099', u'261099 - FABRICACION DE PRODUCTOS DE VIDRIO N.C.P.'), ('269110', u'269110 - FABRICACION DE ART. SANITARIOS DE CERAMICA'), ('269191', u'269191 - FABRICACION DE OBJETOS CERAMICOS PARA USO INDUSTRIAL Y DE LABORATORIO'), ('269192', u'269192 - FABRICACION DE OBJETOS CERAMICOS PARA USO DOMESTICO EXCEPTO ARTEFACTOS SANITARIOS'), ('269193', u'269193 - FABRICACION DE OBJETOS CERAMICOS EXCEPTO REVESTIMIENTOS DE PISOS Y PAREDES N.C.P.'), ('269200', u'269200 - FABRICACION DE PRODUCTOS DE CERAMICA REFRACTARIA'), ('269301', u'269301 - FABRICACION DE LADRILLOS'), ('269302', u'269302 - FABRICACION DE REVESTIMIENTOS CERAMICOS'), ('269309', u'269309 - FABRICACION DE PRODUCTOS DE ARCILLA Y CERAMICA NO REFRACTARIA PARA USO ESTRUCTURAL N.C.P.'), ('269410', u'269410 - ELABORACION DE CEMENTO'), ('269421', u'269421 - ELABORACION DE YESO'), ('269422', u'269422 - ELABORACION DE CAL'), ('269510', u'269510 - FABRICACION DE MOSAICOS'), ('269591', u'269591 - FABRICACION DE ART. 
DE CEMENTO Y FIBROCEMENTO'), ('269592', u'269592 - FABRICACION DE PREMOLDEADAS PARA LA CONSTRUCCION'), ('269600', u'269600 - CORTE,TALLADO Y ACABADO DE LA PIEDRA'), ('269910', u'269910 - ELABORACION PRIMARIA N.C.P.DE MINERALES NO METALICOS'), ('269990', u'269990 - FABRICACION DE PRODUCTOS MINERALES NO METALICOS N.C.P.'), ('271001', u'271001 - FUNDICION EN ALTOS HORNOS Y ACERIAS.PRODUCCION DE LINGOTES,PLANCHAS O BARRAS'), ('271002', u'271002 - LAMINACION Y ESTIRADO'), ('271009', u'271009 - FABRICACION EN INDUSTRIAS BASICAS DE PRODUCTOS DE HIERRO Y ACERO N.C.P.'), ('272010', u'272010 - ELABORACION DE ALUMINIO PRIMARIO Y SEMIELABORADOS DE ALUMINIO'), ('272090', u'272090 - PRODUCCION DE METALES NO FERROSOS N.C.P.Y SUS SEMIELABORADOS'), ('273100', u'273100 - FUNDICION DE HIERRO Y ACERO'), ('273200', u'273200 - FUNDICION DE METALES NO FERROSOS'), ('281101', u'281101 - FABRICACION DE CARPINTERIA METALICA'), ('281102', u'281102 - FABRICACION DE ESTRUCTURAS METALICAS PARA LA CONSTRUCCION'), ('281200', u'281200 - FABRICACION DE TANQUES,DEPOSITOS Y RECIPIENTES DE METAL'), ('281300', u'281300 - FABRICACION DE GENERADORES DE VAPOR'), ('289100', u'289100 - FORJADO,PRENSADO,ESTAMPADO Y LAMINADO DE METALES,PULVIMETALURGIA'), ('289200', u'289200 - TRATAMIENTO Y REVESTIMIENTO DE METALES,OBRAS DE INGENIERIA MECANICA EN GENERAL REALIZADAS A CAMBIO DE UNA RETRIBUCION O POR CONTRATA'), ('289301', u'289301 - FABRICACION DE HERRAMIENTAS MANUALES Y SUS ACCESORIOS'), ('289302', u'289302 - FABRICACION DE ART. DE CUCHILLERIA Y UTENSILLOS DE MESA Y DE COCINA'), ('289309', u'289309 - FABRICACION DE CERRADURAS,HERRAJES Y ART. DE FERRETERIA N.C.P.'), ('289910', u'289910 - FABRICACION DE ENVASES METALICOS'), ('289991', u'289991 - FABRICACION DE TEJIDOS DE ALAMBRE'), ('289992', u'289992 - FABRICACION DE CAJAS DE SEGURIDAD'), ('289993', u'289993 - FABRICACION DE PRODUCTOS METALICOS DE TORNERIA Y/O MATRICERIA'), ('289999', u'289999 - FABRICACION DE PRODUCTOS METALICOS N.C.P.'), ('291100', u'291100 - FABRICACION DE MOTORES Y TURBINAS, EXCEPTO MOTORES PARA AERONAVES,VEHICULOS AUTOMOTORES Y MOTOCICLETAS'), ('291200', u'291200 - FABRICACION DE BOMBAS,COMPRESORES,GRIFOS Y VALVULAS'), ('291300', u'291300 - FABRICACION DE COJINETES,ENGRANAJES,TRENES DE ENGRANAJE Y PIEZAS DE TRANSMISION'), ('291400', u'291400 - FABRICACION DE HORNOS,HOGARES Y QUEMADORES'), ('291500', u'291500 - FABRICACION DE EQUIPO DE ELEVACION Y MANIPULACION'), ('291900', u'291900 - FABRICACION DE MAQUINARIA DE USO GENERAL N.C.P.'), ('292110', u'292110 - FABRICACION DE TRACTORES'), ('292190', u'292190 - FABRICACION DE MAQUINARIA AGROPECUARIA Y FORESTAL,EXCEPTO TRACTORES'), ('292200', u'292200 - FABRICACION DE MAQUINAS HERRAMIENTA'), ('292300', u'292300 - FABRICACION DE MAQUINARIA METALURGICA'), ('292400', u'292400 - FABRICACION DE MAQUINARIA PARA LA EXPLOTACION DE MINAS Y CANTERAS Y PARA OBRAS DE CONSTRUCCION'), ('292500', u'292500 - FABRICACION DE MAQUINARIA PARA LA ELABORACION DE ALIMENTOS,BEBIDAS Y TABACO'), ('292600', u'292600 - FABRICACION DE MAQUINARIA PARA LA ELABORACION DE PRODUCTOS TEXTILES,PRENDAS DE VESTIR Y CUEROS'), ('292700', u'292700 - FABRICACION DE ARMAS Y MUNICIONES'), ('292901', u'292901 - FABRICACION DE MAQUINARIA PARA LA INDUSTRIA DEL PAPEL Y LAS ARTES GRAFICAS'), ('292909', u'292909 - FABRICACION DE MAQUINARIA DE USO ESPECIAL N.C.P.'), ('293010', u'293010 - FABRICACION DE COCINAS,CALEFONES,ESTUFAS Y CALEFACTORES DE USO DOMESTICO NO ELECTRICOS'), ('293020', u'293020 - FABRICACION DE HELADERAS,"FREEZERS",LAVARROPAS Y SECARROPAS'), ('293091', 
u'293091 - FABRICACION DE MAQUINAS DE COSER Y TEJER'), ('293092', u'293092 - FABRICACION DE VENTILADORES,EXTRACTORES Y ACONDICIONADORES DE AIRE,ASPIRADORAS Y SIMILARES'), ('293093', u'293093 - FABRICACION DE ENCERADORAS,PULIDORAS,BATIDORAS,LICUADORAS Y SIMILARES'), ('293094', u'293094 - FABRICACION DE PLANCHAS,CALEFACTORES,HORNOS ELECTRICOS,TOSTADORAS Y OTROS APARATOS GENERADORES DE CALOR'), ('293095', u'293095 - FABRICACION DE ARTEFACTOS PARA ILUMINACION EXCEPTO LOS ELECTRICOS'), ('293099', u'293099 - FABRICACION DE APARATOS Y ACCESORIOS ELECTRICOS N.C.P.'), ('300000', u'300000 - FABRICACION DE MAQUINARIA DE OFICINA,CONTABILIDAD E INFORMATICA'), ('311000', u'311000 - FABRICACION DE MOTORES,GENERADORES Y TRANSFORMADORES ELECTRICOS'), ('312000', u'312000 - FABRICACION DE APARATOS DE DISTRIBUCION Y CONTROL DE LA ENERGIA ELECTRICA'), ('313000', u'313000 - FABRICACION DE HILOS Y CABLES AISLADOS'), ('314000', u'314000 - FABRICACION DE ACUMULADORES Y DE PILAS Y BATERIAS PRIMARIAS'), ('315000', u'315000 - FABRICACION DE LAMPARAS ELECTRICAS Y EQUIPO DE ILUMINACION'), ('319000', u'319000 - FABRICACION DE EQUIPO ELECTRICO N.C.P.'), ('321000', u'321000 - FABRICACION DE TUBOS,VALVULAS Y OTROS COMPONENTES ELECTRONICOS'), ('322000', u'322000 - FABRICACION DE TRANSMISORES DE RADIO Y TELEVISION Y DE APARATOS PARA TELEFONIA Y TELEGRAFIA CON HILOS'), ('323000', u'323000 - FABRICACION DE RECEPTORES DE RADIO Y TELEVISION,APARATOS DE GRABACION Y REPRODUCCION DE SONIDO Y VIDEO,Y PRODUCTOS CONEXOS'), ('331100', u'331100 - FABRICACION DE EQUIPO MEDICO Y QUIRURGICO Y DE APARATOS ORTOPEDICOS'), ('331200', u'331200 - FABRICACION DE INSTRUMENTOS Y APARATOS PARA MEDIR,VERIFICAR,ENSAYAR,NAVEGAR Y OTROS FINES,EXCEPTO EL EQUIPO DE CONTROL DE PROCESOS INDUSTRIALES'), ('331300', u'331300 - FABRICACION DE EQUIPO DE CONTROL DE PROCESOS INDUSTRIALES'), ('332001', u'332001 - FABRICACION DE APARATOS Y ACCESORIOS PARA FOTOGRAFIA EXCEPTO PELICULAS,PLACAS Y PAPELES SENSIBLES'), ('332002', u'332002 - FABRICACION DE LENTES Y OTROS ART. OFTALMICOS'), ('332003', u'332003 - FABRICACION DE INSTRUMENTOS DE OPTICA'), ('333000', u'333000 - FABRICACION DE RELOJES'), ('341000', u'341000 - FABRICACION DE VEHICULOS AUTOMOTORES'), ('342000', u'342000 - FABRICACION DE CARROCERIAS PARA VEHICULOS AUTOMOTORES,FABRIC.DE REMOLQUES Y SEMIRREMOLQUES'), ('343000', u'343000 - FABRICACION DE PARTES,PIEZAS Y ACCESORIOS PARA VEHICULOS AUTOMOTORES Y SUS MOTORES'), ('351100', u'351100 - CONSTRUCCION Y REPARACION DE BUQUES'), ('351200', u'351200 - CONSTRUCCION Y REPARACION DE EMBARCACIONES DE RECREO Y DEPORTE'), ('352000', u'352000 - FABRICACION DE LOCOMOTORAS Y DE MATERIAL RODANTE PARA FERROCARRILES Y TRANVIAS'), ('353000', u'353000 - FABRICACION Y REPARACION DE AERONAVES'), ('359100', u'359100 - FABRICACION DE MOTOCICLETAS'), ('359200', u'359200 - FABRICACION DE BICICLETAS Y DE SILLONES DE RUEDAS PARA INVALIDOS'), ('359900', u'359900 - FABRICACION DE EQUIPO DE TRANSPORTE N.C.P.'), ('361010', u'361010 - FABRICACION DE MUEBLES Y PARTES DE MUEBLES,PRINCIPALMENTE DE MADERA'), ('361020', u'361020 - FABRICACION DE MUEBLES Y PARTES DE MUEBLES,PRINCIPALMENTE DE OTROS MATERIALES'), ('361030', u'361030 - FABRICACION DE SOMIERES Y COLCHONES'), ('369101', u'369101 - FABRICACION DE JOYAS Y ART. CONEXOS'), ('369102', u'369102 - FABRICACION DE OBJETOS DE PLATERIA Y ART. ENCHAPADOS'), ('369200', u'369200 - FABRICACION DE INSTRUMENTOS DE MUSICA'), ('369300', u'369300 - FABRICACION DE ART. 
DE DEPORTE'), ('369400', u'369400 - FABRICACION DE JUEGOS Y JUGUETES'), ('369910', u'369910 - FABRICACION DE LAPICES,LAPICERAS, BOLIGRAFOS,SELLOS Y ART. SIMILARES PARA OFICINAS Y ARTISTAS'), ('369920', u'369920 - FABRICACION DE CEPILLOS Y PINCELES'), ('369991', u'369991 - FABRICACION DE FOSFOROS'), ('369992', u'369992 - FABRICACION DE PARAGUAS'), ('369999', u'369999 - INDUSTRIAS MANUFACTURERAS N.C.P.'), ('371000', u'371000 - RECICLAMIENTO DE DESPERDICIOS Y DESECHOS METALICOS'), ('372000', u'372000 - RECICLAMIENTO DE DESPERDICIOS Y DESECHOS NO METALICOS'), ('401110', u'401110 - GENERACION DE ENERGIA TERMICA CONVENCIONAL'), ('401120', u'401120 - GENERACION DE ENERGIA TERMICA NUCLEAR'), ('401130', u'401130 - GENERACION DE ENERGIA HIDRAULICA'), ('401190', u'401190 - GENERACION DE ENERGIA N.C.P.'), ('401200', u'401200 - TRANSPORTE DE ENERGIA ELECTRICA'), ('401300', u'401300 - DISTRIBUCION DE ENERGIA ELECTRICA'), ('402001', u'402001 - FABRICACION Y DISTRIBUCION DE GAS'), ('402009', u'402009 - FABRICACION Y DISTRIBUCION DE COMBUSTIBLES GASEOSOS N.C.P.'), ('403000', u'403000 - SUMINISTRO DE VAPOR Y AGUA CALIENTE'), ('410010', u'410010 - CAPTACION,DEPURACION Y DISTRIBUCION DE AGUA DE FUENTES SUBTERRANEAS'), ('410020', u'410020 - CAPTACION,DEPURACION Y DISTRIBUCION DE AGUA DE FUENTES SUPERFICIALES'), ('451100', u'451100 - DEMOLICION Y VOLADURA DE EDIFICIOS Y DE SUS PARTES'), ('451200', u'451200 - PERFORACION Y SONDEO EXCEPTO: PERFORACION DE POZOS DE PETROLEO,DE GAS,DE MINAS E HIDRAULICOS Y PROSPECCION DE YACIMIENTOS DE PETROLEO'), ('451900', u'451900 - MOVIMIENTO DE SUELOS Y PREPARACION DE TERRENOS PARA OBRAS N.C.P.'), ('452100', u'452100 - CONSTRUCCION,REFORMA Y REPARACION DE EDIFICIOS RESIDENCIALES'), ('452200', u'452200 - CONSTRUCCION,REFORMA Y REPARACION DE EDIFICIOS NO RESIDENCIALES'), ('452310', u'452310 - CONSTRUCCION,REFORMA Y REPARACION DE OBRAS HIDRAULICAS'), ('452390', u'452390 - CONSTRUCCION,REFORMA Y REPARACION DE OBRAS DE INFRAESTRUCTURA DEL TRANSPORTE N.C.P'), ('452400', u'452400 - CONSTRUCCION,REFORMA Y REPARACION DE REDES'), ('452510', u'452510 - PERFORACION DE POZOS DE AGUA'), ('452520', u'452520 - ACTIVIDADES DE HINCADO DE PILOTES,CIMENTACION Y OTROS TRABAJOS DE HORMIGON ARMADO'), ('452590', u'452590 - ACTIVIDADES ESPECIALIZADAS DE CONSTRUCCION N.C.P.'), ('452900', u'452900 - OBRAS DE INGENIERIA CIVIL N.C.P.'), ('453110', u'453110 - INSTALACIONES DE ASCENSORES,MONTACARGAS Y ESCALERAS MECANICAS'), ('453120', u'453120 - INSTALACION DE SISTEMAS DE ILUMINACION,CONTROL Y SEÑALIZACION ELECTRICA PARA EL TRANSPORTE'), ('453190', u'453190 - EJECUCION Y MANTENIMIENTO DE INSTALACIONES ELECTRICAS Y ELECTRONICAS N.C.P.'), ('453200', u'453200 - AISLAMIENTO TERMICO,ACUSTICO,HIDRICO Y ANTIVIBRATORIO'), ('453300', u'453300 - INSTALACIONES DE GAS,AGUA,SANITARIOS Y DE CLIMATIZACION,CON SUS ARTEFACTOS CONEXOS'), ('453900', u'453900 - INSTALACIONES PARA EDIFICIOS Y OBRAS DE INGENIERIA CIVIL N.C.P.'), ('454100', u'454100 - INSTALACIONES DE CARPINTERIA,HERRERIA DE OBRA Y ARTISTICA'), ('454200', u'454200 - TERMINACION Y REVESTIMIENTO DE PAREDES Y PISOS'), ('454300', u'454300 - COLOCACION DE CRISTALES EN OBRA'), ('454400', u'454400 - PINTURA Y TRABAJOS DE DECORACION'), ('454900', u'454900 - TERMINACION DE EDIFICIOS Y OBRAS DE INGENIERIA CIVIL N.C.P.'), ('455000', u'455000 - ALQUILER DE EQUIPO DE CONSTRUCCION O DEMOLICION DOTADO DE OPERARIOS'), ('501110', u'501110 - VENTA DE AUTOS,CAMIONETAS Y UTILITARIOS,NUEVOS'), ('501190', u'501190 - VENTA DE VEHICULOS AUTOMOTORES,NUEVOS N.C.P.'), ('501210', u'501210 - 
VENTA DE AUTOS,CAMIONETAS Y UTILITARIOS,USADOS'), ('501290', u'501290 - VENTA DE VEHICULOS AUTOMOTORES,USADOS N.C.P.'), ('502100', u'502100 - LAVADO AUTOMATICO Y MANUAL'), ('502210', u'502210 - REPARACION DE CAMARAS Y CUBIERTAS'), ('502220', u'502220 - REPARACION DE AMORTIGUADORES, ALINEACION DE DIRECCION Y BALANCEO DE RUEDAS'), ('502300', u'502300 - INSTALACION Y REPARACION DE LUNETAS Y VENTANILLAS, ALARMAS,CERRADURAS,RADIOS,SISTEMAS DE CLIMATIZACION AUTOMOTOR Y GRABADO DE CRISTALES'), ('502400', u'502400 - TAPIZADO Y RETAPIZADO'), ('502500', u'502500 - REPARACIONES ELECTRICAS,DEL TABLERO E INSTRUMENTAL,REPARACION Y RECARGA DE BATERIAS'), ('502600', u'502600 - REPARACION Y PINTURA DE CARROCERIAS,COLOCACION DE GUARDABARROS Y PROTECCIONES EXTERIORES'), ('502910', u'502910 - INSTALACION Y REPARACION DE CAÑOS DE ESCAPE'), ('502920', u'502920 - MANTENIMIENTO Y REPARACION DE FRENOS'), ('502990', u'502990 - MANTENIMIENTO Y REPARACION DEL MOTOR N.C.P.,MECANICA INTEGRAL'), ('503100', u'503100 - VENTA AL POR MAYOR DE PARTES,PIEZAS Y ACCESORIOS DE VEHICULOS AUTOMOTORES'), ('503210', u'503210 - VENTA AL POR MENOR DE CAMARAS Y CUBIERTAS'), ('503220', u'503220 - VENTA AL POR MENOR DE BATERIAS'), ('503290', u'503290 - VENTA AL POR MENOR DE PARTES,PIEZAS Y ACCESORIOS EXCEPTO CAMARAS,CUBIERTAS Y BATERIAS'), ('504010', u'504010 - VENTA DE MOTOCICLETAS Y DE SUS PARTES,PIEZAS Y ACCESORIOS'), ('504020', u'504020 - MANTENIMIENTO Y REPARACION DE MOTOCICLETAS'), ('505000', u'505000 - VENTA AL POR MENOR DE COMBUSTIBLE PARA VEHICULOS AUTOMOTORES Y MOTOCICLETAS'), ('511111', u'511111 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE CEREALES'), ('511112', u'511112 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE SEMILLAS'), ('511119', u'511119 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE PRODUCTOS AGRICOLAS N.C.P.'), ('511121', u'511121 - OPERACIONES DE INTERMEDIACION DE GANADO EN PIE.'), ('511122', u'511122 - OPERACIONES DE INTERMEDIACION DE LANAS,CUEROS Y PRODUCTOS AFINES DE TERCEROS.'), ('511911', u'511911 - OPERACIONES DE INTERMEDIACION DE CARNE - CONSIGNATARIO DIRECTO -'), ('511912', u'511912 - OPERACIONES DE INTERMEDIACION DE CARNE EXCEPTO CONSIGNATARIO DIRECTO'), ('511919', u'511919 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE ALIMENTOS,BEBIDAS Y TABACO N.C.P.'), ('511920', u'511920 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE PROD.TEXTILES,PRENDAS DE VESTIR,CALZADO EXCEPTO EL ORTOPEDICO, ART.DE MARROQUINERIA,PARAGUAS,SIMILARES Y PRODUCTOS DE CUERO N.C.P.'), ('511930', u'511930 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE MADERA Y MATERIALES PARA LA CONSTRUCCION'), ('511940', u'511940 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE ENERGIA ELECTRICA,GAS Y COMBUSTIBLES'), ('511950', u'511950 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE MINERALES,METALES Y PRODUCTOS QUIMICOS INDUSTRIALES'), ('511960', u'511960 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE MAQUINARIA,EQUIPO PROFESIONAL INDUSTRIAL Y COMERCIAL,EMBARCACIONES Y AERONAVES'), ('511970', u'511970 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE PAPEL,CARTON,LIBROS,REVISTAS,DIARIOS,MATERIALES DE EMBALAJE Y ART. 
DE LIBRERIA'), ('511990', u'511990 - VENTA AL POR MAYOR EN COMISION O CONSIGNACION DE MERCADERIAS N.C.P.'), ('512111', u'512111 - VENTA AL POR MAYOR DE CEREALES'), ('512112', u'512112 - VENTA AL POR MAYOR DE SEMILLAS'), ('512119', u'512119 - VENTA AL POR MAYOR DE MATERIAS PRIMAS AGRICOLAS Y DE LA SILVICULTURA N.C.P.'), ('512121', u'512121 - VENTA AL POR MAYOR DE LANAS,CUEROS EN BRUTO Y PRODUCTOS AFINES'), ('512129', u'512129 - VENTA AL POR MAYOR DE MATERIAS PRIMAS PECUARIAS INCLUSO ANIMALES VIVOS N.C.P.'), ('512211', u'512211 - VENTA AL POR MAYOR DE PRODUCTOS LACTEOS'), ('512212', u'512212 - VENTA AL POR MAYOR DE FIAMBRES Y QUESOS'), ('512221', u'512221 - VENTA AL POR MAYOR DE CARNES Y DERIVADOS EXCEPTO LAS DE AVES.'), ('512229', u'512229 - VENTA AL POR MAYOR DE AVES,HUEVOS Y PRODUCTOS DE GRANJA Y DE LA CAZA N.C.P.'), ('512230', u'512230 - VENTA AL POR MAYOR DE PESCADO'), ('512240', u'512240 - VENTA AL POR MAYOR Y EMPAQUE DE FRUTAS,DE LEGUMBRES Y HORTALIZAS FRESCAS'), ('512250', u'512250 - VENTA AL POR MAYOR DE PAN,PRODUCTOS DE CONFITERIA Y PASTAS FRESCAS'), ('512260', u'512260 - VENTA AL POR MAYOR DE CHOCOLATES,GOLOSINAS Y PRODUCTOS PARA KIOSCOS Y POLIRRUBROS N.C.P.,EXCEPTO CIGARRILLOS'), ('512271', u'512271 - VENTA AL POR MAYOR DE AZUCAR'), ('512272', u'512272 - VENTA AL POR MAYOR DE ACEITES Y GRASAS'), ('512273', u'512273 - VENTA AL POR MAYOR DE CAFE,TE,YERBA MATE Y OTRAS INFUSIONES Y ESPECIAS Y CONDIMENTOS'), ('512279', u'512279 - VENTA AL POR MAYOR DE PRODUCTOS Y SUBPRODUCTOS DE MOLINERIA N.C.P.'), ('512291', u'512291 - VENTA AL POR MAYOR DE FRUTAS,LEGUMBRES Y CEREALES SECOS Y EN CONSERVA'), ('512292', u'512292 - VENTA AL POR MAYOR DE ALIMENTOS PARA ANIMALES'), ('512299', u'512299 - VENTA AL POR MAYOR DE PRODUCTOS ALIMENTICIOS N.C.P.'), ('512311', u'512311 - VENTA AL POR MAYOR DE VINO'), ('512312', u'512312 - VENTA AL POR MAYOR DE BEBIDAS ESPIRITOSAS'), ('512319', u'512319 - VENTA AL POR MAYOR DE BEBIDAS ALCOHOLICAS N.C.P.'), ('512320', u'512320 - VENTA AL POR MAYOR DE BEBIDAS NO ALCOHOLICAS'), ('512400', u'512400 - VENTA AL POR MAYOR DE CIGARRILLOS Y PRODUCTOS DE TABACO'), ('513111', u'513111 - VENTA AL POR MAYOR DE PRODUCTOS TEXTILES EXCEPTO TELAS,TEJIDOS,PRENDAS Y ACCESORIOS DE VESTIR'), ('513112', u'513112 - VENTA AL POR MAYOR DE TEJIDOS (TELAS)'), ('513113', u'513113 - VENTA AL POR MAYOR DE ART. DE MERCERIA'), ('513114', u'513114 - VENTA AL POR MAYOR DE MANTELERIA,ROPA DE CAMA Y ART. TEXTILES PARA EL HOGAR'), ('513115', u'513115 - VENTA AL POR MAYOR DE TAPICES Y ALFOMBRAS DE MATERIALES TEXTILES'), ('513121', u'513121 - VENTA AL POR MAYOR DE PRENDAS DE VESTIR DE CUERO'), ('513122', u'513122 - VENTA AL POR MAYOR DE MEDIAS Y PRENDAS DE PUNTO'), ('513129', u'513129 - VENTA AL POR MAYOR DE PRENDAS DE VESTIR N.C.P.'), ('513130', u'513130 - VENTA AL POR MAYOR DE CALZADO EXCEPTO EL ORTOPEDICO'), ('513141', u'513141 - VENTA AL POR MAYOR DE PIELES Y CUEROS CURTIDOS Y SALADOS'), ('513142', u'513142 - VENTA AL POR MAYOR DE SUELAS Y AFINES'), ('513149', u'513149 - VENTA AL POR MAYOR DE ART. DE MARROQUINERIA, PARAGUAS Y PRODUCTOS SIMILARES N.C.P.'), ('513211', u'513211 - VENTA AL POR MAYOR DE LIBROS Y PUBLICACIONES'), ('513212', u'513212 - VENTA AL POR MAYOR DE DIARIOS Y REVISTAS'), ('513221', u'513221 - VENTA AL POR MAYOR DE PAPEL Y PRODUCTOS DE PAPEL Y CARTON EXCEPTO ENVASES'), ('513222', u'513222 - VENTA AL POR MAYOR DE ENVASES DE PAPEL Y CARTON'), ('513223', u'513223 - VENTA AL POR MAYOR DE ART. 
DE LIBRERIA Y PAPELERIA'), ('513310', u'513310 - VENTA AL POR MAYOR DE PRODUCTOS FARMACEUTICOS Y VETERINARIOS'), ('513320', u'513320 - VENTA AL POR MAYOR DE PRODUCTOS COSMETICOS,DE TOCADOR Y DE PERFUMERIA'), ('513330', u'513330 - VENTA AL POR MAYOR DE INSTRUMENTAL MEDICO Y ODONTOLOGICO Y ART. ORTOPEDICOS'), ('513410', u'513410 - VENTA AL POR MAYOR DE ART. DE OPTICA Y DE FOTOGRAFIA'), ('513420', u'513420 - VENTA AL POR MAYOR DE ART. DE RELOJERIA,JOYERIA Y FANTASIAS'), ('513511', u'513511 - VENTA AL POR MAYOR DE MUEBLES METALICOS EXCEPTO DE OFICINA'), ('513519', u'513519 - VENTA AL POR MAYOR DE MUEBLES N.C.P.EXCEPTO DE OFICINA,ART. DE MIMBRE Y CORCHO,COLCHONES Y SOMIERES'), ('513520', u'513520 - VENTA AL POR MAYOR DE ART. DE ILUMINACION'), ('513531', u'513531 - VENTA AL POR MAYOR DE ART. DE VIDRIO'), ('513532', u'513532 - VENTA AL POR MAYOR DE ART. DE BAZAR Y MENAJE EXCEPTO DE VIDRIO'), ('513540', u'513540 - VENTA AL POR MAYOR DE ARTEFACTOS PARA EL HOGAR ELECTRICOS,A GAS,KEROSENE U OTROS COMBUSTIBLES'), ('513551', u'513551 - VENTA AL POR MAYOR DE INSTRUMENTOS MUSICALES,DISCOS Y CASETES DE AUDIO Y VIDEO,ETC.'), ('513552', u'513552 - VENTA AL POR MAYOR DE EQUIPOS DE SONIDO,RADIO Y TELEVISION,COMUNICACIONES Y SUS COMPONENTES,REPUESTOS Y ACCESORIOS'), ('513910', u'513910 - VENTA AL POR MAYOR DE MATERIALES Y PRODUCTOS DE LIMPIEZA'), ('513920', u'513920 - VENTA AL POR MAYOR DE JUGUETES'), ('513930', u'513930 - VENTA AL POR MAYOR DE BICICLETAS Y RODADOS SIMILARES'), ('513940', u'513940 - VENTA AL POR MAYOR DE ART. DE ESPARCIMIENTO Y DEPORTES'), ('513950', u'513950 - VENTA AL POR MAYOR DE PAPELES PARA PARED,REVESTIMIENTO PARA PISOS DE GOMA,PLASTICO Y TEXTILES, Y ART. SIMILARES PARA LA DECORACION'), ('513991', u'513991 - VENTA AL POR MAYOR DE FLORES Y PLANTAS NATURALES Y ARTIFICIALES'), ('513992', u'513992 - VENTA AL POR MAYOR DE PRODUCTOS EN GENERAL EN ALMACENES Y SUPERMERCADOS MAYORISTAS,CON PREDOMINIO DE ALIMENTOS Y BEBIDAS'), ('513999', u'513999 - VENTA AL POR MAYOR DE ART. DE USO DOMESTICO Y/O PERSONAL N.C.P'), ('514110', u'514110 - VENTA AL POR MAYOR DE COMBUSTIBLES Y LUBRICANTES PARA AUTOMOTORES'), ('514191', u'514191 - FRACCIONAMIENTO Y DISTRIBUCION DE GAS LICUADO'), ('514199', u'514199 - VENTA AL POR MAYOR DE COMBUSTIBLES Y LUBRICANTES,EXCEPTO PARA AUTOMOTORES, LEÑA Y CARBON'), ('514200', u'514200 - VENTA AL POR MAYOR DE METALES Y MINERALES METALIFEROS'), ('514310', u'514310 - VENTA AL POR MAYOR DE ABERTURAS'), ('514320', u'514320 - VENTA AL POR MAYOR DE PRODUCTOS DE MADERA EXCEPTO MUEBLES'), ('514330', u'514330 - VENTA AL POR MAYOR DE ART. DE FERRETERIA'), ('514340', u'514340 - VENTA AL POR MAYOR DE PINTURAS Y PRODUCTOS CONEXOS'), ('514350', u'514350 - VENTA AL POR MAYOR DE VIDRIOS PLANOS Y TEMPLADOS'), ('514391', u'514391 - VENTA AL POR MAYOR DE ART. DE PLOMERIA,ELECTRICIDAD,CALEFACCION,OBRAS SANITARIAS,ETC.'), ('514392', u'514392 - VENTA AL POR MAYOR DE ART. 
DE LOZA,CERAMICA Y PORCELANA DE USO EN CONSTRUCCION'), ('514399', u'514399 - VENTA AL POR MAYOR DE LADRILLOS,CEMENTO,CAL,ARENA,PIEDRA,MARMOL Y MATERIALES PARA LA CONSTRUCCION N.C.P.'), ('514910', u'514910 - VENTA AL POR MAYOR DE PRODUCTOS INTERMEDIOS N.C.P.,DESPERDICIOS Y DESECHOS TEXTILES'), ('514920', u'514920 - VENTA AL POR MAYOR DE PRODUCTOS INTERMEDIOS N.C.P.,DESPERDICIOS Y DESECHOS DE PAPEL Y CARTON'), ('514931', u'514931 - VENTA AL POR MAYOR DE ABONOS,FERTILIZANTES Y PLAGUICIDAS'), ('514932', u'514932 - VENTA AL POR MAYOR DE CAUCHO Y PRODUCTOS DE CAUCHO EXCEPTO CALZADO Y AUTOPARTES'), ('514933', u'514933 - VENTA AL POR MAYOR DE ART. DE PLASTICO'), ('514940', u'514940 - VENTA AL POR MAYOR DE PRODUCTOS INTERMEDIOS N.C.P.,DESPERDICIOS Y DESECHOS METALICOS'), ('514990', u'514990 - VENTA AL POR MAYOR DE PRODUCTOS INTERMEDIOS,DESPERDICIOS Y DESECHOS N.C.P.'), ('515110', u'515110 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO EN LOS SECTORES AGROPECUARIO,JARDINERIA,SILVICULTURA,PESCA Y CAZA'), ('515120', u'515120 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO EN LA ELABORACION DE ALIMENTOS,BEBIDAS Y TABACOS'), ('515130', u'515130 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO EN LA FABRIC.DE TEXTILES,PRENDAS Y ACCESORIOS DE VESTIR,CALZADO,ART. DE CUERO Y MARROQUINERIA'), ('515140', u'515140 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO EN IMPRENTAS,ARTES GRAFICAS Y ACTIVIDADES CONEXAS'), ('515150', u'515150 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO MEDICO Y PARAMEDICO'), ('515160', u'515160 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO EN LA INDUSTRIA DEL PLASTICO Y DEL CAUCHO'), ('515190', u'515190 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPOS E IMPLEMENTOS DE USO ESPECIAL N.C.P.'), ('515200', u'515200 - VENTA AL POR MAYOR DE MAQUINAS - HERRAMIENTA DE USO GENERAL'), ('515300', u'515300 - VENTA AL POR MAYOR DE VEHICULOS, EQUIPOS Y MAQUINAS PARA EL TRANSPORTE FERROVIARIO,AEREO Y DE NAVEGACION'), ('515410', u'515410 - VENTA AL POR MAYOR DE MUEBLES E INSTALACIONES PARA OFICINAS'), ('515420', u'515420 - VENTA AL POR MAYOR DE MUEBLES E INSTALACIONES PARA LA INDUSTRIA,EL COMERCIO Y LOS SERVICIOS N.C.P.'), ('515910', u'515910 - VENTA AL POR MAYOR DE EQUIPO PROFESIONAL Y CIENTIFICO E INSTRUMENTOS DE MEDIDA Y DE CONTROL'), ('515921', u'515921 - VENTA AL POR MAYOR DE EQUIPOS INFORMATICOS Y MAQUINAS ELECTRONICAS DE ESCRIBIR Y CALCULAR'), ('515922', u'515922 - VENTA AL POR MAYOR DE MAQUINAS Y EQUIPOS DE COMUNICACIONES,CONTROL Y SEGURIDAD'), ('515990', u'515990 - VENTA AL POR MAYOR DE MAQUINAS,EQUIPO Y MATERIALES CONEXOS N.C.P.'), ('519000', u'519000 - VENTA AL POR MAYOR DE MERCANCIAS N.C.P.'), ('521110', u'521110 - VENTA AL POR MENOR EN HIPERMERCADOS CON PREDOMINIO DE PRODUCTOS ALIMENTARIOS Y BEBIDAS'), ('521120', u'521120 - VENTA AL POR MENOR EN SUPERMERCADOS CON PREDOMINIO DE PRODUCTOS ALIMENTARIOS Y BEBIDAS'), ('521130', u'521130 - VENTA AL POR MENOR EN MINIMERCADOS CON PREDOMINIO DE PRODUCTOS ALIMENTARIOS Y BEBIDAS'), ('521190', u'521190 - VENTA AL POR MENOR EN KIOSCOS,POLIRRUBROS Y COMERCIOS NO ESPECIALIZADOS N.C.P.'), ('521200', u'521200 - VENTA AL POR MENOR EXCEPTO LA ESPECIALIZADA,SIN PREDOMINIO DE PRODUCTOS ALIMENTARIOS Y BEBIDAS'), ('522111', u'522111 - VENTA AL POR MENOR DE PRODUCTOS LACTEOS'), ('522112', u'522112 - VENTA AL POR MENOR DE FIAMBRES Y EMBUTIDOS'), ('522120', u'522120 - VENTA AL POR MENOR DE PRODUCTOS DE ALMACEN Y DIETETICA'), ('522210', u'522210 - VENTA AL POR MENOR DE CARNES 
ROJAS,MENUDENCIAS Y CHACINADOS FRESCOS'), ('522220', u'522220 - VENTA AL POR MENOR DE HUEVOS,CARNE DE AVES Y PRODUCTOS DE GRANJA Y DE LA CAZA N.C.P.'), ('522300', u'522300 - VENTA AL POR MENOR DE FRUTAS,LEGUMBRES Y HORTALIZAS FRESCAS'), ('522410', u'522410 - VENTA AL POR MENOR DE PAN Y PRODUCTOS DE PANADERIA'), ('522420', u'522420 - VENTA AL POR MENOR DE BOMBONES,GOLOSINAS Y DEMAS PRODUCTOS DE CONFITERIA'), ('522500', u'522500 - VENTA AL POR MENOR DE BEBIDAS'), ('522910', u'522910 - VENTA AL POR MENOR DE PESCADOS Y PRODUCTOS DE LA PESCA'), ('522991', u'522991 - VENTA AL POR MENOR DE TABACO Y SUS PRODUCTOS'), ('522999', u'522999 - VENTA AL POR MENOR DE PRODUCTOS ALIMENTARIOS N.C.P.,EN COMERCIOS ESPECIALIZADOS'), ('523110', u'523110 - VENTA AL POR MENOR DE PRODUCTOS FARMACEUTICOS Y DE HERBORISTERIA'), ('523120', u'523120 - VENTA AL POR MENOR DE PRODUCTOS COSMETICOS,DE TOCADOR Y DE PERFUMERIA'), ('523130', u'523130 - VENTA AL POR MENOR DE INSTRUMENTAL MEDICO Y ODONTOLOGICO Y ART. ORTOPEDICOS'), ('523210', u'523210 - VENTA AL POR MENOR DE HILADOS,TEJIDOS Y ART. DE MERCERIA'), ('523220', u'523220 - VENTA AL POR MENOR DE CONFECCIONES PARA EL HOGAR'), ('523290', u'523290 - VENTA AL POR MENOR DE ART. TEXTILES N.C.P.EXCEPTO PRENDAS DE VESTIR'), ('523310', u'523310 - VENTA AL POR MENOR DE ROPA INTERIOR,MEDIAS,PRENDAS PARA DORMIR Y PARA LA PLAYA'), ('523320', u'523320 - VENTA AL POR MENOR DE INDUMENTARIA DE TRABAJO,UNIFORMES Y GUARDAPOLVOS'), ('523330', u'523330 - VENTA AL POR MENOR DE INDUMENTARIA PARA BEBES Y NIÑOS'), ('523391', u'523391 - VENTA AL POR MENOR DE PRENDAS DE VESTIR DE CUERO Y SUCEDANEOS EXCEPTO CALZADO'), ('523399', u'523399 - VENTA AL POR MENOR DE PRENDAS Y ACCESORIOS DE VESTIR N.C.P.EXCEPTO CALZADO,ART. DE MARROQUINERIA,PARAGUAS Y SIMILARES'), ('523410', u'523410 - VENTA AL POR MENOR DE ART. REGIONALES Y DE TALABARTERIA'), ('523420', u'523420 - VENTA AL POR MENOR DE CALZADO EXCEPTO EL ORTOPEDICO'), ('523490', u'523490 - VENTA AL POR MENOR DE ART. DE MARROQUINERIA,PARAGUAS Y SIMILARES N.C.P.'), ('523510', u'523510 - VENTA AL POR MENOR DE MUEBLES EXCEPTO DE OFICINA,LA INDUSTRIA,EL COMERCIO Y LOS SERVICIOS,ART. DE MIMBRE Y CORCHO'), ('523520', u'523520 - VENTA AL POR MENOR DE COLCHONES Y SOMIERES'), ('523530', u'523530 - VENTA AL POR MENOR DE ART. DE ILUMINACION'), ('523540', u'523540 - VENTA AL POR MENOR DE ART. DE BAZAR Y MENAJE'), ('523550', u'523550 - VENTA AL POR MENOR DE ARTEFACTOS PARA EL HOGAR ELECTRICOS,A GAS,A KEROSENE U OTROS COMBUSTIBLES'), ('523560', u'523560 - VENTA AL POR MENOR DE INSTRUMENTOS MUSICALES,EQUIPOS DE SONIDO,CASETES DE AUDIO Y VIDEO,DISCOS DE AUDIO Y VIDEO'), ('523590', u'523590 - VENTA AL POR MENOR DE ART. PARA EL HOGAR N.C.P.'), ('523610', u'523610 - VENTA AL POR MENOR DE ABERTURAS'), ('523620', u'523620 - VENTA AL POR MENOR DE MADERAS Y ART. DE MADERA Y CORCHO EXCEPTO MUEBLES'), ('523630', u'523630 - VENTA AL POR MENOR DE ART. DE FERRETERIA'), ('523640', u'523640 - VENTA AL POR MENOR DE PINTURAS Y PRODUCTOS CONEXOS'), ('523650', u'523650 - VENTA AL POR MENOR DE ART. PARA PLOMERIA E INSTALACION DE GAS'), ('523660', u'523660 - VENTA AL POR MENOR DE CRISTALES,ESPEJOS,MAMPARAS Y CERRAMIENTOS'), ('523670', u'523670 - VENTA AL POR MENOR DE PAPELES PARA PARED,REVESTIMIENTOS PARA PISOS Y ART. SIMILARES PARA LA DECORACION'), ('523690', u'523690 - VENTA AL POR MENOR DE MATERIALES DE CONSTRUCCION N.C.P.'), ('523710', u'523710 - VENTA AL POR MENOR DE ART. DE OPTICA Y FOTOGRAFIA'), ('523720', u'523720 - VENTA AL POR MENOR DE ART. 
DE RELOJERIA,JOYERIA Y FANTASIA'), ('523810', u'523810 - VENTA AL POR MENOR DE LIBROS Y PUBLICACIONES'), ('523820', u'523820 - VENTA AL POR MENOR DE DIARIOS Y REVISTAS'), ('523830', u'523830 - VENTA AL POR MENOR DE PAPEL,CARTON,MATERIALES DE EMBALAJE Y ART. DE LIBRERIA'), ('523911', u'523911 - VENTA AL POR MENOR DE FLORES Y PLANTAS NATURALES Y ARTIFICIALES'), ('523912', u'523912 - VENTA AL POR MENOR DE SEMILLAS,ABONOS,FERTILIZANTES Y OTROS PRODUCTOS DE VIVERO'), ('523920', u'523920 - VENTA AL POR MENOR DE MATERIALES Y PRODUCTOS DE LIMPIEZA'), ('523930', u'523930 - VENTA AL POR MENOR DE JUGUETES Y ART. DE COTILLON'), ('523941', u'523941 - VENTA AL POR MENOR DE ART. DE DEPORTE,EQUIPOS E INDUMENTARIA DEPORTIVA'), ('523942', u'523942 - VENTA AL POR MENOR DE ARMAS Y ART. DE CUCHILLERIA,ART. PARA LA CAZA Y PESCA'), ('523950', u'523950 - VENTA AL POR MENOR DE MAQUINAS Y EQUIPOS PARA OFICINA Y SUS COMPONENTES Y REPUESTOS'), ('523960', u'523960 - VENTA AL POR MENOR DE FUEL OIL,GAS EN GARRAFAS,CARBON Y LEÑA'), ('523970', u'523970 - VENTA AL POR MENOR DE PRODUCTOS VETERINARIOS Y ANIMALES DOMESTICOS'), ('523991', u'523991 - VENTA AL POR MENOR DE ART. DE CAUCHO EXCEPTO CAMARAS Y CUBIERTAS'), ('523992', u'523992 - VENTA AL POR MENOR DE MAQUINAS Y MOTORES Y SUS REPUESTOS'), ('523993', u'523993 - VENTA AL POR MENOR DE EQUIPO PROFESIONAL Y CIENTIFICO E INSTRUMENTOS DE MEDIDA Y DE CONTROL'), ('523994', u'523994 - VENTA AL POR MENOR DE ART. DE COLECCION Y OBJETOS DE ARTE'), ('523999', u'523999 - VENTA AL POR MENOR DE ART. NUEVOS N.C.P.'), ('524100', u'524100 - VENTA AL POR MENOR DE MUEBLES USADOS'), ('524200', u'524200 - VENTA AL POR MENOR DE LIBROS,REVISTAS Y SIMILARES USADOS'), ('524910', u'524910 - VENTA AL POR MENOR DE ANTIGÜEDADES'), ('524990', u'524990 - VENTA AL POR MENOR DE ART. USADOS N.C.P.EXCLUIDOS AUTOMOTORES Y MOTOCICLETAS'), ('525100', u'525100 - VENTA AL POR MENOR POR CORREO,TELEVISION,INTERNET Y OTROS MEDIOS DE COMUNICACION'), ('525200', u'525200 - VENTA AL POR MENOR EN PUESTOS MOVILES'), ('525900', u'525900 - VENTA AL POR MENOR NO REALIZADA EN ESTABLECIMIENTOS N.C.P.'), ('526100', u'526100 - REPARACION DE CALZADO Y ART. DE MARROQUINERIA'), ('526200', u'526200 - REPARACION DE ART. ELECTRICOS DE USO DOMESTICO'), ('526901', u'526901 - REPARACION DE RELOJES Y JOYAS'), ('526909', u'526909 - REPARACION DE ART. 
N.C.P.'), ('551100', u'551100 - SERVICIOS DE ALOJAMIENTO EN CAMPING'), ('551210', u'551210 - SERVICIOS DE ALOJAMIENTO POR HORA'), ('551221', u'551221 - SERVICIOS DE ALOJAMIENTO EN PENSIONES'), ('551222', u'551222 - SERVICIOS DE ALOJAMIENTO EN HOTELES,HOSTERIAS Y RESIDENCIALES SIMILARES,EXCEPTO POR HORA,QUE INCLUYEN SERVICIO DE RESTAURANTE AL PUBLICO'), ('551223', u'551223 - SERVICIOS DE ALOJAMIENTO EN HOTELES,HOSTERIAS Y RESIDENCIALES SIMILARES,EXCEPTO POR HORA,QUE NO INCLUYEN SERVICIO DE RESTAURANTE AL PUBLICO'), ('551229', u'551229 - SERVICIOS DE HOSPEDAJE TEMPORAL N.C.P.'), ('552111', u'552111 - SERVICIOS DE RESTAURANTES Y CANTINAS SIN ESPECTACULO'), ('552112', u'552112 - SERVICIOS DE RESTAURANTES Y CANTINAS CON ESPECTACULO'), ('552113', u'552113 - SERVICIOS DE PIZZERIAS,FAST FOOD Y LOCALES DE VENTA DE COMIDAS Y BEBIDAS AL PASO'), ('552114', u'552114 - SERVICIOS DE BARES Y CONFITERIAS'), ('552119', u'552119 - SERVICIOS DE EXPENDIO DE COMIDAS Y BEBIDAS EN ESTABLECIMIENTOS CON SERVICIO DE MESA Y/O EN MOSTRADOR - EXCEPTO EN HELADERIAS - N.C.P.'), ('552120', u'552120 - EXPENDIO DE HELADOS'), ('552210', u'552210 - PROVISION DE COMIDAS PREPARADAS PARA EMPRESAS'), ('552290', u'552290 - PREPARACION Y VENTA DE COMIDAS PARA LLEVAR N.C.P.'), ('601100', u'601100 - SERVICIO DE TRANSPORTE FERROVIARIO DE CARGAS'), ('601210', u'601210 - SERVICIO DE TRANSPORTE FERROVIARIO URBANO Y SUBURBANO DE PASAJEROS'), ('601220', u'601220 - SERVICIO DE TRANSPORTE FERROVIARIO INTERURBANO DE PASAJEROS'), ('602110', u'602110 - SERVICIOS DE MUDANZA'), ('602120', u'602120 - SERVICIOS DE TRANSPORTE DE MERCADERIAS A GRANEL,INCLUIDO EL TRANSPORTE POR CAMION CISTERNA'), ('602130', u'602130 - SERVICIOS DE TRANSPORTE DE ANIMALES'), ('602180', u'602180 - SERVICIO DE TRANSPORTE URBANO DE CARGA N.C.P.'), ('602190', u'602190 - TRANSPORTE AUTOMOTOR DE CARGAS N.C.P.'), ('602210', u'602210 - SERVICIO DE TRANSPORTE AUTOMOTOR URBANO REGULAR DE PASAJEROS'), ('602220', u'602220 - SERVICIOS DE TRANSPORTE AUTOMOTOR DE PASAJEROS MEDIANTE TAXIS Y REMISES,ALQUILER DE AUTOS CON CHOFER'), ('602230', u'602230 - SERVICIO DE TRANSPORTE ESCOLAR'), ('602240', u'602240 - SERVICIO DE TRANSPORTE AUTOMOTOR URBANO DE OFERTA LIBRE DE PASAJEROS EXCEPTO MEDIANTE TAXIS Y REMISES,ALQUILER DE AUTOS CON CHOFER Y TRANSPORTE ESCOLAR'), ('602250', u'602250 - SERVICIO DE TRANSPORTE AUTOMOTOR INTERURBANO DE PASAJEROS'), ('602260', u'602260 - SERVICIO DE TRANSPORTE AUTOMOTOR DE PASAJEROS PARA EL TURISMO'), ('602290', u'602290 - SERVICIO DE TRANSPORTE AUTOMOTOR DE PASAJEROS N.C.P.'), ('603100', u'603100 - SERVICIO DE TRANSPORTE POR OLEODUCTOS Y POLIDUCTOS'), ('603200', u'603200 - SERVICIO DE TRANSPORTE POR GASODUCTOS'), ('611100', u'611100 - SERVICIO DE TRANSPORTE MARITIMO DE CARGA'), ('611200', u'611200 - SERVICIO DE TRANSPORTE MARITIMO DE PASAJEROS'), ('612100', u'612100 - SERVICIO DE TRANSPORTE FLUVIAL DE CARGAS'), ('612200', u'612200 - SERVICIO DE TRANSPORTE FLUVIAL DE PASAJEROS'), ('621000', u'621000 - SERVICIO DE TRANSPORTE AEREO DE CARGAS'), ('622000', u'622000 - SERVICIO DE TRANSPORTE AEREO DE PASAJEROS'), ('631000', u'631000 - SERVICIOS DE MANIPULACION DE CARGA'), ('632000', u'632000 - SERVICIOS DE ALMACENAMIENTO Y DEPOSITO'), ('633110', u'633110 - SERVICIOS DE EXPLOTACION DE INFRAESTRUCTURA PARA EL TRANSPORTE TERRESTRE,PEAJES Y OTROS DERECHOS'), ('633120', u'633120 - SERVICIOS PRESTADOS POR PLAYAS DE ESTACIONAMIENTO Y GARAJES'), ('633190', u'633190 - SERVICIOS COMPLEMENTARIOS PARA EL TRANSPORTE TERRESTRE N.C.P.'), ('633210', u'633210 - SERVICIOS DE 
EXPLOTACION DE INFRAESTRUCTURA PARA EL TRANSPORTE POR AGUA, DERECHOS DE PUERTO'), ('633220', u'633220 - SERVICIOS DE GUARDERIAS NAUTICAS'), ('633230', u'633230 - SERVICIOS PARA LA NAVEGACION'), ('633290', u'633290 - SERVICIOS COMPLEMENTARIOS PARA EL TRANSPORTE POR AGUA N.C.P.'), ('633310', u'633310 - SERVICIOS DE HANGARES,ESTACIONAMIENTO Y REMOLQUE DE AERONAVES'), ('633320', u'633320 - SERVICIOS PARA LA AERONAVEGACION'), ('633390', u'633390 - SERVICIOS COMPLEMENTARIOS PARA EL TRANSPORTE AEREO N.C.P.'), ('634100', u'634100 - SERVICIOS MAYORISTAS DE AGENCIAS DE VIAJES'), ('634200', u'634200 - SERVICIOS MINORISTAS DE AGENCIAS DE VIAJES'), ('634300', u'634300 - SERVICIOS COMPLEMENTARIOS DE APOYO TURISTICO'), ('635000', u'635000 - SERVICIOS DE GESTION Y LOGISTICA PARA EL TRANSPORTE DE MERCADERIAS'), ('641000', u'641000 - SERVICIOS DE CORREOS'), ('642010', u'642010 - SERVICIOS DE TRANSMISION DE RADIO Y TELEVISION'), ('642020', u'642020 - SERVICIOS DE COMUNICACION POR MEDIO DE TELEFONO,TELEGRAFO Y TELEX'), ('642091', u'642091 - EMISION DE PROGRAMAS DE TELEVISION'), ('642099', u'642099 - SERVICIOS DE TRANSMISION N.C.P.DE SONIDO,IMAGENES,DATOS U OTRA INFORMACION'), ('651100', u'651100 - SERVICIOS DE LA BANCA CENTRAL'), ('652110', u'652110 - SERVICIOS DE LA BANCA MAYORISTA'), ('652120', u'652120 - SERVICIOS DE LA BANCA DE INVERSION'), ('652130', u'652130 - SERVICIOS DE LA BANCA MINORISTA'), ('652201', u'652201 - SERVICIOS DE INTERMEDIACION FINANCIERA REALIZADA POR LAS COMPAÑIAS FINANCIERAS'), ('652202', u'652202 - SERVICIOS DE INTERMEDIACION FINANCIERA REALIZADA POR SOC.DE AHORRO Y PRESTAMO PARA LA VIVIENDA Y OTROS INMUEBLES'), ('652203', u'652203 - SERVICIOS DE INTERMEDIACION FINANCIERA REALIZADA POR CAJAS DE CREDITO'), ('659810', u'659810 - ACTIVIDADES DE CREDITO PARA FINANCIAR OTRAS ACTIVIDADES ECONOMICAS'), ('659890', u'659890 - SERVICIOS DE CREDITO N.C.P.'), ('659910', u'659910 - SERVICIOS DE AGENTES DE MERCADO ABIERTO PUROS'), ('659920', u'659920 - SERVICIOS DE ENTIDADES DE TARJETA DE COMPRA Y/O CREDITO'), ('659990', u'659990 - SERVICIOS DE FINANCIACION Y ACTIVIDADES FINANCIERAS N.C.P.'), ('661110', u'661110 - SERVICIOS DE SEGUROS DE SALUD'), ('661120', u'661120 - SERVICIOS DE SEGUROS DE VIDA'), ('661130', u'661130 - SERVICIOS DE SEGUROS A LAS PERSONAS EXCEPTO LOS DE SALUD Y DE VIDA'), ('661210', u'661210 - SERVICIOS DE ASEGURADORAS DE RIESGO DE TRABAJO'), ('661220', u'661220 - SERVICIOS DE SEGUROS PATRIMONIALES EXCEPTO LOS DE LAS ASEGURADORAS DE RIESGO DE TRABAJO'), ('661300', u'661300 - REASEGUROS'), ('662000', u'662000 - ADMINISTRACION DE FONDOS DE JUBILACIONES Y PENSIONES'), ('671110', u'671110 - SERVICIOS DE MERCADOS Y CAJAS DE VALORES'), ('671120', u'671120 - SERVICIOS DE MERCADOS A TERMINO'), ('671130', u'671130 - SERVICIOS DE BOLSAS DE COMERCIO'), ('671200', u'671200 - SERVICIOS BURSATILES DE MEDIACION O POR CUENTA DE TERCEROS'), ('671910', u'671910 - SERVICIOS DE CASAS Y AGENCIAS DE CAMBIO'), ('671920', u'671920 - SERVICIOS DE SOC.CALIFICADORAS DE RIESGOS'), ('671990', u'671990 - SERVICIOS AUXILIARES A LA INTERMEDIACION FINANCIERA N.C.P.,EXCEPTO A LOS SERVICIOS DE SEGUROS Y DE ADMINISTRACION DE FONDOS DE JUBILACIONES Y PENSIONES'), ('672110', u'672110 - SERVICIOS DE PRODUCTORES Y ASESORES DE SEGUROS'), ('672190', u'672190 - SERVICIOS AUXILIARES A LOS SERVICIOS DE SEGUROS N.C.P.'), ('672200', u'672200 - SERVICIOS AUXILIARES A LA ADMINISTRACION DE FONDOS DE JUBILACIONES Y PENSIONES'), ('701010', u'701010 - SERVICIOS DE ALQUILER Y EXPLOTACION DE INMUEBLES PARA FIESTAS,CONVENCIONES Y 
OTROS EVENTOS SIMILARES'), ('701090', u'701090 - SERVICIOS INMOBILIARIOS REALIZADOS POR CUENTA PROPIA,CON BIENES PROPIOS O ARRENDADOS N.C.P.'), ('702000', u'702000 - SERVICIOS INMOBILIARIOS REALIZADOS A CAMBIO DE UNA RETRIBUCION O POR CONTRATA'), ('711100', u'711100 - ALQUILER DE EQUIPO DE TRANSPORTE PARA VIA TERRESTRE,SIN OPERARIOS NI TRIPULACION'), ('711200', u'711200 - ALQUILER DE EQUIPO DE TRANSPORTE PARA VIA ACUATICA,SIN OPERARIOS NI TRIPULACION'), ('711300', u'711300 - ALQUILER DE EQUIPO DE TRANSPORTE PARA VIA AEREA,SIN OPERARIOS NI TRIPULACION'), ('712100', u'712100 - ALQUILER DE MAQUINARIA Y EQUIPO AGROPECUARIO,SIN OPERARIOS'), ('712200', u'712200 - ALQUILER DE MAQUINARIA Y EQUIPO DE CONSTRUCCION E INGENIERIA CIVIL,SIN OPERARIOS'), ('712300', u'712300 - ALQUILER DE MAQUINARIA Y EQUIPO DE OFICINA,INCLUSO COMPUTADORAS'), ('712901', u'712901 - ALQUILER DE MAQUINARIA Y EQUIPO PARA LA INDUSTRIA MANUFACTURERA,SIN PERSONAL'), ('712902', u'712902 - ALQUILER DE MAQUINARIA Y EQUIPO MINERO Y PETROLERO,SIN PERSONAL'), ('712909', u'712909 - ALQUILER DE MAQUINARIA Y EQUIPO N.C.P.,SIN PERSONAL'), ('713001', u'713001 - ALQUILER DE ROPA'), ('713009', u'713009 - ALQUILER DE EFECTOS PERSONALES Y ENSERES DOMESTICOS N.C.P.'), ('721000', u'721000 - SERVICIOS DE CONSULTORES EN EQUIPO DE INFORMATICA'), ('722000', u'722000 - SERVICIOS DE CONSULTORES EN INFORMATICA Y SUMINISTROS DE PROGRAMAS DE INFORMATICA'), ('723000', u'723000 - PROCESAMIENTO DE DATOS'), ('724000', u'724000 - SERVICIOS RELACIONADOS CON BASES DE DATOS'), ('725000', u'725000 - MANTENIMIENTO Y REPARACION DE MAQUINARIA DE OFICINA,CONTABILIDAD E INFORMATICA'), ('729000', u'729000 - ACTIVIDADES DE INFORMATICA N.C.P.'), ('731100', u'731100 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LA INGENIERIA Y LA TECNOLOGIA'), ('731200', u'731200 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS MEDICAS'), ('731300', u'731300 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS AGROPECUARIAS'), ('731900', u'731900 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS EXACTAS Y NATURALES N.C.P.'), ('732100', u'732100 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS SOCIALES'), ('732200', u'732200 - INVESTIGACION Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS HUMANAS'), ('741101', u'741101 - SERVICIOS JURIDICOS'), ('741102', u'741102 - SERVICIOS NOTARIALES'), ('741200', u'741200 - SERVICIOS DE CONTABILIDAD Y TENEDURIA DE LIBROS,AUDITORIA Y ASESORIA FISCAL'), ('741300', u'741300 - ESTUDIO DE MERCADO,REALIZACION DE ENCUESTAS DE OPINION PUBLICA'), ('741401', u'741401 - SERVICIOS DE ASESORAMIENTO,DIRECCION Y GESTION EMPRESARIAL REALIZADOS POR INTEGRANTES DE LOS ORGANOS DE ADMINISTRACION Y/O FISCALIZACION EN SOC.ANONIMAS'), ('741402', u'741402 - SERVICIOS DE ASESORAMIENTO,DIRECCION Y GESTION EMPRESARIAL REALIZADOS POR INTEGRANTES DE CUERPOS DE DIRECCION EN SOC.EXCEPTO LAS ANONIMAS'), ('741409', u'741409 - SERVICIOS DE ASESORAMIENTO,DIRECCION Y GESTION EMPRESARIAL N.C.P.'), ('742101', u'742101 - SERVICIOS RELACIONADOS CON LA CONSTRUCCION.'), ('742102', u'742102 - SERVICIOS GEOLOGICOS Y DE PROSPECCION'), ('742103', u'742103 - SERVICIOS RELACIONADOS CON LA ELECTRONICA Y LAS COMUNICACIONES'), ('742109', u'742109 - SERVICIOS DE ARQUITECTURA E INGENIERIA Y SERVICIOS CONEXOS DE ASESORAMIENTO TECNICO N.C.P.'), ('742200', u'742200 - ENSAYOS Y ANALISIS TECNICOS'), ('743000', u'743000 - SERVICIOS DE PUBLICIDAD'), ('749100', u'749100 - OBTENCION Y DOTACION DE PERSONAL'), ('749210', 
u'749210 - SERVICIOS DE TRANSPORTE DE CAUDALES Y OBJETOS DE VALOR'), ('749290', u'749290 - SERVICIOS DE INVESTIGACION Y SEGURIDAD N.C.P.'), ('749300', u'749300 - SERVICIOS DE LIMPIEZA DE EDIFICIOS'), ('749400', u'749400 - SERVICIOS DE FOTOGRAFIA'), ('749500', u'749500 - SERVICIOS DE ENVASE Y EMPAQUE'), ('749600', u'749600 - SERVICIOS DE IMPRESION HELIOGRAFICA,FOTOCOPIA Y OTRAS FORMAS DE REPRODUCCIONES'), ('749900', u'749900 - SERVICIOS EMPRESARIALES N.C.P.'), ('751100', u'751100 - SERVICIOS GENERALES DE LA ADMINISTRACION PUBLICA'), ('751200', u'751200 - SERVICIOS PARA LA REGULACION DE LAS ACTIVIDADES SANITARIAS,EDUCATIVAS,CULTURALES,Y RESTANTES SERVICIOS SOCIALES,EXCEPTO SEGURIDAD SOCIAL OBLIGATORIA'), ('751300', u'751300 - SERVICIOS PARA LA REGULACION DE LA ACTIVIDAD ECONOMICA'), ('751900', u'751900 - SERVICIOS AUXILIARES PARA LOS SERVICIOS GENERALES DE LA ADMINISTRACION PUBLICA N.C.P.'), ('752100', u'752100 - SERVICIOS DE ASUNTOS EXTERIORES'), ('752200', u'752200 - SERVICIOS DE DEFENSA'), ('752300', u'752300 - SERVICIOS DE JUSTICIA'), ('752400', u'752400 - SERVICIOS PARA EL ORDEN PUBLICO Y LA SEGURIDAD'), ('752500', u'752500 - SERVICIOS DE PROTECCION CIVIL'), ('753000', u'753000 - SERVICIOS DE LA SEGURIDAD SOCIAL OBLIGATORIA'), ('801000', u'801000 - ENSEÑANZA INICIAL Y PRIMARIA'), ('802100', u'802100 - ENSEÑANZA SECUNDARIA DE FORMACION GENERAL'), ('802200', u'802200 - ENSEÑANZA SECUNDARIA DE FORMACION TECNICA Y PROFESIONAL'), ('803100', u'803100 - ENSEÑANZA TERCIARIA'), ('803200', u'803200 - ENSEÑANZA UNIVERSITARIA EXCEPTO FORMACION DE POSGRADO'), ('803300', u'803300 - FORMACION DE POSGRADO'), ('809000', u'809000 - ENSEÑANZA PARA ADULTOS Y SERVICIOS DE ENSEÑANZA N.C.P.'), ('851110', u'851110 - SERVICIOS DE INTERNACION'), ('851120', u'851120 - SERVICIOS DE HOSPITAL DE DIA'), ('851190', u'851190 - SERVICIOS HOSPITALARIOS N.C.P.'), ('851210', u'851210 - SERVICIOS DE ATENCION AMBULATORIA'), ('851220', u'851220 - SERVICIOS DE ATENCION DOMICILIARIA PROGRAMADA'), ('851300', u'851300 - SERVICIOS ODONTOLOGICOS'), ('851400', u'851400 - SERVICIOS DE DIAGNOSTICO'), ('851500', u'851500 - SERVICIOS DE TRATAMIENTO'), ('851600', u'851600 - SERVICIOS DE EMERGENCIAS Y TRASLADOS'), ('851900', u'851900 - SERVICIOS RELACIONADOS CON LA SALUD HUMANA N.C.P.'), ('852000', u'852000 - SERVICIOS VETERINARIOS'), ('853110', u'853110 - SERVICIOS DE ATENCION A ANCIANOS CON ALOJAMIENTO'), ('853120', u'853120 - SERVICIOS DE ATENCION A PERSONAS MINUSVALIDAS CON ALOJAMIENTO'), ('853130', u'853130 - SERVICIOS DE ATENCION A MENORES CON ALOJAMIENTO'), ('853140', u'853140 - SERVICIOS DE ATENCION A MUJERES CON ALOJAMIENTO'), ('853190', u'853190 - SERVICIOS SOCIALES CON ALOJAMIENTO N.C.P.'), ('853200', u'853200 - SERVICIOS SOCIALES SIN ALOJAMIENTO'), ('900010', u'900010 - RECOLECCION,REDUCCION Y ELIMINACION DE DESPERDICIOS'), ('900020', u'900020 - SERVICIOS DE DEPURACION DE AGUAS RESIDUALES,ALCANTARILLADO Y CLOACAS'), ('900090', u'900090 - SERVICIOS DE SANEAMIENTO PUBLICO N.C.P.'), ('911100', u'911100 - SERVICIOS DE FEDERACIONES,ASOCIACIONES,CAMARAS,GREMIOS Y ORGANIZACIONES SIMILARES'), ('911200', u'911200 - SERVICIOS DE ASOCIACIONES DE ESPECIALISTAS EN DISCIPLINAS CIENTIFICAS,PRACTICAS PROFESIONALES Y ESFERAS TECNICAS'), ('912000', u'912000 - SERVICIOS DE SINDICATOS'), ('919100', u'919100 - SERVICIOS DE ORGANIZACIONES RELIGIOSAS'), ('919200', u'919200 - SERVICIOS DE ORGANIZACIONES POLITICAS'), ('919900', u'919900 - SERVICIOS DE ASOCIACIONES N.C.P.'), ('921110', u'921110 - PRODUCCION DE FILMES Y VIDEOCINTAS'), ('921120', 
u'921120 - DISTRIBUCION DE FILMES Y VIDEOCINTAS'), ('921200', u'921200 - EXHIBICION DE FILMES Y VIDEOCINTAS'), ('921301', u'921301 - SERVICIOS DE RADIO'), ('921302', u'921302 - PRODUCCION Y DISTRIBUCION POR TELEVISION'), ('921410', u'921410 - PRODUCCION DE ESPECTACULOS TEATRALES Y MUSICALES'), ('921420', u'921420 - COMPOSICION Y REPRESENTACION DE OBRAS TEATRALES,MUSICALES Y ARTISTICAS'), ('921430', u'921430 - SERVICIOS CONEXOS A LA PRODUCCION DE ESPECTACULOS TEATRALES Y MUSICALES'), ('921910', u'921910 - SERVICIOS DE SALONES DE BAILE,DISCOTECAS Y SIMILARES'), ('921990', u'921990 - SERVICIOS DE ESPECTACULOS ARTISTICOS Y DE DIVERSION N.C.P.'), ('922000', u'922000 - SERVICIOS DE AGENCIAS DE NOTICIAS Y SERVICIOS DE INFORMACION'), ('923100', u'923100 - SERVICIOS DE BIBLIOTECAS Y ARCHIVOS'), ('923200', u'923200 - SERVICIOS DE MUSEOS Y PRESERVACION DE LUGARES Y EDIFICIOS HISTORICOS'), ('923300', u'923300 - SERVICIOS DE JARDINES BOTANICOS,ZOOLOGICOS Y DE PARQUES NACIONALES'), ('924110', u'924110 - SERVICIOS DE ORGANIZACION,DIRECCION Y GESTION DE PRACTICAS DEPORTIVAS Y EXPLOTACION DE LAS INSTALACIONES'), ('924120', u'924120 - PROMOCION Y PRODUCCION DE ESPECTACULOS DEPORTIVOS'), ('924130', u'924130 - SERVICIOS PRESTADOS POR PROFESIONALES Y TECNICOS,PARA LA REALIZACION DE PRACTICAS DEPORTIVAS'), ('924910', u'924910 - SERVICIOS DE ESPARCIMIENTO RELACIONADOS CON JUEGOS DE AZAR Y APUESTAS'), ('924920', u'924920 - SERVICIOS DE SALONES DE JUEGOS'), ('924990', u'924990 - SERVICIOS DE ENTRETENIMIENTO N.C.P.'), ('930100', u'930100 - LAVADO Y LIMPIEZA DE ART. DE TELA,CUERO Y/O DE PIEL,INCLUSO LA LIMPIEZA EN SECO'), ('930201', u'930201 - SERVICIOS DE PELUQUERIA'), ('930202', u'930202 - SERVICIOS DE TRATAMIENTO DE BELLEZA,EXCEPTO LOS DE PELUQUERIA'), ('930300', u'930300 - POMPAS FUNEBRES Y SERVICIOS CONEXOS'), ('930910', u'930910 - SERVICIOS PARA EL MANTENIMIENTO FISICO-CORPORAL'), ('930990', u'930990 - SERVICIOS N.C.P.'), ('950000', u'950000 - SERVICIOS DE HOGARES PRIVADOS QUE CONTRATAN SERVICIO DOMESTICO'), ('990000', u'990000 - SERVICIOS DE ORGANIZACIONES Y ORGANOS EXTRATERRITORIALES'), ]
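# The tuple above is a flat (code, label) choices sequence for Argentine AFIP
# activity codes, as used for selection fields. A minimal sketch of how such a
# sequence is typically consumed (CODES and label_for are hypothetical names,
# shown with a one-entry excerpt of the real data):
CODES = (
    ('930300', u'930300 - POMPAS FUNEBRES Y SERVICIOS CONEXOS'),
)

def label_for(code, choices=CODES):
    """Return the display label for an activity code, or None if unknown."""
    return dict(choices).get(code)

assert label_for('930300') == u'930300 - POMPAS FUNEBRES Y SERVICIOS CONEXOS'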
from __future__ import division, absolute_import, unicode_literals import time from qtpy import QtWidgets from qtpy.QtCore import Qt from qtpy.QtCore import Signal from .. import qtutils from ..i18n import N_ from . import defs from .text import MonoTextView class LogWidget(QtWidgets.QWidget): """A simple dialog to display command logs.""" channel = Signal(object) def __init__(self, parent=None, output=None): QtWidgets.QWidget.__init__(self, parent) self.output_text = MonoTextView(self) if output: self.set_output(output) self.main_layout = qtutils.vbox(defs.no_margin, defs.spacing, self.output_text) self.setLayout(self.main_layout) self.channel.connect(self.log, type=Qt.QueuedConnection) def clear(self): self.output_text.clear() def set_output(self, output): self.output_text.setText(output) def log_status(self, status, out, err=None): msg = [] if out: msg.append(out) if err: msg.append(err) if status: msg.append(N_('exit code %s') % status) self.log('\n'.join(msg)) def log(self, msg): if not msg: return cursor = self.output_text.textCursor() cursor.movePosition(cursor.End) text = self.output_text cursor.insertText(time.asctime() + '\n') for line in msg.splitlines(): cursor.insertText(line + '\n') cursor.insertText('\n') cursor.movePosition(cursor.End) text.setTextCursor(cursor) def safe_log(self, msg): """A version of the log() method that can be called from other threads.""" self.channel.emit(msg)
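# A minimal usage sketch for LogWidget (assumes a running QApplication and a
# display; the names come straight from the class above):
#
#     app = QtWidgets.QApplication([])
#     widget = LogWidget(output='initial output\n')
#     widget.log_status(1, 'stdout text', err='stderr text')  # appends "exit code 1"
#     widget.safe_log('sent from a worker thread')
#     widget.show()
#     app.exec_()
#
# safe_log() only emits the Signal, so it is safe to call from any thread; the
# Qt.QueuedConnection set up in __init__ marshals the call to log() on the GUI
# thread.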
from ReddiWrap import ReddiWrap reddit = ReddiWrap(user_agent='ReddiWrap') USERNAME = 'your_username' PASSWORD = 'your_password' MOD_SUB = 'your_subreddit' # A subreddit moderated by USERNAME reddit.load_cookies('cookies.txt') if not reddit.logged_in or reddit.user.lower() != USERNAME.lower(): print('logging into %s' % USERNAME) login = reddit.login(user=USERNAME, password=PASSWORD) if login != 0: # 1 means invalid password, 2 means rate limited, -1 means unexpected error print('unable to log in: %d' % login) print('remember to change USERNAME and PASSWORD') exit(1) # Save cookies so we won't have to log in again later reddit.save_cookies('cookies.txt') print('logged in as %s' % reddit.user) uinfo = reddit.user_info() print('\nlink karma: %d' % uinfo.link_karma) print('comment karma: %d' % uinfo.comment_karma) created = int(uinfo.created) print('account created on: %s' % reddit.time_to_date(created)) print('time since creation: %s\n' % reddit.time_since(created)) posts = reddit.get('/r/%s' % MOD_SUB) print('posts in subreddit /r/%s:' % MOD_SUB) for post in posts: print(post) post = None for p in posts: if p.num_comments > 0: post = p break if post is None: print('unable to find post with comments. exiting') exit(1) print('fetching comments for "%s"' % (post.title)) reddit.fetch_comments(post) for comment in post.comments: # Print first 60 chars of all root-level comments print('\t%s: "%s"' % (comment.author, comment.body[:60].replace('\n',' '))) if len(post.comments) > 0: comment = post.comments[0] reddit.upvote(post) reddit.upvote(comment) reddit.save(post) print('replying to post "%s"' % (post.title)) result = reddit.reply(post, "I can't believe this is in /r/%s" % post.subreddit) if result == {}: print('unable to reply to post') else: print('replied to %s, %s' % (result['parent'], result['id'])) print('replying to comment by %s' % (comment.author)) result = reddit.reply(comment, "I like the part where you said:\n\n>%s" % comment.body.replace('\n\n', '\n\n>')) if result == {}: print('unable to reply to comment') else: print('replied to %s, %s' % (result['parent'], result['id'])) print('submitting link to /r/%s' % MOD_SUB) result = reddit.post_link('best website ever', 'http://derv.us', MOD_SUB) if result == '': print('unable to submit link!') else: print('submitted. %s' % result) print('submitting self-post to /r/%s' % MOD_SUB) result = reddit.post_self('DAE breathe?', 'I thought I was the only one!', MOD_SUB) if result == '': print('unable to submit self-post!') else: print('submitted.', result) modmsgs = reddit.get('/message/moderator') for msg in modmsgs: print(msg) reddit.mark_message(msg) # mark as read posts = reddit.get('/r/%s/about/reports/' % MOD_SUB) posts = reddit.get('/r/%s/about/spam/' % MOD_SUB) for post in posts: print(post) subs = reddit.get('/reddits') subbed = False while not subbed: # Iterate over all subreddits for sub in subs: if sub.display_name == 'spacedicks': reddit.subscribe(sub) print('subscribed to spacedicks') subbed = True break if not subbed and reddit.has_next(): print('loading next page of subreddits...') subs = reddit.get_next() else: break subs = reddit.get('/reddits/mine') for sub in subs: if sub.display_name == 'spacedicks': reddit.subscribe(sub, unsub=True) print('unsubscribed from spacedicks') break posts = reddit.get('/user/%s/submitted' % (reddit.user)) if len(posts) > 0: post = posts[0] print(post) print('marking... %s' % reddit.mark_nsfw(post)) print('reporting... %s' % reddit.report(post)) print('removing... %s' % reddit.remove(post)) print('spamming... 
%s' % reddit.spam(post)) print('approving... %s' % reddit.approve(post)) print('distinguishing %s' % reddit.distinguish(post)) msgs = reddit.get('/message/messages') for msg in msgs: # Print first 40 chars of all messages print(msg.__repr__()[:40]) if len(msgs) > 0: msg = msgs[0] # Reply to first message print('replying to: %s' % (msg)) success = reddit.reply(msg, "Sure, I'll s***y_watercolor that in ALL CAPS while trapped in my anus... NAUT!") if success == {}: print('unable to reply!') else: print('reply successful:',success['id']) import time # For sleep(), to avoid API rate limit count = 0 subs = reddit.get('/reddits') while True: for sub in subs: # Print each subreddit name print('%s,' % (sub.display_name)) count += 1 if count >= 100 or not reddit.has_next(): break time.sleep(2) # One request every 2 seconds. subs = reddit.get_next() posts = reddit.get('/r/askscience') print(len(posts), 'posts in AskScience page #1') for post in posts: print(post) print('\n') posts = reddit.get_next() print(len(posts), 'posts in AskScience page #2') for post in posts: print(post) print('\n') posts = reddit.get_previous() print(len(posts), 'posts in AskScience page #1') for post in posts: print(post) print('\n') posts = reddit.get('/r/AskReddit') for i in range(0, 3): for post in posts: print(post) if not reddit.has_next(): break posts = reddit.get_next() def iterate_comments(comment, depth=0): """ Recursively iterate and 'pretty print' comments. """ if isinstance(comment, list): to_it = comment else: to_it = comment.children print(' ' * depth + comment.__str__()[:80].replace('\n', '\\n')) for comm in to_it: iterate_comments(comm, depth + 1) posts = reddit.get('/r/%s' % MOD_SUB) for post in posts: if post.num_comments == 0: continue print(post) reddit.fetch_comments(post) print('\ncomments:\n') iterate_comments(post.comments)
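# A reusable form of the pagination pattern the script uses several times:
# walk listing pages with has_next()/get_next() and sleep between requests to
# respect the API rate limit. (iter_listing, the delay value and the max_pages
# cap are illustrative additions, not part of ReddiWrap.)
def iter_listing(reddit, url, delay=2.0, max_pages=10):
    """Yield every item across up to max_pages pages of a reddit listing."""
    items = reddit.get(url)
    pages = 1
    while True:
        for item in items:
            yield item
        if pages >= max_pages or not reddit.has_next():
            break
        time.sleep(delay)  # one request every `delay` seconds
        items = reddit.get_next()
        pages += 1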
import picross import sys import time import optparse def decode_mcs(file): code = [] for l in open(file,'U').readlines(): if len(l)==44: code.extend([ int(l[2*x+9:2*x+11],16) for x in range(0,16) ]) return ''.join(map(chr,code)) def load_firmware(dev,fw): device = picross.usbdevice(dev,0) code = decode_mcs(fw) load(device,code) verify(device,code) fpga_reboot(device) def code_iterator(code): index = 0 remain = len(code) paddr = 0 while remain>0: progress = (100*index)/len(code) for baddr,length in enumerate((64,64,64,64,8)): length = min(length,remain) block = code[index:index+length] yield paddr,baddr,block,length,progress remain -= length index += length time.sleep(0.01) paddr += 1 def message(msg): sys.stdout.write(msg) sys.stdout.flush() def _hex(bytes): return ' '.join(map(lambda x:hex(ord(x))[2:].zfill(2),bytes)) def load(device,code): for paddr,baddr,block,blen,prog in code_iterator(code): message('load progress: %d\r' % prog) device.control_out(0x40,0xc9,paddr,baddr,block+'\0'*(64-blen)) message('load done. \n') class VerifyError(RuntimeError): pass def verify0(paddr,baddr,got,exp,blen): if got[:blen]!=exp[:blen]: message('\nexpected (p%d b%d): %s' % (paddr,baddr,_hex(exp))) message('\ngot (p%d b%d): %s\n' % (paddr,baddr,_hex(got))) raise VerifyError() def verify(device,code): for paddr,baddr,block,blen,prog in code_iterator(code): got=device.control_in(0x40|0x80,0xca,paddr,baddr,64) message('verify progress: %d\r' % prog) verify0(paddr,baddr,got,block,blen) message('verify done. \n') def fpga_reboot(device): message('rebooting FPGA\r') time.sleep(0.1) device.control_out(0x40,0xcb,0,0,'') message('\ndone.\n') def main(): parser = optparse.OptionParser(usage=sys.argv[0]+' [options]') parser.add_option('-f','--firmware',action='store',dest='firmware',default='',help='firmware file to load') parser.add_option('-S','--serial',action='store',dest='serial',default=0,help='serial number') parser.add_option('-v','--vid',action='store',dest='vendor',default=0x2139,help='vendor ID') parser.add_option('-p','--pid',action='store',dest='product',default=0x0002,help='product ID') (opts,args) = parser.parse_args(sys.argv) dev = picross.find(opts.vendor,opts.product) if opts.firmware: load_firmware(dev,opts.firmware) if opts.serial: set_serial(dev,opts.serial) # set_serial is assumed to be defined elsewhere in the original package
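# decode_mcs() above keeps only 44-character Intel HEX records and reads the
# 16 data bytes from fixed columns: ':' at 0, byte count at 1-2, address at
# 3-6, record type at 7-8, so the data field starts at column 9. A worked
# example of the same slicing on one hypothetical record:
sample = ':10001000' + '0123456789ABCDEF' * 2 + 'FF\n'  # 44 chars incl. newline
assert len(sample) == 44
data = [int(sample[2 * x + 9:2 * x + 11], 16) for x in range(16)]
assert data[0] == 0x01 and data[-1] == 0xEF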
import traceback import importlib from .WebWizardConst import * from ..common.UCB import UCB from ..common.FileAccess import FileAccess from ..ui.event.Task import Task from ..ui.event.CommonListener import StreamListenerProcAdapter from .ProcessErrors import ProcessErrors from .ExtensionVerifier import ExtensionVerifier from .ErrorHandler import ErrorHandler from .data.CGDocument import CGDocument from com.sun.star.io import IOException from com.sun.star.uno import SecurityException from com.sun.star.beans import NamedValue from com.sun.star.beans import StringPair class Process(ProcessErrors): TASKS_PER_DOC = 5 TASKS_PER_XSL = 2 TASKS_PER_PUBLISH = 2 TASKS_IN_PREPARE = 1 TASKS_IN_EXPORT = 2 TASKS_IN_GENERATE = 2 TASKS_IN_PUBLISH = 2 TASKS_IN_FINISHUP = 1 settings = None xmsf = None errorHandler = None tempDir = None fileAccess = None ucb = None myTask = None #This is a cache for exporters, so I do not need to #instantiate the same exporter more than once. exporters = {} result = None def __init__(self, settings, xmsf, er): self.xmsf = xmsf self.settings = settings self.fileAccess = FileAccess(xmsf) self.errorHandler = er self.ucb = UCB(xmsf) self.taskSteps = self.getTaskSteps() self.myTask = Task(TASK, TASK_PREPARE, self.taskSteps) # @return the number of destinations to which the # generated site should be published. def countPublish(self): count = 0 publishers = self.settings.cp_DefaultSession.cp_Publishing for e in publishers.childrenList: if e.cp_Publish: count += 1 return count # @return the number of task steps that this # session should have def getTaskSteps(self): docs = self.settings.cp_DefaultSession.cp_Content.cp_Documents.getSize() xsl = 0 try: layout = self.settings.cp_DefaultSession.getLayout() xsl = len(layout.getTemplates(self.xmsf)) except Exception: traceback.print_exc() publish = self.countPublish() return \ self.TASKS_IN_PREPARE + \ self.TASKS_IN_EXPORT + docs * self.TASKS_PER_DOC + \ self.TASKS_IN_GENERATE + xsl * self.TASKS_PER_XSL + \ self.TASKS_IN_PUBLISH + publish * self.TASKS_PER_PUBLISH + \ self.TASKS_IN_FINISHUP # does the job def runProcess(self): self.myTask.start() try: try: # 'and' short-circuits here, so if one of the # methods returns False, the next # will not be called. self.result = self.createTempDir(self.myTask) and self.export(self.myTask) and self.generate(self.tempDir, self.myTask) and self.publish(self.tempDir, self.myTask) finally: # cleanup must be called. self.result = self.result and self.cleanup(self.myTask) except Exception: traceback.print_exc() self.result = False if not self.result: # this protects against bugs. self.myTask.fail() while (self.myTask.getStatus() < self.myTask.getMax()): self.myTask.advance(True) # creates a temporary directory. 
# @param task # @return True if processing should continue def createTempDir(self, task): try: self.tempDir = self.fileAccess.createNewDir(self.getSOTempDir(self.xmsf), "/wwiztemp") except Exception: traceback.print_exc() if self.tempDir is None: self.error(None, None, ProcessErrors.ERROR_MKDIR, ErrorHandler.ERROR_PROCESS_FATAL) return False else: task.advance(True) return True # @param xmsf # @return the staroffice /openoffice temporary directory def getSOTempDir(self, xmsf): try: return FileAccess.getOfficePath(self.xmsf, "Temp", "") except Exception: traceback.print_exc() return None # CLEANUP # delete the temporary directory # @return True if processing should continue def cleanup(self, task): task.setSubtaskName(TASK_FINISH) b = self.fileAccess.delete(self.tempDir) if not b: self.error(None, None, ProcessErrors.ERROR_CLEANUP, ErrorHandler.ERROR_WARNING) task.advance(b) return b # This method is used to copy style files to a target # Directory: css and background. # Note that this method is static since it is # also used when displaying a "preview" def copyMedia(self, copy, settings, targetDir, task): # 1. .css sourceDir = FileAccess.connectURLs(settings.workPath, "styles") filename = settings.cp_DefaultSession.getStyle().cp_CssHref copy.copy2(sourceDir, filename, targetDir, "style.css") task.advance(True) # 2. background image background = settings.cp_DefaultSession.cp_Design.cp_BackgroundImage if (background is not None and background != ""): sourceDir = FileAccess.getParentDir(background) filename = background[len(sourceDir):] copy.copy2(sourceDir, filename, targetDir + "/images", "background.gif") task.advance(True) # Copy "static" files (which are always the same, # thus not user-input-dependent) to a target directory. # Note that this method is static since it is # also used when displaying a "preview" # @param copy # @param settings # @param targetDir # @throws Exception @classmethod def copyStaticImages(self, copy, settings, targetDir): source = FileAccess.connectURLs(settings.workPath, "images") target = targetDir + "/images" copy.copy(source, target) # publish the given directory. # @param dir the source directory to publish from # @param task task tracking. # @return True if processing should continue def publish(self, folder, task): task.setSubtaskName(TASK_PUBLISH_PREPARE) configSet = self.settings.cp_DefaultSession.cp_Publishing try: self.copyMedia(self.ucb, self.settings, folder, task) self.copyStaticImages(self.ucb, self.settings, folder) task.advance(True) except Exception as ex: # error in copying media traceback.print_exc() self.error(ex, "", ProcessErrors.ERROR_PUBLISH_MEDIA, ErrorHandler.ERROR_PROCESS_FATAL) return False for p in configSet.childrenList: if p.cp_Publish: key = configSet.getKey(p) task.setSubtaskName(key) if key == ZIP_PUBLISHER: self.fileAccess.delete(p.cp_URL) if (not self.publish1(folder, p, self.ucb, task)): return False return True # publish the given directory to the # given target CGPublish. # @param dir the dir to copy from # @param publish the object that specifies the target # @param copy ucb encapsulation # @param task task tracking # @return True if processing should continue def publish1(self, folder, publish, copy, task): try: task.advance(True) url = publish.url copy.copy(folder, url) task.advance(True) return True except Exception as e: task.advance(False) traceback.print_exc() return self.error(e, publish, ProcessErrors.ERROR_PUBLISH, ErrorHandler.ERROR_NORMAL_IGNORE) # Generates the TOC pages for the current session. # @param targetDir generating to this directory. 
def generate(self, targetDir, task): result = False task.setSubtaskName(TASK_GENERATE_PREPARE) layout = self.settings.cp_DefaultSession.getLayout() try: # here I create the DOM of the TOC to pass to the XSL doc = self.settings.cp_DefaultSession.createDOM1() self.generate1(self.xmsf, layout, doc, self.fileAccess, targetDir, task) except Exception as ex: traceback.print_exc() self.error(ex, "", ProcessErrors.ERROR_GENERATE_XSLT, ErrorHandler.ERROR_PROCESS_FATAL) return False # copy files which are not xsl from layout directory to # website root. try: task.setSubtaskName(TASK_GENERATE_COPY) self.copyLayoutFiles(self.ucb, self.fileAccess, self.settings, layout, targetDir) task.advance(True) result = True except Exception as ex: task.advance(False) traceback.print_exc() return self.error(ex, None, ProcessErrors.ERROR_GENERATE_COPY, ErrorHandler.ERROR_NORMAL_ABORT) return result # copies layout files which are not .xsl files # to the target directory. # @param ucb UCB encapsulation object # @param fileAccess FileAccess encapsulation object # @param settings web wizard settings # @param layout the layout object # @param targetDir the target directory to copy to # @throws Exception @classmethod def copyLayoutFiles(self, ucb, fileAccess, settings, layout, targetDir): filesPath = fileAccess.getURL(FileAccess.connectURLs(settings.workPath, "layouts/"), layout.cp_FSName) ucb.copy1(filesPath, targetDir, ExtensionVerifier("xsl")) # generates the TOC page for the given layout. # This method might generate more than one file, depending # on how many .xsl files are in the # directory specified by the given layout object. # @param xmsf # @param layout specifies the layout to use. # @param doc the DOM representation of the web wizard session # @param fileAccess encapsulation of FileAccess # @param targetPath target directory # @param task # @throws Exception @classmethod def generate1(self, xmsf, layout, doc, fileAccess, targetPath, task): # a map that contains xsl templates. The keys are the xsl file names. templates = layout.getTemplates(xmsf) self.node = doc task.advance1(True, TASK_GENERATE_XSL) # each template generates a page. for key in templates: temp = templates[key] # The target file name is like the xsl template filename # without the .xsl extension. fn = fileAccess.getPath(targetPath, key[:len(key) - 4]) args = list(range(1)) nv = NamedValue() nv.Name = "StylesheetURL" nv.Value = temp args[0] = nv arguments = list(range(1)) arguments[0] = tuple(args) self.tf = Process.createTransformer(xmsf, arguments) self.node.normalize() task.advance(True) # we want to be notified when the processing is done... 
self.tf.addListener(StreamListenerProcAdapter(self, self.streamTerminatedHandler, self.streamStartedHandler, self.streamClosedHandler, self.streamErrorHandler)) # create pipe pipeout = xmsf.createInstance("com.sun.star.io.Pipe") pipein = pipeout # connect sax writer to pipe self.xSaxWriter = xmsf.createInstance( "com.sun.star.xml.sax.Writer" ) self.xSaxWriter.setOutputStream(pipeout) # connect pipe to transformer self.tf.setInputStream(pipein) # connect transformer to output xOutputStream = fileAccess.xInterface.openFileWrite(fn) self.tf.setOutputStream(xOutputStream) self.tf.start() while (not self.tfCompleted): pass self.tf.terminate() task.advance(True) @classmethod def createTransformer(self, xmsf, args): tf = xmsf.createInstanceWithArguments("com.sun.star.xml.xslt.XSLT2Transformer", tuple(args)) if (tf is None): # TODO: put a dialog telling about the need to install # xslt2-transformer extension here tf = xmsf.createInstanceWithArguments("com.sun.star.xml.xslt.XSLTTransformer", tuple(args)) return tf def streamTerminatedHandler(self, parent): parent.isTerminated = True def streamStartedHandler(self, parent): parent.tfCompleted = False parent.node.serialize(parent.xSaxWriter, tuple([StringPair()])) def streamErrorHandler(self, aException): print ("DEBUG !!! Stream 'error' event handler") def streamClosedHandler(self, parent): parent.tfCompleted = True # I split the export method into two methods # at a time when a tree with more than one content was planned. # I left it that way, because it may be used in the future. # @param task # @return def export(self, task): return self.export1(self.settings.cp_DefaultSession.cp_Content, self.tempDir, task) # This method could actually, with light modification, use recursion. # In the present situation, where we only use a "flat" list of # documents, instead of the original plan to use a tree, # the recursion is not implemented. # @param content the content (directory-like, contains documents) # @param dir the target directory for exporting this content. # @param task # @return True if processing should continue def export1(self, content, folder, task): toPerform = 1 contentDir = None try: task.setSubtaskName(TASK_EXPORT_PREPARE) # 1. create a content directory. # each content (at the moment there is only one :-( ) # is created in its own directory. # failure here is fatal. contentDir = self.fileAccess.createNewDir(folder, content.cp_Name) if (contentDir is None or contentDir == ""): raise IOException("Directory " + folder + " could not be created.") content.dirName = FileAccess.getFilename(contentDir) task.advance1(True, TASK_EXPORT_DOCUMENTS) toPerform -= 1 # 2. export all documents and sub contents. # (at the moment, only documents, no subcontents) for item in content.cp_Documents.childrenList: try: # At present this is always the case. # Maybe in the future, when # a tree is used, it will be a bit different. if (isinstance (item, CGDocument)): if (not self.export2(item, contentDir, task)): return False elif (not self.export2(item, contentDir, task)): # we never get here since we # did not implement sub-contents. 
return False except SecurityException as sx: # nonfatal traceback.print_exc() if (not self.error(sx, item, ProcessErrors.ERROR_EXPORT_SECURITY, ErrorHandler.ERROR_NORMAL_IGNORE)): return False self.result = False except IOException as iox: # nonfatal traceback.print_exc() return self.error(iox, content, ProcessErrors.ERROR_EXPORT_IO, ErrorHandler.ERROR_NORMAL_IGNORE) except SecurityException as se: # nonfatal traceback.print_exc() return self.error(se, content, ProcessErrors.ERROR_EXPORT_SECURITY, ErrorHandler.ERROR_NORMAL_IGNORE) self.failTask(task, toPerform) return True # exports a single document # @param doc the document to export # @param dir the target directory # @param task task tracking # @return True if processing should continue def export2(self, doc, folder, task): # first I check if the document was already validated... if (not doc.valid): try: doc.validate(self.xmsf, task) except Exception as ex: # fatal traceback.print_exc() self.error(ex, doc, ProcessErrors.ERROR_DOC_VALIDATE, ErrorHandler.ERROR_PROCESS_FATAL) return False # get the exporter specified for this document exp = doc.cp_Exporter exporter = self.settings.cp_Exporters.getElement(exp) try: # here I calculate the destination filename. # I take the original filename (docFilename), subtract the extension, (docExt) -> (fn) # and find an available filename which starts with # this filename, but with the new extension. (destExt) docFilename = FileAccess.getFilename(doc.cp_URL) docExt = FileAccess.getExtension(docFilename) # filename without extension #fn = doc.localFilename.substring(0, doc.localFilename.length() - docExt.length() - 1) fn = doc.localFilename[:len(doc.localFilename) - len(docExt) - 1] # the copyExporter does not change # the extension of the target... destExt = FileAccess.getExtension(docFilename) \ if (exporter.cp_Extension == "") \ else exporter.cp_Extension # if this filter needs to export to its own directory... # this is the case in, for example, impress html export if (exporter.cp_OwnDirectory): # +++ folder = self.fileAccess.createNewDir(folder, fn) doc.dirName = FileAccess.getFilename(folder) # if two files with the same name # need to be exported? Here # I get a new filename, so I do not # overwrite files... f = self.fileAccess.getNewFile(folder, fn, destExt) # set filename with extension. # this will be used by the exporter, # and to generate the TOC. doc.urlFilename = FileAccess.getFilename(f) task.advance(True) try: # export self.getExporter(exporter).export(doc, f, self.xmsf, task) task.advance(True) # getExporter(..) throws # IllegalAccessException, InstantiationException, ClassNotFoundException # export() throws Exception except Exception as ex: # nonfatal traceback.print_exc() if (not self.error(ex, doc, ProcessErrors.ERROR_EXPORT, ErrorHandler.ERROR_NORMAL_IGNORE)): return False except Exception as ex: # nonfatal traceback.print_exc() if (not self.error(ex, doc, ProcessErrors.ERROR_EXPORT_MKDIR, ErrorHandler.ERROR_NORMAL_ABORT)): return False return True # submit an error. # @param ex the exception # @param arg1 error argument # @param arg2 error argument 2 # @param errType error type # @return the interaction result def error(self, ex, arg1, arg2, errType): self.result = False return self.errorHandler.error(ex, arg1, arg2, errType) # advances the given task in the given count of steps, # marked as failed. 
# @param task the task to advance # @param count the number of steps to advance def failTask(self, task, count): while (count > 0): task.advance(False) count -= 1 # creates an instance of the exporter class # as specified by the # exporter object. # @param export specifies the exporter to be created # @return the Exporter instance # @throws ClassNotFoundException # @throws IllegalAccessException # @throws InstantiationException def createExporter(self, export): pkgname = ".".join(export.cp_ExporterClass.split(".")[3:]) className = export.cp_ExporterClass.split(".")[-1] mod = importlib.import_module(pkgname) return getattr(mod, className)(export) # searches for an exporter for the given CGExporter object # in the cache. # If it's not there, creates it, stores it in the cache and # returns it. # @param export specifies the needed exporter. # @return an Exporter instance # @throws ClassNotFoundException thrown when using Class.forName(string) # @throws IllegalAccessException thrown when using Class.forName(string) # @throws InstantiationException thrown when using Class.forName(string) def getExporter(self, export): exp = self.exporters.get(export.cp_Name) if (exp is None): exp = self.createExporter(export) self.exporters[export.cp_Name] = exp return exp # @return True if everything went smoothly, False # if error(s) occurred. def getResult(self): return (self.myTask.getFailed() == 0) and self.result
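# createExporter()/getExporter() above implement a small import-and-cache
# factory: a dotted class path stored in the settings is resolved with
# importlib and the instance is memoized per exporter name. A standalone
# sketch of the same idea (get_instance and _instances are illustrative names;
# note the original additionally strips leading package components from
# cp_ExporterClass before importing):
_instances = {}

def get_instance(dotted_name, *args):
    """Resolve 'pkg.mod.Class', instantiate it once, then reuse it."""
    if dotted_name not in _instances:
        module_path, class_name = dotted_name.rsplit('.', 1)
        cls = getattr(importlib.import_module(module_path), class_name)
        _instances[dotted_name] = cls(*args)
    return _instances[dotted_name]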
import argparse import importlib import ast from .docscrape_sphinx import get_doc_object def main(argv=None): """Test numpydoc docstring generation for a given object""" ap = argparse.ArgumentParser(description=__doc__) ap.add_argument('import_path', help='e.g. numpy.ndarray') def _parse_config(s): key, _, value = s.partition('=') value = ast.literal_eval(value) return key, value ap.add_argument('-c', '--config', type=_parse_config, action='append', help='key=val where val will be parsed by literal_eval, ' 'e.g. -c use_plots=True. Multiple -c can be used.') args = ap.parse_args(argv) parts = args.import_path.split('.') for split_point in range(len(parts), 0, -1): try: path = '.'.join(parts[:split_point]) obj = importlib.import_module(path) except ImportError: continue break else: raise ImportError('Could not resolve {!r} to an importable object' ''.format(args.import_path)) for part in parts[split_point:]: obj = getattr(obj, part) print(get_doc_object(obj, config=dict(args.config or []))) if __name__ == '__main__': main()
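# Example invocation of the entry point above (the -c values go through
# ast.literal_eval, so any Python literal works on the command line; the exact
# module name used with -m depends on how the package is installed):
#
#     python -m numpydoc numpy.ndarray -c use_plots=True
#
# The for/else loop resolves the longest importable prefix of the dotted path
# ('numpy' here) and then walks the remaining parts with getattr to reach
# 'ndarray'.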
from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: persistent short_description: Use a persistent unix socket for connection description: - This is a helper plugin to allow making other connections persistent. version_added: "2.3" """ import os import pty import json import subprocess from ansible import constants as C from ansible.plugins.connection import ConnectionBase from ansible.module_utils._text import to_text from ansible.module_utils.six.moves import cPickle from ansible.module_utils.connection import Connection as SocketConnection from ansible.errors import AnsibleError try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): ''' Local based connections ''' transport = 'persistent' has_pipelining = False def _connect(self): self._connected = True return self def exec_command(self, cmd, in_data=None, sudoable=True): display.vvvv('exec_command(), socket_path=%s' % self.socket_path, host=self._play_context.remote_addr) connection = SocketConnection(self.socket_path) out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) return 0, out, '' def put_file(self, in_path, out_path): pass def fetch_file(self, in_path, out_path): pass def close(self): self._connected = False def run(self): """Returns the path of the persistent connection socket. Attempts to ensure (within playcontext.timeout seconds) that the socket path exists. If the path exists (or the timeout has expired), returns the socket path. """ display.vvvv('starting connection from persistent connection plugin', host=self._play_context.remote_addr) socket_path = self._start_connection() display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr) setattr(self, '_socket_path', socket_path) return socket_path def _start_connection(self): ''' Starts the persistent connection ''' master, slave = pty.openpty() p = subprocess.Popen(["ansible-connection"], stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdin = os.fdopen(master, 'wb', 0) os.close(slave) # Need to force a protocol that is compatible with both py2 and py3. # That would be protocol=2 or less. # Also need to force a protocol that excludes certain control chars as # stdin in this case is a pty and control chars will cause problems. # that means only protocol=0 will work. src = cPickle.dumps(self._play_context.serialize(), protocol=0) stdin.write(src) stdin.write(b'\n#END_INIT#\n') (stdout, stderr) = p.communicate() stdin.close() if p.returncode == 0: result = json.loads(to_text(stdout, errors='surrogate_then_replace')) else: result = json.loads(to_text(stderr, errors='surrogate_then_replace')) if 'messages' in result: for msg in result.get('messages'): display.vvvv('%s' % msg, host=self._play_context.remote_addr) if 'error' in result: if self._play_context.verbosity > 2: msg = "The full traceback is:\n" + result['exception'] display.display(result['exception'], color=C.COLOR_ERROR) raise AnsibleError(result['error']) return result['socket_path']
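# A minimal sketch of the init frame _start_connection() writes to the spawned
# ansible-connection process: the serialized play context pickled with
# protocol=0 (the only protocol free of control characters, which matters
# because stdin here is a pty) followed by the #END_INIT# sentinel.
# frame_init_data is an illustrative helper, not part of the plugin:
def frame_init_data(play_context_data):
    """Frame init data the way the plugin does before writing to stdin."""
    src = cPickle.dumps(play_context_data, protocol=0)
    return src + b'\n#END_INIT#\n'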
import json def _load_coeffs(filename): with open(filename) as f: return json.load(f) def _evaluate(coeffs, x): return coeffs["a"] * x ** 2 + coeffs["b"] * x + coeffs["c"] if __name__ == "__main__": coeffs = _load_coeffs("coeffs.json") output = [_evaluate(coeffs, x) for x in range(10)] with open("poly_0.out", "w") as f: f.write("\n".join(map(str, output)))
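# For the script above, coeffs.json must hold the three quadratic
# coefficients, e.g.:
#
#     {"a": 1, "b": 0, "c": -2}
#
# with which _evaluate computes x**2 - 2, so poly_0.out would start with
# -2, -1, 2, 7 for x = 0..3.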
""" This module is pretty messy - it is still very much under active development and will likely be changed a lot in the near future - don't depend on any of the functionality currently defined! """ import wx import re import wx.lib.buttons import wx.grid import numpy import os import avoplot from avoplot.series import XYDataSeries import loader class InvalidSelectionError(ValueError): pass class FileContentsPanel(wx.Panel): def __init__(self, parent, file_contents): wx.Panel.__init__(self, parent, wx.ID_ANY) box = wx.StaticBox(self, wx.ID_ANY, "File Contents") vsizer = wx.StaticBoxSizer(box, wx.VERTICAL) #create the rows/columns check boxes for selecting data format #add a drop-down panel for displaying the file header contents (if there is any) if file_contents.header: self.header_pane = wx.CollapsiblePane(self, wx.ID_ANY, "File Header") win = self.header_pane.GetPane() header_txt_ctrl = wx.TextCtrl(win, wx.ID_ANY, value=file_contents.header, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL) header_pane_sizer = wx.BoxSizer(wx.VERTICAL) header_pane_sizer.Add(header_txt_ctrl, 1, wx.GROW | wx.ALL, border=5) win.SetSizer(header_pane_sizer) header_pane_sizer.SetSizeHints(win) wx.EVT_COLLAPSIBLEPANE_CHANGED(self, self.header_pane.GetId(), self.on_expand) vsizer.Add(self.header_pane, 0, wx.GROW) else: self.header_pane = None #add the grid panel for data selection self.grid_panel = ColumnDataPanel(self, file_contents) vsizer.Add(self.grid_panel, 1, wx.EXPAND) #add a drop-down panel for displaying the file footer contents (if there is any) if file_contents.footer: self.footer_pane = wx.CollapsiblePane(self, wx.ID_ANY, "File Footer") win = self.footer_pane.GetPane() footer_txt_ctrl = wx.TextCtrl(win, wx.ID_ANY, value=file_contents.footer, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL) footer_pane_sizer = wx.BoxSizer(wx.VERTICAL) footer_pane_sizer.Add(footer_txt_ctrl, 1, wx.GROW | wx.ALL, border=5) win.SetSizer(footer_pane_sizer) footer_pane_sizer.SetSizeHints(win) wx.EVT_COLLAPSIBLEPANE_CHANGED(self, self.footer_pane.GetId(), self.on_expand) vsizer.Add(self.footer_pane, 0, wx.GROW) else: self.footer_pane = None self.SetSizer(vsizer) vsizer.Fit(self) self.SetAutoLayout(True) def on_expand(self, evnt): self.SendSizeEvent() def on_cols_chkbox(self, evnt): status = self.cols_checkbox.IsChecked() self.rows_checkbox.SetValue(not status) def on_rows_chkbox(self, evnt): status = self.rows_checkbox.IsChecked() self.cols_checkbox.SetValue(not status) def enable_select_mode(self, val, data_series): self.grid_panel.enable_select_mode(val, data_series) if val: #self.cols_checkbox.Disable() #self.rows_checkbox.Disable() if self.header_pane is not None: self.header_pane.Disable() if self.footer_pane is not None: self.footer_pane.Disable() else: #self.cols_checkbox.Enable() #self.rows_checkbox.Enable() if self.header_pane is not None: self.header_pane.Enable() if self.footer_pane is not None: self.footer_pane.Enable() class ColumnDataPanel(wx.ScrolledWindow): def __init__(self, parent, file_contents): wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY) self.SetScrollRate(5,5) self.file_contents = file_contents n_cols = file_contents.get_number_of_columns() n_rows = file_contents.get_number_of_rows() vsizer = wx.BoxSizer(wx.VERTICAL) #create cells self.grid = wx.grid.Grid(self, wx.ID_ANY) self.grid.EnableGridLines(False) self.grid.CreateGrid(n_rows, n_cols) self.col_letter_names = [] for c, col in enumerate(file_contents.get_columns()): self.col_letter_names.append(self.grid.GetColLabelValue(c)) if 
col.title and not col.title.isspace(): self.grid.SetColLabelValue(c, ''.join([self.grid.GetColLabelValue(c),'\n',col.title])) for r, data in enumerate(col.raw_data): self.grid.SetCellValue(r, c, data) self.grid.AutoSize() #set the size of the grid to be big enough that the grid's scrollbars are #not enabled self.grid.SetSize(self.grid.GetBestVirtualSize()) #self.grid.DisableDragColSize() #self.grid.DisableDragRowSize() #create choice boxes for data types self.data_type_sizer = wx.BoxSizer(wx.HORIZONTAL) text = wx.StaticText(self, wx.ID_ANY,"Data type:") self.data_type_sizer.Add(text,0,wx.ALIGN_LEFT| wx.ALIGN_CENTER_VERTICAL) self.data_type_sizer.AddSpacer(self.grid.GetRowLabelSize()-text.GetSize()[0]) self.data_type_choices = {} self.choices_list = [] self.dtypes = ["number", "text"] for col in file_contents.get_columns(): choice = wx.Choice(self, wx.ID_ANY, choices=self.dtypes) choice.SetSelection(self.dtypes.index(col.get_data_type())) self.choices_list.append(choice) self.data_type_choices[choice.GetId()] = (choice, col) self.data_type_sizer.Add(choice,0,wx.ALIGN_CENTER_VERTICAL|wx.GROW) #register the event handler for changing the columns dtype wx.EVT_CHOICE(self, choice.GetId(), self.on_change_col_dtype) #match the data type choice box sizes to the grid column sizes for idx, choice in enumerate(self.choices_list): choice_size = choice.GetSize()[0] col_size = self.grid.GetColSize(idx) if choice_size < col_size: choice.SetMinSize((col_size,-1)) else: self.grid.SetColSize(idx,choice_size) self.grid.SetColMinimalWidth(idx,choice_size) wx.grid.EVT_GRID_CMD_COL_SIZE(self, self.grid.GetId(), self.on_column_resize) vsizer.Add(self.data_type_sizer, 0, wx.EXPAND) vsizer.Add(self.grid, 1, wx.EXPAND) self.SetSizer(vsizer) vsizer.Fit(self) #self.grid.EnableGridLines(True) def on_change_col_dtype(self, evnt): choice, col = self.data_type_choices[evnt.GetId()] new_dtype = self.dtypes[choice.GetSelection()] try: col.set_data_type(new_dtype) except loader.InvalidDataTypeError: choice.SetSelection(self.dtypes.index(col.get_data_type())) wx.MessageBox("Failed to interpret the data as type \'%s\'"%new_dtype, avoplot.PROG_SHORT_NAME, wx.ICON_EXCLAMATION) def get_selection(self): """ Returns a tuple (selection string, col_idx, data mask) where selection string is a human readable string of the selection made, col_idx is the index of the column that the mask relates to and data mask is a numpy mask array where True indicates a selection and False indicates a value to mask out. 
""" cols_selected = self.grid.GetSelectedCols() cells_selected = self.grid.GetSelectedCells() blocks_TL_selected = self.grid.GetSelectionBlockTopLeft() blocks_BR_selected = self.grid.GetSelectionBlockBottomRight() if cols_selected: #only complete columns have been selected (or at least if other cells have also been #selected then they are invalid) if (len(cols_selected) > 1 or blocks_TL_selected or cells_selected): raise InvalidSelectionError("You cannot select data from more than one column for an axis data series.") if self.file_contents.get_columns()[cols_selected[0]].get_data_type() == 'text': raise InvalidSelectionError("You cannot plot text as a data series.") selection_str = '%s[:]'%self.file_contents.get_col_name(cols_selected[0]) return selection_str if not (cells_selected or blocks_BR_selected): #No selection made return "" #otherwise we have a selection of blocks of cells and individual cells to sort out #first check that they are all from the same column cols = set([c for r,c in cells_selected] + [c for r,c in blocks_TL_selected] + [c for r,c in blocks_BR_selected]) if len(cols) != 1: raise InvalidSelectionError("You cannot select data from more than one column for an axis data series.") col_idx = cols.pop() if self.file_contents.get_columns()[col_idx].get_data_type() == 'text': raise InvalidSelectionError("You cannot plot text as a data series.") #create a list of (start_row, end_row) tuples for all the cells and blocks selected start_idxs = [r for r,c in blocks_TL_selected] + [r for r,c in cells_selected] end_idxs = [r for r,c in blocks_BR_selected] + [r for r,c in cells_selected] selections = zip(start_idxs, end_idxs) #sort them into row order tuple_compare = lambda x1,x2: cmp(x1[0], x2[0]) selections.sort(cmp=tuple_compare) #we +1 to the row numbers because numbering starts at 1 for the row labels but at 0 #for their indices selection_str = ', '.join(['%s[%d:%d]'%(self.col_letter_names[col_idx], start+1, end+1) for start,end in selections]) return selection_str def _on_mouse_motion(self, evnt): self.grid.GetTargetWindow().SetCursor(wx.CROSS_CURSOR) evnt.Skip() def enable_select_mode(self, val, data_series): self.set_editable(not val) if val: self.grid.GetTargetWindow().Bind(wx.EVT_MOTION, self._on_mouse_motion) else: self.grid.GetTargetWindow().SetCursor(wx.NullCursor) self.grid.GetTargetWindow().Unbind(wx.EVT_MOTION) if val: self.grid.ClearSelection() for choice, col in self.data_type_choices.values(): choice.Disable() else: for choice, col in self.data_type_choices.values(): choice.Enable() try: selection = self.get_selection() except InvalidSelectionError,e: wx.MessageBox(e.args[0], avoplot.PROG_SHORT_NAME, wx.ICON_EXCLAMATION) selection = "" data_series.set_selection(selection) def set_editable(self, value): self.grid.EnableDragGridSize(value) self.grid.EnableDragColSize(value) self.grid.EnableDragRowSize(value) self.grid.EnableEditing(value) def on_column_resize(self, evnt): """ Handle column resize events - this requires all the data_type choices to be resized to match the columns """ for col_num, choice in enumerate(self.choices_list): col_size = self.grid.GetColSize(col_num) choice.SetMinSize((col_size,-1)) self.data_type_sizer.Layout() class XYDataSeriesPanel(wx.Panel): def __init__(self, parent, file_contents, main_frame): self.__selecting_x = False self.file_contents = file_contents wx.Panel.__init__(self, parent, wx.ID_ANY) self.hsizer = wx.BoxSizer(wx.HORIZONTAL) self.main_frame = main_frame self.xseries_box = wx.TextCtrl(self, wx.ID_ANY) self.yseries_box = 
wx.TextCtrl(self, wx.ID_ANY) button_sz = self.yseries_box.GetSize()[1] self.add_button = wx.BitmapButton(self, wx.ID_ANY, wx.ArtProvider.GetBitmap("avoplot_add",wx.ART_BUTTON)) self.remove_button = wx.BitmapButton(self, wx.ID_ANY, wx.ArtProvider.GetBitmap("avoplot_remove",wx.ART_BUTTON)) self.select_x_button = wx.lib.buttons.ThemedGenBitmapToggleButton(self, wx.ID_ANY, wx.ArtProvider.GetBitmap("avoplot_col_select",wx.ART_BUTTON), size=(button_sz,button_sz)) self.select_y_button = wx.lib.buttons.ThemedGenBitmapToggleButton(self, wx.ID_ANY, wx.ArtProvider.GetBitmap("avoplot_col_select",wx.ART_BUTTON), size=(button_sz,button_sz)) wx.EVT_BUTTON(self, self.select_x_button.GetId(), self.on_select_x_series) wx.EVT_BUTTON(self, self.select_y_button.GetId(), self.on_select_y_series) self.hsizer.Add(wx.StaticText(self, wx.ID_ANY, "x data: "),0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT) self.hsizer.Add(self.xseries_box, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT) self.hsizer.Add(self.select_x_button,0,wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT) self.hsizer.AddSpacer(10) self.hsizer.Add(wx.StaticText(self, wx.ID_ANY, "y data: "),0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT) self.hsizer.Add(self.yseries_box, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT) self.hsizer.Add(self.select_y_button,0,wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT) self.hsizer.AddSpacer(10) self.hsizer.Add(self.remove_button,0, wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT| wx.RESERVE_SPACE_EVEN_IF_HIDDEN) self.hsizer.Add(self.add_button,0, wx.ALIGN_CENTER_VERTICAL| wx.ALIGN_LEFT| wx.RESERVE_SPACE_EVEN_IF_HIDDEN) self.SetSizer(self.hsizer) self.hsizer.Fit(self) self.SetAutoLayout(True) def enable_select_mode(self, val, series): if val: self.xseries_box.Disable() self.yseries_box.Disable() self.select_x_button.Disable() self.select_y_button.Disable() self.add_button.Disable() self.remove_button.Disable() else: self.xseries_box.Enable() self.yseries_box.Enable() self.select_x_button.Enable() self.select_y_button.Enable() self.add_button.Enable() self.remove_button.Enable() def plot_into_axes(self, axes): xdata = self.get_x_series_data() ydata = self.get_y_series_data() if xdata is None and ydata is None: return if xdata is None: axes.plot(numpy.arange(len(ydata)),ydata) elif ydata is None: axes.plot(xdata,numpy.arange(len(xdata))) else: axes.plot(xdata, ydata) def get_series_data(self): """ Returns a tuple of (xdata, ydata) """ xdata = self.get_x_series_data() ydata = self.get_y_series_data() if xdata is None and ydata is None: return if xdata is None: return (numpy.arange(len(ydata)),ydata) elif ydata is None: return (xdata,numpy.arange(len(xdata))) else: return (xdata, ydata) def get_x_series_data(self): return self.__get_data_selection(self.xseries_box.GetValue(), False) def get_y_series_data(self): return self.__get_data_selection(self.yseries_box.GetValue(), False) def validate_selection(self, row_selection): self._validate_selection_str(self.xseries_box.GetValue(), row_selection) self._validate_selection_str(self.yseries_box.GetValue(), row_selection) def _validate_selection_str(self, selection_str, row_selection=False): if row_selection: raise NotImplementedError("Selecting rows as data series is not implemented yet!") if not selection_str or selection_str.isspace(): return [] selection_blocks = selection_str.split(',') if row_selection: raise NotImplementedError("Selecting rows as data series is not implemented yet!") else: regexp = re.compile(r''' (?:^\s*(?P<column>[A-Z]+) #matches column name \s*\[\s*(?P<lower_bound>[0-9]*) #matches lower 
bound number (if there is one) \s*:\s* (?P<upper_bound>[0-9]*)\s*\]\s*$) #matches upper bound number (if there is one)''', flags=re.VERBOSE) cols = set() selection_params = [] for block in selection_blocks: match = regexp.match(block) if match is None: #then there is a syntax error in the selection string raise InvalidSelectionError("Syntax error in selection string. \'%s\' is not a valid selection, expecting something of the form \'A[2:8]\'."%block) params = match.groupdict() selection_params.append(params) cols.add(params['column']) n_rows = self.file_contents.get_column_by_name(params['column']).get_number_of_rows() if not params['lower_bound']: params['lower_bound'] = '1' if not params['upper_bound']: params['upper_bound'] = str(n_rows) lower_bound = int(params['lower_bound']) upper_bound = int(params['upper_bound']) if lower_bound < 1: raise InvalidSelectionError("Value error in selection string. \'%s\' is not a valid selection, lower bound must be greater than zero."%block) if lower_bound > upper_bound: raise InvalidSelectionError("Value error in selection string. \'%s\' is not a valid selection, upper bound cannot be smaller than lower bound."%block) if upper_bound > n_rows: raise InvalidSelectionError("Value error in selection string. \'%s\' is not a valid selection, upper bound is outside data range."%block) if len(cols) != 1: raise InvalidSelectionError("Selection cannot contain data from multiple columns.") return selection_params def __get_data_selection(self, selection_str, row_selection=False): """ Given a selection string (of the form "A[1:20], A[23:25]"), returns a masked array of the requested data. """ if row_selection: raise NotImplementedError("Selecting rows as data series is not implemented yet!") if not selection_str or selection_str.isspace(): #empty string - no selection made return None selection_params = self._validate_selection_str(selection_str, row_selection) #see if we have any complete column selections - life is easy if we do!
blocks = [] for s in selection_params: #-1 because array indexing starts at 0 but row indexing starts at 1 blocks.append((int(s['lower_bound'])-1,int(s['upper_bound'])-1)) #otherwise build a mask for the selection column = self.file_contents.get_column_by_name(selection_params[0]['column']) data_mask = column.get_data_mask() #sort selection blocks into row order tuple_compare = lambda x1,x2: cmp(x1[0], x2[0]) blocks.sort(cmp=tuple_compare) selection_mask = numpy.ones_like(data_mask) for start,end in blocks: selection_mask[start:end+1] = False mask = numpy.logical_or(data_mask, selection_mask) return numpy.ma.masked_array(column.get_data(), mask=mask) def get_add_button_id(self): return self.add_button.GetId() def get_remove_button_id(self): return self.remove_button.GetId() def set_button_visibility(self, add_button, remove_button): self.add_button.Show(add_button) self.remove_button.Show(remove_button) self.hsizer.Layout() def on_select_x_series(self, evnt): if self.select_x_button.GetToggle(): self.__selecting_x = True self.main_frame.enable_select_mode(True, self) self.select_x_button.Enable() else: self.main_frame.enable_select_mode(False, self) self.__selecting_x = False def on_select_y_series(self, evnt): if self.select_y_button.GetToggle(): self.__selecting_x = False self.main_frame.enable_select_mode(True, self) self.select_y_button.Enable() else: self.main_frame.enable_select_mode(False, self) def set_selection(self, selection_str): if self.__selecting_x: self.xseries_box.SetValue(selection_str) else: self.yseries_box.SetValue(selection_str) class DataSeriesSelectPanelContainer(wx.SashWindow): def __init__(self, parent, main_frame, file_contents): wx.SashWindow.__init__(self, parent, wx.ID_ANY) vsizer = wx.BoxSizer(wx.VERTICAL) vsizer.Add(DataSeriesSelectPanel(self, main_frame, file_contents),1,wx.EXPAND) self.SetSashVisible(wx.SASH_TOP, True) self.SetSizer(vsizer) vsizer.Fit(self) class DataSeriesSelectPanel(wx.ScrolledWindow): def __init__(self, parent, main_frame, file_contents): wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY) self.SetScrollRate(5,5) self.file_contents = file_contents box = wx.StaticBox(self, wx.ID_ANY, "Data Series") self.vsizer = wx.StaticBoxSizer(box, wx.VERTICAL) self.main_frame = main_frame self.data_series = [] self.data_series_id_mapping = {} #{id:index in data_series} self.on_add_data_series(None) self.SetSizer(self.vsizer) self.vsizer.Fit(self) self.SetAutoLayout(True) def enable_select_mode(self, val, series): for s in self.data_series: s.enable_select_mode(val, series) def on_add_data_series(self, evnt): series = XYDataSeriesPanel(self, self.file_contents, self.main_frame) #remove button only if it is not the first series series.set_button_visibility(True, bool(self.data_series)) self.vsizer.Add(series,1, wx.EXPAND | wx.ALIGN_TOP) if self.data_series: self.data_series[-1].set_button_visibility(False, True) self.data_series.append(series) self.data_series_id_mapping[series.get_remove_button_id()] = series wx.EVT_BUTTON(self, self.data_series[-1].get_add_button_id(), self.on_add_data_series) wx.EVT_BUTTON(self, self.data_series[-1].get_remove_button_id(), self.on_remove_data_series) self.SendSizeEvent() def on_remove_data_series(self, evnt): id_ = evnt.Id series = self.data_series_id_mapping[id_] self.data_series_id_mapping.pop(id_) idx = self.data_series.index(series) if idx == len(self.data_series) -1: self.data_series[-2].set_button_visibility(True, len(self.data_series)>2) elif idx == 0: 
self.data_series[1].set_button_visibility(len(self.data_series)<3, len(self.data_series)>2) elif len(self.data_series)<3: self.data_series[0].set_button_visibility(True, False) self.data_series[idx].Destroy() self.data_series.remove(series) self.SendSizeEvent() class TxtFileDataSeriesSelectFrame(wx.Dialog): def __init__(self, parent, file_contents): #set the title to the file name frame_title = "%s - Data Select - %s" %(file_contents.filename,avoplot.PROG_SHORT_NAME) wx.Dialog.__init__(self, parent, wx.ID_ANY, frame_title, style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX) self.parent = parent self.filename = file_contents.filename #set up the icon for the frame self.SetIcon(wx.ArtProvider.GetIcon("avoplot")) #create top level panel to hold all frame elements top_panel = wx.Panel(self, wx.ID_ANY) #create top level sizer to contain all frame elements topsizer = wx.BoxSizer(wx.VERTICAL) vsizer = wx.BoxSizer(wx.VERTICAL) topsizer.AddSpacer(5) topsizer.Add(vsizer, 1, wx.EXPAND) #create all the frame elements self.splitter = wx.SplitterWindow(top_panel, -1) self.splitter.SetMinimumPaneSize(50) self.file_contents_panel = FileContentsPanel(self.splitter, file_contents) self.data_series_panel = DataSeriesSelectPanel(self.splitter, self, file_contents) width,sash_pos = self.data_series_panel.GetSizeTuple() self.splitter.SplitHorizontally(self.file_contents_panel, self.data_series_panel, -2*sash_pos) vsizer.Add(self.splitter,1 , wx.EXPAND|wx.ALL, border=5) self.SetSize((width + 60, -1)) #create main buttons buttons_sizer = wx.BoxSizer(wx.HORIZONTAL) self.plot_button = wx.Button(top_panel, wx.ID_ANY, "Plot") self.cancel_button = wx.Button(top_panel, wx.ID_ANY, "Cancel") buttons_sizer.Add(self.cancel_button, 1, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM) buttons_sizer.Add(self.plot_button, 1, wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM) wx.EVT_BUTTON(self, self.plot_button.GetId(), self.on_plot) wx.EVT_BUTTON(self, self.cancel_button.GetId(), self.on_cancel) vsizer.Add(buttons_sizer, 0, wx.ALL | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT, border=10) #configure layout and position top_panel.SetSizer(topsizer) topsizer.Fit(top_panel) top_panel.SetAutoLayout(True) self.Center(wx.BOTH) self.SendSizeEvent() #force redraw (only needed for windows) self.splitter.SetSashGravity(0.5) self.Show() def on_plot(self, evnt): data_flag = False for series in self.data_series_panel.data_series: try: #TODO - read the row data status from the checkbox series.validate_selection(False) if series.get_series_data(): #TODO - this is a hack for now! data_flag=True except InvalidSelectionError,e: wx.MessageBox(e.args[0], avoplot.PROG_SHORT_NAME, wx.ICON_ERROR) return if not data_flag: wx.MessageBox("No data series selected!", avoplot.PROG_SHORT_NAME, wx.ICON_ERROR) return self.EndModal(wx.ID_OK) def on_cancel(self, evnt): wx.SetCursor(wx.NullCursor) #self.file_contents_panel.SetCursor(wx.NullCursor) self.EndModal(wx.ID_CANCEL) def enable_select_mode(self, val, data_series): self.file_contents_panel.enable_select_mode(val, data_series) self.data_series_panel.enable_select_mode(val, data_series) self.plot_button.Enable(enable=(not val)) #if val: # wx.SetCursor(wx.CROSS_CURSOR) #self.file_contents_panel.SetCursor(wx.CROSS_CURSOR) #else: # wx.SetCursor(wx.NullCursor) #self.file_contents_panel.SetCursor(wx.NullCursor) def get_series(self): series = [] for s in self.data_series_panel.data_series: data = s.get_series_data() if data: series.append(XYDataSeries(os.path.basename(self.filename),xdata=data[0], ydata=data[1])) return series
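# A minimal, self-contained sketch of the column-selection technique used in
# __get_data_selection() above: a string such as "A[2:5],A[8:10]" is parsed
# with a verbose regex, then turned into a masked array in which everything
# outside the selected 1-based row blocks is masked. All names here are
# illustrative and not part of AvoPlot itself.
import re
import numpy

_SEL_RE = re.compile(r'''
    ^\s*(?P<column>[A-Z]+)                   #matches column name
    \s*\[\s*(?P<lower_bound>[0-9]*)          #matches optional lower bound
    \s*:\s*(?P<upper_bound>[0-9]*)\s*\]\s*$  #matches optional upper bound
    ''', flags=re.VERBOSE)

def select_from_column(data, selection_str):
    """Return a masked array exposing only the selected 1-based row blocks."""
    mask = numpy.ones(len(data), dtype=bool)  #start fully masked
    for block in selection_str.split(','):
        match = _SEL_RE.match(block)
        if match is None:
            raise ValueError("bad selection block: %r" % block)
        lower = int(match.group('lower_bound') or 1)
        upper = int(match.group('upper_bound') or len(data))
        mask[lower - 1:upper] = False  #unmask the selected block
    return numpy.ma.masked_array(data, mask=mask)

#e.g. select_from_column(numpy.arange(6), "A[1:2],A[5:5]")
#-> masked_array(data=[0, 1, --, --, 4, --], ...)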
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: icx_ping version_added: "2.9" author: "Ruckus Wireless (@Commscope)" short_description: Tests reachability using ping from Ruckus ICX 7000 series switches description: - Tests reachability using ping from switch to a remote destination. notes: - Tested against ICX 10.1 options: count: description: - Number of packets to send. Default is 1. type: int dest: description: - ip-addr | host-name | vrf vrf-name | ipv6 [ ipv6-addr | host-name | vrf vrf-name] (resolvable by switch) of the remote node. required: true type: str timeout: description: - Specifies the time, in milliseconds for which the device waits for a reply from the pinged device. The value can range from 1 to 4294967296. The default is 5000 (5 seconds). type: int ttl: description: - Specifies the time to live as a maximum number of hops. The value can range from 1 to 255. The default is 64. type: int size: description: - Specifies the size of the ICMP data portion of the packet, in bytes. This is the payload and does not include the header. The value can range from 0 to 10000. The default is 16.. type: int source: description: - IP address to be used as the origin of the ping packets. type: str vrf: description: - Specifies the Virtual Routing and Forwarding (VRF) instance of the device to be pinged. type: str state: description: - Determines if the expected result is success or fail. type: str choices: [ absent, present ] default: present """ EXAMPLES = r''' - name: Test reachability to 10.10.10.10 icx_ping: dest: 10.10.10.10 - name: Test reachability to ipv6 address from source with timeout icx_ping: dest: ipv6 2001:cdba:0000:0000:0000:0000:3257:9652 source: 10.1.1.1 timeout: 100000 - name: Test reachability to 10.1.1.1 through vrf using 5 packets icx_ping: dest: 10.1.1.1 vrf: x.x.x.x count: 5 - name: Test unreachability to 10.30.30.30 icx_ping: dest: 10.40.40.40 state: absent - name: Test reachability to ipv4 with ttl and packet size icx_ping: dest: 10.10.10.10 ttl: 20 size: 500 ''' RETURN = ''' commands: description: Show the command sent. returned: always type: list sample: ["ping 10.40.40.40 count 20 source loopback0", "ping 10.40.40.40"] packet_loss: description: Percentage of packets lost. returned: always type: str sample: "0%" packets_rx: description: Packets successfully received. returned: always type: int sample: 20 packets_tx: description: Packets successfully transmitted. returned: always type: int sample: 20 rtt: description: Show RTT stats. returned: always type: dict sample: {"avg": 2, "max": 8, "min": 1} ''' from ansible.module_utils._text import to_text from ansible.module_utils.network.icx.icx import run_commands from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection, ConnectionError import re def build_ping(dest, count=None, source=None, timeout=None, ttl=None, size=None, vrf=None): """ Function to build the command to send to the terminal for the switch to execute. All args come from the module's unique params. 
""" if vrf is not None: cmd = "ping vrf {0} {1}".format(vrf, dest) else: cmd = "ping {0}".format(dest) if count is not None: cmd += " count {0}".format(str(count)) if timeout is not None: cmd += " timeout {0}".format(str(timeout)) if ttl is not None: cmd += " ttl {0}".format(str(ttl)) if size is not None: cmd += " size {0}".format(str(size)) if source is not None: cmd += " source {0}".format(source) return cmd def parse_ping(ping_stats): """ Function used to parse the statistical information from the ping response. Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms." Returns the percent of packet loss, recieved packets, transmitted packets, and RTT dict. """ if ping_stats.startswith('Success'): rate_re = re.compile(r"^\w+\s+\w+\s+\w+\s+(?P<pct>\d+)\s+\w+\s+\((?P<rx>\d+)/(?P<tx>\d+)\)") rtt_re = re.compile(r".*,\s+\S+\s+\S+=(?P<min>\d+)/(?P<avg>\d+)/(?P<max>\d+)\s+\w+\.+\s*$|.*\s*$") rate = rate_re.match(ping_stats) rtt = rtt_re.match(ping_stats) return rate.group("pct"), rate.group("rx"), rate.group("tx"), rtt.groupdict() else: rate_re = re.compile(r"^Sending+\s+(?P<tx>\d+),") rate = rate_re.match(ping_stats) rtt = {'avg': 0, 'max': 0, 'min': 0} return 0, 0, rate.group('tx'), rtt def validate_results(module, loss, results): """ This function is used to validate whether the ping results were unexpected per "state" param. """ state = module.params["state"] if state == "present" and loss == 100: module.fail_json(msg="Ping failed unexpectedly", **results) elif state == "absent" and loss < 100: module.fail_json(msg="Ping succeeded unexpectedly", **results) def validate_fail(module, responses): if ("Success" in responses or "No reply" in responses) is False: module.fail_json(msg=responses) def validate_parameters(module, timeout, count): if timeout and not 1 <= int(timeout) <= 4294967294: module.fail_json(msg="bad value for timeout - valid range (1-4294967294)") if count and not 1 <= int(count) <= 4294967294: module.fail_json(msg="bad value for count - valid range (1-4294967294)") def main(): """ main entry point for module execution """ argument_spec = dict( count=dict(type="int"), dest=dict(type="str", required=True), timeout=dict(type="int"), ttl=dict(type="int"), size=dict(type="int"), source=dict(type="str"), state=dict(type="str", choices=["absent", "present"], default="present"), vrf=dict(type="str") ) module = AnsibleModule(argument_spec=argument_spec) count = module.params["count"] dest = module.params["dest"] source = module.params["source"] timeout = module.params["timeout"] ttl = module.params["ttl"] size = module.params["size"] vrf = module.params["vrf"] results = {} warnings = list() if warnings: results["warnings"] = warnings response = '' try: validate_parameters(module, timeout, count) results["commands"] = [build_ping(dest, count, source, timeout, ttl, size, vrf)] ping_results = run_commands(module, commands=results["commands"]) ping_results_list = ping_results[0].split("\n") except ConnectionError as exc: module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) validate_fail(module, ping_results[0]) stats = "" statserror = '' for line in ping_results_list: if line.startswith('Sending'): statserror = line if line.startswith('Success'): stats = line elif line.startswith('No reply'): stats = statserror success, rx, tx, rtt = parse_ping(stats) loss = abs(100 - int(success)) results["packet_loss"] = str(loss) + "%" results["packets_rx"] = int(rx) results["packets_tx"] = int(tx) # Convert rtt values to int for k, v in rtt.items(): if rtt[k] is 
not None: rtt[k] = int(v) results["rtt"] = rtt validate_results(module, loss, results) module.exit_json(**results) if __name__ == '__main__': main()
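# A quick, standalone check of the "Success rate" parsing used by parse_ping()
# above, run against a sample line in the format the module documents. The
# sample text is assumed output, not captured from a real ICX switch.
import re

rate_re = re.compile(r"^\w+\s+\w+\s+\w+\s+(?P<pct>\d+)\s+\w+\s+\((?P<rx>\d+)/(?P<tx>\d+)\)")
rtt_re = re.compile(r".*,\s+\S+\s+\S+=(?P<min>\d+)/(?P<avg>\d+)/(?P<max>\d+)\s+\w+\.+\s*$|.*\s*$")

sample = "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."
rate = rate_re.match(sample)
rtt = rtt_re.match(sample)
assert (rate.group("pct"), rate.group("rx"), rate.group("tx")) == ("100", "5", "5")
assert rtt.groupdict() == {"min": "40", "avg": "51", "max": "55"}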
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: bigip_policy_rule short_description: Manage LTM policy rules on a BIG-IP description: - This module will manage LTM policy rules on a BIG-IP. version_added: 2.5 options: actions: description: - The actions that you want the policy rule to perform. - The available attributes vary by the action, however, each action requires that a C(type) be specified. - These conditions can be specified in any order. Despite them being a list, the BIG-IP does not treat their order as anything special. - Available C(type) values are C(forward). suboptions: type: description: - The action type. This value controls what below options are required. - When C(type) is C(forward), will associate a given C(pool) with this rule. - When C(type) is C(enable), will associate a given C(asm_policy) with this rule. - When C(type) is C(ignore), will remove all existing actions from this rule. required: true choices: [ 'forward', 'enable', 'ignore' ] pool: description: - Pool that you want to forward traffic to. - This parameter is only valid with the C(forward) type. asm_policy: description: - ASM policy to enable. - This parameter is only valid with the C(enable) type. policy: description: - The name of the policy that you want to associate this rule with. required: True name: description: - The name of the rule. required: True conditions: description: - A list of attributes that describe the condition. - See suboptions for details on how to construct each list entry. - The ordering of this list is important, the module will ensure the order is kept when modifying the task. - The suboption options listed below are not required for all condition types, read the description for more details. - These conditions can be specified in any order. Despite them being a list, the BIG-IP does not treat their order as anything special. suboptions: type: description: - The condition type. This value controls what below options are required. - When C(type) is C(http_uri), will associate a given C(path_begins_with_any) list of strings with which the HTTP URI should begin with. Any item in the list will provide a match. - When C(type) is C(all_traffic), will remove all existing conditions from this rule. required: true choices: [ 'http_uri', 'all_traffic' ] path_begins_with_any: description: - A list of strings of characters that the HTTP URI should start with. - This parameter is only valid with the C(http_uri) type. state: description: - When C(present), ensures that the key is uploaded to the device. When C(absent), ensures that the key is removed from the device. If the key is currently in use, the module will not be able to remove the key. default: present choices: - present - absent partition: description: - Device partition to manage resources on. 
default: Common extends_documentation_fragment: f5 requirements: - BIG-IP >= v12.1.0 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create policies bigip_policy: name: Policy-Foo state: present delegate_to: localhost - name: Add a rule to the new policy bigip_policy_rule: policy: Policy-Foo name: rule3 conditions: - type: http_uri path_begins_with_any: /ABC actions: - type: forward pool: pool-svrs - name: Add multiple rules to the new policy bigip_policy_rule: policy: Policy-Foo name: "{{ item.name }}" conditions: "{{ item.conditions }}" actions: "{{ item.actions }}" loop: - name: rule1 actions: - type: forward pool: pool-svrs conditions: - type: http_uri path_begins_with_any: /euro - name: rule2 actions: - type: forward pool: pool-svrs conditions: - type: http_uri path_begins_with_any: /HomePage/ - name: Remove all rules and conditions from the rule bigip_policy_rule: policy: Policy-Foo name: rule1 conditions: - type: all_traffic actions: - type: ignore ''' RETURN = r''' actions: description: The new list of actions applied to the rule. returned: changed type: complex contains: type: description: The action type returned: changed type: string sample: forward pool: description: Pool to forward to returned: changed type: string sample: foo-pool sample: hash/dictionary of values conditions: description: The new list of conditions applied to the rule. returned: changed type: complex contains: type: description: The condition type returned: changed type: string sample: http_uri path_begins_with_any: description: List of strings that the URI begins with. returned: changed type: list sample: [foo, bar] sample: hash/dictionary of values description: description: The new description of the rule. returned: changed type: string sample: My rule ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from ansible.module_utils.six import iteritems HAS_DEVEL_IMPORTS = False try: # Sideband repository used for dev from library.module_utils.network.f5.bigip import HAS_F5SDK from library.module_utils.network.f5.bigip import F5Client from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fqdn_name from library.module_utils.network.f5.common import f5_argument_spec try: from library.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False HAS_DEVEL_IMPORTS = True except ImportError: # Upstream Ansible from ansible.module_utils.network.f5.bigip import HAS_F5SDK from ansible.module_utils.network.f5.bigip import F5Client from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fqdn_name from ansible.module_utils.network.f5.common import f5_argument_spec try: from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError except ImportError: HAS_F5SDK = False class Parameters(AnsibleF5Parameters): api_map = { 'actionsReference': 'actions', 'conditionsReference': 'conditions' } api_attributes = [ 'description', 'actions', 'conditions' ] updatables = [ 'actions', 'conditions', 'description' ] def _fqdn_name(self, value): if value is not None and not value.startswith('/'): return
'/{0}/{1}'.format(self.partition, value) return value @property def name(self): return self._values.get('name', None) @property def description(self): return self._values.get('description', None) @property def strategy(self): if self._values['strategy'] is None: return None result = self._fqdn_name(self._values['strategy']) return result @property def policy(self): if self._values['policy'] is None: return None return self._values['policy'] class ApiParameters(Parameters): def _remove_internal_keywords(self, resource): items = ['kind', 'generation', 'selfLink', 'poolReference'] for item in items: try: del resource[item] except KeyError: pass @property def actions(self): result = [] if self._values['actions'] is None or 'items' not in self._values['actions']: return [dict(type='ignore')] for item in self._values['actions']['items']: action = dict() self._remove_internal_keywords(item) if 'forward' in item: action.update(item) action['type'] = 'forward' del action['forward'] elif 'enable' in item: action.update(item) action['type'] = 'enable' del action['enable'] result.append(action) result = sorted(result, key=lambda x: x['name']) return result @property def conditions(self): result = [] if self._values['conditions'] is None or 'items' not in self._values['conditions']: return [dict(type='all_traffic')] for item in self._values['conditions']['items']: action = dict() self._remove_internal_keywords(item) if 'httpUri' in item: action.update(item) action['type'] = 'http_uri' del action['httpUri'] # Converts to common stringiness # # The tuple set "issubset" check that happens in the Difference # engine does not recognize that a u'foo' and 'foo' are equal "enough" # to consider them a subset. Therefore, we cast everything here to # whatever the common stringiness is. if 'values' in action: action['values'] = [str(x) for x in action['values']] result.append(action) # Names contains the index in which the rule is at. result = sorted(result, key=lambda x: x['name']) return result class ModuleParameters(Parameters): @property def actions(self): result = [] if self._values['actions'] is None: return None for idx, item in enumerate(self._values['actions']): action = dict() if 'name' in item: action['name'] = str(item['name']) else: action['name'] = str(idx) if item['type'] == 'forward': self._handle_forward_action(action, item) elif item['type'] == 'enable': self._handle_enable_action(action, item) elif item['type'] == 'ignore': return [dict(type='ignore')] result.append(action) result = sorted(result, key=lambda x: x['name']) return result @property def conditions(self): result = [] if self._values['conditions'] is None: return None for idx, item in enumerate(self._values['conditions']): action = dict() if 'name' in item: action['name'] = str(item['name']) else: action['name'] = str(idx) if item['type'] == 'http_uri': self._handle_http_uri_condition(action, item) elif item['type'] == 'all_traffic': return [dict(type='all_traffic')] result.append(action) result = sorted(result, key=lambda x: x['name']) return result def _handle_http_uri_condition(self, action, item): """Handle the nuances of the forwarding type Right now there is only a single type of forwarding that can be done. As that functionality expands, so-to will the behavior of this, and other, methods. Therefore, do not be surprised that the logic here is so rigid. It's deliberate. 
:param action: :param item: :return: """ action['type'] = 'http_uri' if 'path_begins_with_any' not in item: raise F5ModuleError( "A 'path_begins_with_any' must be specified when the 'http_uri' type is used." ) if isinstance(item['path_begins_with_any'], list): values = item['path_begins_with_any'] else: values = [item['path_begins_with_any']] action.update(dict( path=True, startsWith=True, values=values )) def _handle_forward_action(self, action, item): """Handle the nuances of the forwarding type Right now there is only a single type of forwarding that can be done. As that functionality expands, so-to will the behavior of this, and other, methods. Therefore, do not be surprised that the logic here is so rigid. It's deliberate. :param action: :param item: :return: """ action['type'] = 'forward' if 'pool' not in item: raise F5ModuleError( "A 'pool' must be specified when the 'forward' type is used." ) action['pool'] = self._fqdn_name(item['pool']) def _handle_enable_action(self, action, item): """Handle the nuances of the enable type :param action: :param item: :return: """ action['type'] = 'enable' if 'asm_policy' not in item: raise F5ModuleError( "An 'asm_policy' must be specified when the 'enable' type is used." ) action.update(dict( policy=self._fqdn_name(item['asm_policy']), asm=True )) class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class ReportableChanges(Changes): returnables = [ 'description', 'actions', 'conditions' ] @property def actions(self): result = [] if self._values['actions'] is None: return [dict(type='ignore')] for item in self._values['actions']: action = dict() if 'forward' in item: action.update(item) action['type'] = 'forward' del action['forward'] elif 'enable' in item: action.update(item) action['type'] = 'enable' del action['enable'] result.append(action) result = sorted(result, key=lambda x: x['name']) return result @property def conditions(self): result = [] if self._values['conditions'] is None: return [dict(type='all_traffic')] for item in self._values['conditions']: action = dict() if 'httpUri' in item: action.update(item) action['type'] = 'http_uri' del action['httpUri'] result.append(action) # Names contains the index in which the rule is at. 
result = sorted(result, key=lambda x: x['name']) return result class UsableChanges(Changes): @property def actions(self): if self._values['actions'] is None: return None result = [] for action in self._values['actions']: if 'type' not in action: continue if action['type'] == 'forward': action['forward'] = True del action['type'] elif action['type'] == 'enable': action['enable'] = True del action['type'] elif action['type'] == 'ignore': result = [] break result.append(action) return result @property def conditions(self): if self._values['conditions'] is None: return None result = [] for condition in self._values['conditions']: if 'type' not in condition: continue if condition['type'] == 'http_uri': condition['httpUri'] = True del condition['type'] elif condition['type'] == 'all_traffic': result = [] break result.append(condition) return result class Difference(object): updatables = [ 'actions', 'conditions', 'description' ] def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 def to_tuple(self, items): result = [] for x in items: tmp = [(str(k), str(v)) for k, v in iteritems(x)] result += tmp return result def _diff_complex_items(self, want, have): if want == [] and have is None: return None if want is None: return None w = self.to_tuple(want) h = self.to_tuple(have) if set(w).issubset(set(h)): return None else: return want @property def actions(self): result = self._diff_complex_items(self.want.actions, self.have.actions) if self._conditions_missing_default_rule_for_asm(result): raise F5ModuleError( "The 'all_traffic' condition is required when using an ASM policy in a rule's 'enable' action." 
) return result @property def conditions(self): result = self._diff_complex_items(self.want.conditions, self.have.conditions) return result def _conditions_missing_default_rule_for_asm(self, want_actions): if want_actions is None: actions = self.have.actions else: actions = want_actions if actions is None: return False if any(x for x in actions if x['type'] == 'enable'): conditions = self._diff_complex_items(self.want.conditions, self.have.conditions) if conditions is None: return False if any(y for y in conditions if y['type'] != 'all_traffic'): return True return False class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): args = dict( name=self.want.policy, partition=self.want.partition, ) if self.draft_exists(): args['subPath'] = 'Drafts' policy = self.client.api.tm.ltm.policys.policy.load(**args) result = policy.rules_s.rules.exists( name=self.want.name ) return result def draft_exists(self): params = dict( name=self.want.policy, partition=self.want.partition, subPath='Drafts' ) result = self.client.api.tm.ltm.policys.policy.exists(**params) return result def _create_existing_policy_draft_on_device(self): params = dict( name=self.want.policy, partition=self.want.partition, ) resource = self.client.api.tm.ltm.policys.policy.load(**params) resource.draft() return True def publish_on_device(self): resource = self.client.api.tm.ltm.policys.policy.load( name=self.want.policy, partition=self.want.partition, subPath='Drafts' ) resource.publish() return True def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True if self.draft_exists(): redraft = True else: redraft = False self._create_existing_policy_draft_on_device() self.update_on_device() if redraft is False: self.publish_on_device() return True def remove(self): if self.module.check_mode: return True if self.draft_exists(): redraft = True else: redraft = False self._create_existing_policy_draft_on_device() self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") if redraft is False: self.publish_on_device() return True def 
create(self): self.should_update() if self.module.check_mode: return True if self.draft_exists(): redraft = True else: redraft = False self._create_existing_policy_draft_on_device() self.create_on_device() if redraft is False: self.publish_on_device() return True def create_on_device(self): params = self.changes.api_params() policy = self.client.api.tm.ltm.policys.policy.load( name=self.want.policy, partition=self.want.partition, subPath='Drafts' ) policy.rules_s.rules.create( name=self.want.name, **params ) def update_on_device(self): params = self.changes.api_params() policy = self.client.api.tm.ltm.policys.policy.load( name=self.want.policy, partition=self.want.partition, subPath='Drafts' ) resource = policy.rules_s.rules.load( name=self.want.name ) resource.modify(**params) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): policy = self.client.api.tm.ltm.policys.policy.load( name=self.want.policy, partition=self.want.partition, subPath='Drafts' ) resource = policy.rules_s.rules.load( name=self.want.name ) if resource: resource.delete() def read_current_from_device(self): args = dict( name=self.want.policy, partition=self.want.partition, ) if self.draft_exists(): args['subPath'] = 'Drafts' policy = self.client.api.tm.ltm.policys.policy.load(**args) resource = policy.rules_s.rules.load( name=self.want.name, requests_params=dict( params='expandSubcollections=true' ) ) return ApiParameters(params=resource.attrs) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( description=dict(), actions=dict( type='list', elements='dict', options=dict( type=dict( choices=[ 'forward', 'enable', 'ignore' ], required=True ), pool=dict(), asm_policy=dict() ), mutually_exclusive=[ ['pool', 'asm_policy'] ] ), conditions=dict( type='list', options=dict( type=dict( choices=[ 'http_uri', 'all_traffic' ], required=True ) ), path_begins_with_any=dict() ), name=dict(required=True), policy=dict(required=True), state=dict( default='present', choices=['absent', 'present'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) if not HAS_F5SDK: module.fail_json(msg="The python f5-sdk module is required") try: client = F5Client(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) module.exit_json(**results) except F5ModuleError as ex: cleanup_tokens(client) module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
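# A self-contained illustration of the tuple-set "issubset" comparison that
# Difference._diff_complex_items() above relies on: dicts are flattened to
# (key, value) string tuples so that ordering and string type (u'' vs '')
# stop mattering when deciding whether "want" is already contained in "have".
# The data below is invented for the example.
def to_tuple(items):
    return [(str(k), str(v)) for item in items for k, v in item.items()]

def diff_complex_items(want, have):
    if not want:
        return None
    if set(to_tuple(want)).issubset(set(to_tuple(have))):
        return None  #nothing to change
    return want      #push the wanted values

have = [{'type': 'forward', 'pool': '/Common/pool-svrs'}]
assert diff_complex_items([{'type': 'forward', 'pool': '/Common/pool-svrs'}], have) is None
assert diff_complex_items([{'type': 'forward', 'pool': '/Common/other'}], have) is not None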
import math from collections import defaultdict __all__ = [ "PermutedMNISTTaskIndices", ] class PermutedMNISTTaskIndices: """ A mixin that overwrites `compute_task_indices` when using permutedMNIST to allow for much faster dataset initialization. Note that this mixin may not work with other datasets. """ @classmethod def compute_task_indices(cls, config, dataset): # Assume dataloaders are already created class_indices = defaultdict(list) for idx in range(len(dataset)): target = _get_target(dataset, idx) class_indices[target].append(idx) # Defines how many classes should exist per task num_tasks = config.get("num_tasks", 1) num_classes = config.get("num_classes", None) assert num_classes is not None, "num_classes should be defined" num_classes_per_task = math.floor(num_classes / num_tasks) task_indices = defaultdict(list) for i in range(num_tasks): for j in range(num_classes_per_task): task_indices[i].extend(class_indices[j + (i * num_classes_per_task)]) return task_indices def _get_target(dataset, idx): target = int(dataset.targets[idx % len(dataset.data)]) task_id = dataset.get_task_id(idx) target += 10 * task_id return target
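# A toy run of the class-to-task partitioning implemented by
# compute_task_indices() above, with hand-written targets instead of a real
# permutedMNIST dataset so the grouping is easy to see.
import math
from collections import defaultdict

targets = [0, 1, 2, 3, 0, 1, 2, 3]  # 4 classes, 2 samples each
num_tasks, num_classes = 2, 4
num_classes_per_task = math.floor(num_classes / num_tasks)  # -> 2

class_indices = defaultdict(list)
for idx, target in enumerate(targets):
    class_indices[target].append(idx)

task_indices = defaultdict(list)
for i in range(num_tasks):
    for j in range(num_classes_per_task):
        task_indices[i].extend(class_indices[j + (i * num_classes_per_task)])

assert task_indices[0] == [0, 4, 1, 5]  # classes 0 and 1
assert task_indices[1] == [2, 6, 3, 7]  # classes 2 and 3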
from module import Module
from xml2aloe import MakeModule
import sys, os, getopt

argv = sys.argv
usage = argv[0] + ' -i <input_file> -o <output_dir>'

input_file = None
output_dir = None

try:
    # Parse from argv[1:]; passing argv itself makes getopt stop at the
    # script name and return no options at all.
    opts, args = getopt.getopt(argv[1:], "hi:o:", ["input_file=", "output_dir="])
except getopt.GetoptError:
    print usage
    sys.exit(2)

for opt, arg in opts:
    if opt == '-h':
        print usage
        sys.exit()
    elif opt in ("-i", "--input_file"):
        input_file = arg
    elif opt in ("-o", "--output_dir"):
        output_dir = arg

if input_file is None or output_dir is None:
    print usage
    sys.exit(2)

filename = os.path.basename(input_file).split('.')[0]

print filename + '\n'
print input_file + '\n'
print output_dir + '\n'

#m = Module("binsource")
#m.readHeader(input_file)
#MakeModule(m,output_dir)
#print m.toString()
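# The getopt loop above is the classic short/long option pattern; run against
# a hand-written argument list (the script name itself must not be in the
# list), it behaves like this:
import getopt

opts, args = getopt.getopt(['-i', 'mod.xml', '-o', 'out'],
                           "hi:o:", ["input_file=", "output_dir="])
assert opts == [('-i', 'mod.xml'), ('-o', 'out')]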
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README_LOCAL = open(os.path.join(here, 'README.rst')).read() README_GLOBAL = open(os.path.join(here, 'README-NP.rst')).read() requires = [ 'setuptools', 'netprofile_entities >= 0.3', 'netprofile_hosts >= 0.3' ] setup( name='netprofile_devices', version='0.3', description='NetProfile Administrative UI - Devices Module', license='GNU Affero General Public License v3 or later (AGPLv3+)', long_description=README_LOCAL + '\n\n' + README_GLOBAL, classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: Implementation :: CPython', 'Framework :: Pyramid', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', 'Topic :: Office/Business :: Groupware', 'Topic :: Office/Business :: Scheduling', 'Development Status :: 3 - Alpha', 'Intended Audience :: Customer Service', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'Intended Audience :: Telecommunications Industry', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Operating System :: OS Independent' ], author='Alex Unigovsky', author_email='unik@compot.ru', url='https://github.com/unikmhz/npui', keywords='web wsgi pyramid np netprofile crm billing accounting network isp', packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='netprofile_devices', install_requires=requires, entry_points="""\ [netprofile.modules] devices = netprofile_devices:Module """, message_extractors={'.' : [ ('**.py', 'python', None), ('**.pt', 'xml', None), ('**.mak', 'mako', None) ]} )
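# A hedged sketch of how an entry point declared the way setup() does above
# (group "netprofile.modules") is typically discovered at runtime with
# pkg_resources. Whether NetProfile itself loads modules exactly like this is
# an assumption; only the group name is taken from the setup() call.
import pkg_resources

def iter_netprofile_modules():
    for ep in pkg_resources.iter_entry_points('netprofile.modules'):
        #ep.name is "devices"; ep.load() imports netprofile_devices:Module
        yield ep.name, ep.load()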
from misp2yara import mispevent2yara, mispattrs2yara, MISPRuleTemplate import sys import json import os from optparse import OptionParser def rules2json_export(rules, extra_comment=''): return json.dumps([rule2json_export(r) for r in rules]) def rule2json_export(rule, extra_comment=''): json_dict = { 'value': str(rule), 'comment': '', 'valid': None } if isinstance(rule, MISPRuleTemplate): if rule.loaded_from_source: json_dict['comment'] += 'Loaded from source. ' else: json_dict['comment'] += 'Generated. ' if rule.autofixed: json_dict['comment'] += 'May be unreliable due to automatic repairs: ' json_dict['comment'] += rule.autofixed_comment json_dict['valid'] = True return json_dict else: json_dict['comment'] += 'Broken yara attribute. Could not parse or repair.' json_dict['valid'] = False return json_dict def file_is_empty(path): return os.stat(path).st_size==0 def output_json(output_path, output_rules): with open(output_path, 'a+', encoding='utf-8') as f: if file_is_empty(output_path): pass else: f.write(',') to_write = rules2json_export(output_rules)[1:-1] f.write(to_write) def output_raw(output_path, output_rules): with open(output_path, 'a+', encoding='utf-8') as f: to_write = '\n\n'.join([str(r) for r in output_rules]) f.write(to_write) if __name__ == "__main__": parser = OptionParser() parser.add_option("-i", "--input", dest="in_file", help="input file", metavar="FILE") parser.add_option("-g", "--out-generated", dest="out_gen", help="output for generated rules", metavar="FILE") parser.add_option("-a", "--out-asis", dest="out_asis", help="output for as-is rules", metavar="FILE") parser.add_option("-r", "--raw", action="store_true", dest="raw_output", default=False, help="outputs raw yara rules instead of json-structured rules") (options, args) = parser.parse_args() in_path = options.in_file out_path_gen = options.out_gen out_path_asis = options.out_asis raw_mode = options.raw_output loaded = None with open(in_path, 'r', encoding='utf-8') as in_file: content = in_file.read() if content: loaded = json.loads(content)['response'] # raise Warning("loaded {}".format(content)) if 'Attribute' in loaded: generated, asis_valid, asis_broken = mispattrs2yara(loaded['Attribute']) elif isinstance(loaded, list): generated = [] asis_valid = [] asis_broken = [] for event_dict in loaded: if 'Event' in event_dict: curr_generated, curr_asis_valid, curr_asis_broken = mispevent2yara(event_dict['Event']) generated += curr_generated asis_valid += curr_asis_valid asis_broken += curr_asis_broken else: raise Exception('Json doesn\'t seem to be an list of attributes or events') else: raise Exception('Json doesn\'t seem to be an list of attributes or events') if raw_mode: output_raw(out_path_gen, generated) output_raw(out_path_asis, asis_valid + asis_broken) else: output_json(out_path_gen, generated) output_json(out_path_asis, asis_valid + asis_broken)
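# output_json() above appends comma-separated JSON object fragments without
# enclosing brackets, so the output file only becomes valid JSON once a reader
# wraps it. A minimal sketch of that read-back step; the wrapping convention
# is inferred from the writer, not documented in this file.
import json

def load_fragment_file(path):
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    if not content:
        return []
    return json.loads('[' + content + ']')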
from tests.util.base import dbloader, db, default_account
import sys

from UM.Logger import Logger
try:
    from . import ThreeMFWriter
except ImportError:
    Logger.log("w", "Could not import ThreeMFWriter; libSavitar may be missing")

from . import ThreeMFWorkspaceWriter

from UM.i18n import i18nCatalog
from UM.Platform import Platform
i18n_catalog = i18nCatalog("uranium")


def getMetaData():
    # Workaround for OS X not supporting double file extensions correctly.
    if Platform.isOSX():
        workspace_extension = "3mf"
    else:
        workspace_extension = "curaproject.3mf"

    metaData = {}
    if "3MFWriter.ThreeMFWriter" in sys.modules:
        metaData["mesh_writer"] = {
            "output": [{
                "extension": "3mf",
                "description": i18n_catalog.i18nc("@item:inlistbox", "3MF file"),
                "mime_type": "application/vnd.ms-package.3dmanufacturing-3dmodel+xml",
                "mode": ThreeMFWriter.ThreeMFWriter.OutputMode.BinaryMode
            }]
        }
        metaData["workspace_writer"] = {
            "output": [{
                "extension": workspace_extension,
                "description": i18n_catalog.i18nc("@item:inlistbox", "Cura Project 3MF file"),
                "mime_type": "application/x-curaproject+xml",
                "mode": ThreeMFWorkspaceWriter.ThreeMFWorkspaceWriter.OutputMode.BinaryMode
            }]
        }

    return metaData


def register(app):
    if "3MFWriter.ThreeMFWriter" in sys.modules:
        return {"mesh_writer": ThreeMFWriter.ThreeMFWriter(),
                "workspace_writer": ThreeMFWorkspaceWriter.ThreeMFWorkspaceWriter()}
    else:
        return {}
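# getMetaData()/register() above only advertise the mesh writer when
# "3MFWriter.ThreeMFWriter" is present in sys.modules, i.e. when the optional
# import at the top actually succeeded. A stripped-down version of that
# feature-detection pattern, using the standard-library lzma module as a
# stand-in for the optional dependency:
import sys

try:
    import lzma  # may be missing on minimal Python builds
except ImportError:
    pass

def get_available_plugins():
    plugins = {}
    if "lzma" in sys.modules:  # only advertise what actually imported
        plugins["compressor"] = lzma
    return plugins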
""" This file contains celery tasks for sending email """ import logging from celery import shared_task from celery.exceptions import MaxRetriesExceededError from django.conf import settings from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user from django.contrib.sites.models import Site from edx_ace import ace from edx_ace.errors import RecoverableChannelDeliveryError from edx_ace.message import Message from edx_django_utils.monitoring import set_code_owner_attribute from common.djangoapps.track import segment from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.djangoapps.user_authn.utils import check_pwned_password from openedx.core.lib.celery.task_utils import emulate_http_request log = logging.getLogger('edx.celery.task') @shared_task @set_code_owner_attribute def check_pwned_password_and_send_track_event(user_id, password, internal_user=False, is_new_user=False): """ Check the Pwned Databases and send its event to Segment. """ try: pwned_properties = check_pwned_password(password) if pwned_properties: pwned_properties['internal_user'] = internal_user pwned_properties['new_user'] = is_new_user segment.track(user_id, 'edx.bi.user.pwned.password.status', pwned_properties) except Exception: # pylint: disable=W0703 log.exception( 'Unable to get response from pwned password api for user_id: "%s"', user_id, ) return None # lint-amnesty, pylint: disable=raise-missing-from @shared_task(bind=True) @set_code_owner_attribute def send_activation_email(self, msg_string, from_address=None): """ Sending an activation email to the user. """ msg = Message.from_string(msg_string) max_retries = settings.RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS retries = self.request.retries if from_address is None: from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS') or ( configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL) ) msg.options['from_address'] = from_address dest_addr = msg.recipient.email_address site = Site.objects.get_current() user = User.objects.get(id=msg.recipient.lms_user_id) try: with emulate_http_request(site=site, user=user): ace.send(msg) except RecoverableChannelDeliveryError: log.info('Retrying sending email to user {dest_addr}, attempt # {attempt} of {max_attempts}'.format( dest_addr=dest_addr, attempt=retries, max_attempts=max_retries )) try: self.retry(countdown=settings.RETRY_ACTIVATION_EMAIL_TIMEOUT, max_retries=max_retries) except MaxRetriesExceededError: log.error( 'Unable to send activation email to user from "%s" to "%s"', from_address, dest_addr, exc_info=True ) except Exception: log.exception( 'Unable to send activation email to user from "%s" to "%s"', from_address, dest_addr, ) raise Exception # lint-amnesty, pylint: disable=raise-missing-from
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh.""" from import_shims.warn import warn_deprecated_import warn_deprecated_import('ccx.tests.test_tasks', 'lms.djangoapps.ccx.tests.test_tasks') from lms.djangoapps.ccx.tests.test_tasks import *
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh.""" from import_shims.warn import warn_deprecated_import warn_deprecated_import('commerce.management.commands', 'lms.djangoapps.commerce.management.commands') from lms.djangoapps.commerce.management.commands import *
import os import re import subprocess from pyload import PKGDIR from pyload.core.utils.convert import to_str from pyload.plugins.base.extractor import ArchiveError, BaseExtractor, CRCError, PasswordError from pyload.plugins.helpers import renice class UnRar(BaseExtractor): __name__ = "UnRar" __type__ = "extractor" __version__ = "1.44" __status__ = "testing" __config__ = [("ignore_warnings", "bool", "Ignore unrar warnings", False)] __description__ = """RAR extractor plugin""" __license__ = "GPLv3" __authors__ = [ ("RaNaN", "RaNaN@pyload.net"), ("Walter Purcaro", "vuolter@gmail.com"), ("Immenz", "immenz@gmx.net"), ("GammaCode", "nitzo2001[AT]yahoo[DOT]com"), ] CMD = "unrar" EXTENSIONS = [ "rar", "cab", "arj", "lzh", "tar", "gz", "ace", "uue", "bz2", "jar", "iso", "xz", "z", ] _RE_PART = re.compile(r"\.(part|r)\d+(\.rar|\.rev)?(\.bad)?|\.rar$", re.I) _RE_FIXNAME = re.compile(r"Building (.+)") _RE_FILES_V4 = re.compile( r"^([* ])(.+?)\s+(\d+)\s+(\d+)\s+(\d+%|-->|<--)\s+([\d-]+)\s+([\d:]+)\s*([ACHIRS.rw\-]+)\s+([0-9A-F]{8})\s+(\w+)\s+([\d.]+)", re.M ) _RE_FILES_V5 = re.compile( r"^([* ])\s*([ACHIRS.rw\-]+)\s+(\d+)(?:\s+\d+)?(?:\s+(?:\d+%|-->|<--))?\s+([\d-]+)\s+([\d:]+)(?:\s+[0-9A-F]{8})?\s+(.+)", re.M ) _RE_BADPWD = re.compile(r"password", re.I) _RE_BADCRC = re.compile( r"encrypted|damaged|CRC failed|checksum error|corrupt", re.I ) _RE_VERSION = re.compile(r"(?:UN)?RAR\s(\d+\.\d+)", re.I) @classmethod def find(cls): try: if os.name == "nt": cls.CMD = os.path.join(PKGDIR, "lib", "RAR.exe") else: cls.CMD = "rar" p = subprocess.Popen( [cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) # cls.__name__ = "RAR" cls.REPAIR = True except OSError: try: if os.name == "nt": cls.CMD = os.path.join(PKGDIR, "lib", "UnRAR.exe") else: cls.CMD = "unrar" p = subprocess.Popen( [cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) except OSError: return False m = cls._RE_VERSION.search(out) if m is not None: cls.VERSION = m.group(1) cls._RE_FILES = cls._RE_FILES_V4 if float(cls.VERSION) < 5 else cls._RE_FILES_V5 return True else: return False @classmethod def ismultipart(cls, filename): return cls._RE_PART.search(filename) is not None def verify(self, password=None): p = self.call_cmd("l", "-v", self.filename, password=password) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) if self._RE_BADPWD.search(err): raise PasswordError if self._RE_BADCRC.search(err): raise CRCError(err) #: Output is only used to check if password protected files are present for groups in self._RE_FILES.findall(out): if groups[0] == "*": raise PasswordError def repair(self): p = self.call_cmd("rc", self.filename) #: Communicate and retrieve stderr self.progress(p) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) if err or p.returncode: p = self.call_cmd("r", self.filename) # communicate and retrieve stderr self.progress(p) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) if err or p.returncode: return False else: dir = os.path.dirname(self.filename) name = self._RE_FIXNAME.search(out).group(1) self.filename = os.path.join(dir, name) return True def progress(self, process): s = b"" while True: c = process.stdout.read(1) #: Quit loop on eof if not c: break #: Reading a percentage sign -> set progress and restart if c == b'%' and s: self.pyfile.set_progress(int(s)) s = b"" #: Not reading a digit -> therefore restart elif not 
c.isdigit(): s = b"" #: Add digit to progressstring else: s += c def extract(self, password=None): command = "x" if self.fullpath else "e" p = self.call_cmd(command, self.filename, self.dest, password=password) #: Communicate and retrieve stderr self.progress(p) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) if err: if self._RE_BADPWD.search(err): raise PasswordError elif self._RE_BADCRC.search(err): raise CRCError(err) elif self.config.get("ignore_warnings", False) and err.startswith( "WARNING:" ): pass else: #: Raise error if anything is on stderr raise ArchiveError(err) if p.returncode: raise ArchiveError(self._("Process return code: {}").format(p.returncode)) return self.list(password) def chunks(self): files = [] dir, name = os.path.split(self.filename) #: eventually Multipart Files files.extend( os.path.join(dir, os.path.basename(_f)) for _f in filter(self.ismultipart, os.listdir(dir)) if self._RE_PART.sub("", name) == self._RE_PART.sub("", _f) ) #: Actually extracted file if self.filename not in files: files.append(self.filename) return files def list(self, password=None): command = "v" if self.fullpath else "l" p = self.call_cmd(command, "-v", self.filename, password=password) out, err = (to_str(r).strip() if r else "" for r in p.communicate()) if "Cannot open" in err: raise ArchiveError(self._("Cannot open file")) if err: #: Only log error at this point self.log_error(err) files = set() f_grp = 5 if float(self.VERSION) >= 5 else 1 for groups in self._RE_FILES.findall(out): f = groups[f_grp].strip() if not self.fullpath: f = os.path.basename(f) files.add(os.path.join(self.dest, f)) self.files = list(files) return self.files def call_cmd(self, command, *xargs, **kwargs): args = [] if float(self.VERSION) >= 5.5: #: Specify UTF-8 encoding args.append("-scf") #: Overwrite flag if self.overwrite: args.append("-o+") else: args.append("-o-") args.append("-or") for word in self.excludefiles: args.append("-x{}".format(word.strip())) #: Assume yes on all queries args.append("-y") #: Disable comments show args.append("-c-") #: Set a password password = kwargs.get("password") if password: args.append("-p{}".format(password)) else: args.append("-p-") if self.keepbroken: args.append("-kb") # NOTE: return codes are not reliable, some kind of threading, cleanup # whatever issue call = [self.CMD, command] + args + list(xargs) self.log_debug("EXECUTE " + " ".join(call)) call = [to_str(cmd) for cmd in call] p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE) renice(p.pid, self.priority) return p
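# progress() above reads unrar's stdout one byte at a time and treats any
# digit run terminated by "%" as the current percentage. The same state
# machine, run against a canned byte stream instead of a live process so the
# behaviour is easy to verify:
import io

def parse_progress(stream):
    seen = []
    s = b""
    while True:
        c = stream.read(1)
        if not c:  # eof
            break
        if c == b"%" and s:
            seen.append(int(s))  # a complete "NN%" token
            s = b""
        elif not c.isdigit():
            s = b""  # any other byte resets the digit run
        else:
            s += c
    return seen

assert parse_progress(io.BytesIO(b"Extracting  5%... 42%\n100%")) == [5, 42, 100]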
try:
    from pyparsing import Literal, CaselessLiteral, Word, OneOrMore, ZeroOrMore, \
        Forward, delimitedList, Group, Optional, Combine, alphas, nums, restOfLine, cStyleComment, \
        alphanums, ParseException, ParseResults, Keyword, StringEnd, replaceWith
except ImportError:
    print "Module pyparsing not found."
    exit(1)

import ptypes
import sys

cvtInt = lambda toks: int(toks[0])

def parseVariableDef(toks):
    t = toks[0][0]
    pointer = toks[0][1]
    name = toks[0][2]
    array_size = toks[0][3]
    attributes = toks[0][4]

    if array_size is not None:
        t = ptypes.ArrayType(t, array_size)
    if pointer is not None:
        t = ptypes.PointerType(t)
    return ptypes.Member(name, t, attributes)

bnf = None

def SPICE_BNF():
    global bnf

    if not bnf:
        # punctuation
        colon = Literal(":").suppress()
        lbrace = Literal("{").suppress()
        rbrace = Literal("}").suppress()
        lbrack = Literal("[").suppress()
        rbrack = Literal("]").suppress()
        lparen = Literal("(").suppress()
        rparen = Literal(")").suppress()
        equals = Literal("=").suppress()
        comma = Literal(",").suppress()
        semi = Literal(";").suppress()

        # primitive types
        int8_ = Keyword("int8").setParseAction(replaceWith(ptypes.int8))
        uint8_ = Keyword("uint8").setParseAction(replaceWith(ptypes.uint8))
        int16_ = Keyword("int16").setParseAction(replaceWith(ptypes.int16))
        uint16_ = Keyword("uint16").setParseAction(replaceWith(ptypes.uint16))
        int32_ = Keyword("int32").setParseAction(replaceWith(ptypes.int32))
        uint32_ = Keyword("uint32").setParseAction(replaceWith(ptypes.uint32))
        int64_ = Keyword("int64").setParseAction(replaceWith(ptypes.int64))
        uint64_ = Keyword("uint64").setParseAction(replaceWith(ptypes.uint64))

        # keywords
        enum32_ = Keyword("enum32").setParseAction(replaceWith(32))
        enum16_ = Keyword("enum16").setParseAction(replaceWith(16))
        enum8_ = Keyword("enum8").setParseAction(replaceWith(8))
        flags32_ = Keyword("flags32").setParseAction(replaceWith(32))
        flags16_ = Keyword("flags16").setParseAction(replaceWith(16))
        flags8_ = Keyword("flags8").setParseAction(replaceWith(8))
        channel_ = Keyword("channel")
        server_ = Keyword("server")
        client_ = Keyword("client")
        protocol_ = Keyword("protocol")
        typedef_ = Keyword("typedef")
        struct_ = Keyword("struct")
        message_ = Keyword("message")
        image_size_ = Keyword("image_size")
        bytes_ = Keyword("bytes")
        cstring_ = Keyword("cstring")
        switch_ = Keyword("switch")
        default_ = Keyword("default")
        case_ = Keyword("case")

        identifier = Word(alphas, alphanums + "_")
        enumname = Word(alphanums + "_")

        integer = (Combine(CaselessLiteral("0x") + Word(nums + "abcdefABCDEF")) |
                   Word(nums + "+-", nums)).setName("int").setParseAction(cvtInt)

        typename = identifier.copy().setParseAction(lambda toks: ptypes.TypeRef(str(toks[0])))

        # This is just normal "types", i.e. not channels or messages
        typeSpec = Forward()

        attributeValue = integer ^ identifier
        attribute = Group(Combine("@" + identifier) + Optional(lparen + delimitedList(attributeValue) + rparen))
        attributes = Group(ZeroOrMore(attribute))
        arraySizeSpecImage = Group(image_size_ + lparen + integer + comma + identifier + comma + identifier + rparen)
        arraySizeSpecBytes = Group(bytes_ + lparen + identifier + comma + identifier + rparen)
        arraySizeSpecCString = Group(cstring_ + lparen + rparen)
        arraySizeSpec = lbrack + Optional(identifier ^ integer ^ arraySizeSpecImage ^ arraySizeSpecBytes ^ arraySizeSpecCString, default="") + rbrack
        variableDef = Group(typeSpec + Optional("*", default=None) + identifier + Optional(arraySizeSpec, default=None) + attributes - semi) \
            .setParseAction(parseVariableDef)

        switchCase = Group(Group(OneOrMore(default_.setParseAction(replaceWith(None)) + colon | Group(case_.suppress() + Optional("!", default="") + identifier) + colon)) + variableDef) \
            .setParseAction(lambda toks: ptypes.SwitchCase(toks[0][0], toks[0][1]))
        switchBody = Group(switch_ + lparen + delimitedList(identifier, delim='.', combine=True) + rparen + lbrace + Group(OneOrMore(switchCase)) + rbrace + identifier + attributes - semi) \
            .setParseAction(lambda toks: ptypes.Switch(toks[0][1], toks[0][2], toks[0][3], toks[0][4]))

        messageBody = structBody = Group(lbrace + ZeroOrMore(variableDef | switchBody) + rbrace)
        structSpec = Group(struct_ + identifier + structBody + attributes).setParseAction(lambda toks: ptypes.StructType(toks[0][1], toks[0][2], toks[0][3]))

        # Have to use the longest match for type, in case a user-defined type
        # name starts with a keyword type, like "channel_type"
        typeSpec << (structSpec ^ int8_ ^ uint8_ ^ int16_ ^ uint16_ ^ int32_ ^ uint32_ ^ int64_ ^ uint64_ ^ typename).setName("type")

        flagsBody = enumBody = Group(lbrace + delimitedList(Group(enumname + Optional(equals + integer))) + Optional(comma) + rbrace)

        messageSpec = Group(message_ + messageBody + attributes).setParseAction(lambda toks: ptypes.MessageType(None, toks[0][1], toks[0][2])) | typename

        channelParent = Optional(colon + typename, default=None)
        channelMessage = Group(messageSpec + identifier + Optional(equals + integer, default=None) + semi) \
            .setParseAction(lambda toks: ptypes.ChannelMember(toks[0][1], toks[0][0], toks[0][2]))
        channelBody = channelParent + Group(lbrace + ZeroOrMore(server_ + colon | client_ + colon | channelMessage) + rbrace)

        enum_ = (enum32_ | enum16_ | enum8_)
        flags_ = (flags32_ | flags16_ | flags8_)
        enumDef = Group(enum_ + identifier + enumBody + attributes - semi).setParseAction(lambda toks: ptypes.EnumType(toks[0][0], toks[0][1], toks[0][2], toks[0][3]))
        flagsDef = Group(flags_ + identifier + flagsBody + attributes - semi).setParseAction(lambda toks: ptypes.FlagsType(toks[0][0], toks[0][1], toks[0][2], toks[0][3]))
        messageDef = Group(message_ + identifier + messageBody + attributes - semi).setParseAction(lambda toks: ptypes.MessageType(toks[0][1], toks[0][2], toks[0][3]))
        channelDef = Group(channel_ + identifier + channelBody + attributes - semi).setParseAction(lambda toks: ptypes.ChannelType(toks[0][1], toks[0][2], toks[0][3], toks[0][4]))
        structDef = Group(struct_ + identifier + structBody + attributes - semi).setParseAction(lambda toks: ptypes.StructType(toks[0][1], toks[0][2], toks[0][3]))
        typedefDef = Group(typedef_ + identifier + typeSpec + attributes - semi).setParseAction(lambda toks: ptypes.TypeAlias(toks[0][1], toks[0][2], toks[0][3]))

        definitions = typedefDef | structDef | enumDef | flagsDef | messageDef | channelDef

        protocolChannel = Group(typename + identifier + Optional(equals + integer, default=None) + semi) \
            .setParseAction(lambda toks: ptypes.ProtocolMember(toks[0][1], toks[0][0], toks[0][2]))
        protocolDef = Group(protocol_ + identifier + Group(lbrace + ZeroOrMore(protocolChannel) + rbrace) + semi) \
            .setParseAction(lambda toks: ptypes.ProtocolType(toks[0][1], toks[0][2]))

        bnf = ZeroOrMore(definitions) + protocolDef + StringEnd()

        singleLineComment = "//" + restOfLine
        bnf.ignore(singleLineComment)
        bnf.ignore(cStyleComment)

    return bnf


def parse(filename):
    try:
        bnf = SPICE_BNF()
        types = bnf.parseFile(filename)
    except ParseException, err:
        print >> sys.stderr, err.line
        print >> sys.stderr, " " * (err.column - 1) + "^"
        print >> sys.stderr, err
        return None

    for t in types:
        t.resolve()
        t.register()
    protocol = types[-1]
    return protocol
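# A minimal usage sketch for the parser above (not part of the original
# module; "spice.proto" is an illustrative file name):
#
#     protocol = parse("spice.proto")
#     if protocol is None:
#         sys.exit(1)     # parse() already printed the error location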
from flumotion.common import testsuite

import os
import shutil

from flumotion.common import xdg


class TestXDGConfig(testsuite.TestCase):

    def setUp(self):
        self.old_home = os.environ.get('HOME')
        self.old_xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
        self.old_xdg_config_dirs = os.environ.get('XDG_CONFIG_DIRS')

        self.home = self.mktemp()
        os.mkdir(self.home)
        os.environ['HOME'] = self.home

        self.xdg_config_home = self.mktemp()
        os.mkdir(self.xdg_config_home)
        os.mkdir(os.path.join(self.xdg_config_home, xdg.APPLICATION))

        self.xdg_config_dir1 = self.mktemp()
        os.mkdir(self.xdg_config_dir1)
        os.mkdir(os.path.join(self.xdg_config_dir1, xdg.APPLICATION))

        self.xdg_config_dir2 = self.mktemp()
        os.mkdir(self.xdg_config_dir2)
        os.mkdir(os.path.join(self.xdg_config_dir2, xdg.APPLICATION))

        os.environ['XDG_CONFIG_HOME'] = self.xdg_config_home
        os.environ['XDG_CONFIG_DIRS'] = ':'.join((self.xdg_config_dir1,
                                                  self.xdg_config_dir2))

    def tearDown(self):
        shutil.rmtree(self.home)
        shutil.rmtree(self.xdg_config_home, True)
        shutil.rmtree(self.xdg_config_dir1, True)
        shutil.rmtree(self.xdg_config_dir2, True)

        if self.old_home is not None:
            os.environ['HOME'] = self.old_home
        else:
            del os.environ['HOME']
        if self.old_xdg_config_home is not None:
            os.environ['XDG_CONFIG_HOME'] = self.old_xdg_config_home
        else:
            del os.environ['XDG_CONFIG_HOME']
        if self.old_xdg_config_dirs is not None:
            os.environ['XDG_CONFIG_DIRS'] = self.old_xdg_config_dirs
        else:
            del os.environ['XDG_CONFIG_DIRS']

    def testConfigReadPath(self):
        app = xdg.APPLICATION

        # no such config file exists
        self.assertIdentical(xdg.config_read_path('test'), None)

        # create a config file in the first XDG config dir
        path = os.path.join(self.xdg_config_dir1, app, 'test')
        file(path, 'w').close()

        # should now be found
        self.assertEquals(xdg.config_read_path('test'), path)

        # create a config file in the second XDG config dir, should not change
        # the order
        path2 = os.path.join(self.xdg_config_dir2, app, 'test')
        file(path2, 'w').close()
        self.assertEquals(xdg.config_read_path('test'), path)

        # remove the file from the first XDG config dir, the second one should
        # be found
        os.remove(path)
        self.assertEquals(xdg.config_read_path('test'), path2)

        # create a config file in the XDG home dir, should come first
        path_home = os.path.join(self.xdg_config_home, app, 'test')
        file(path_home, 'w').close()
        self.assertEquals(xdg.config_read_path('test'), path_home)

        # chmod that file 000, should be skipped
        old_perms = os.stat(path_home).st_mode
        os.chmod(path_home, 0000)
        self.assertEquals(xdg.config_read_path('test'), path2)
        os.chmod(path_home, old_perms)

    def testConfigWritePath(self):
        app = xdg.APPLICATION

        # the file should be created
        path = os.path.join(self.xdg_config_home, app, 'write')
        f = xdg.config_write_path('write', 'wb')
        self.assertEquals(f.name, path)
        self.assertEquals(f.mode, 'wb')

        # the subdir should be created
        path = os.path.join(self.xdg_config_home, app, 'subdir', 'write')
        f = xdg.config_write_path('subdir/write')
        self.assertEquals(os.path.isdir(os.path.join(
            self.xdg_config_home, app, 'subdir')), True)
        self.assertEquals(f.name, path)
        # default mode is 'w'
        self.assertEquals(f.mode, 'w')

        f.write('abc')
        f.close()
        f = xdg.config_write_path('subdir/write', 'a')
        f.write('def')
        f.close()
        self.assertEquals(file(path).read(), 'abcdef')

    def testUnsetHomedir(self):
        app = xdg.APPLICATION
        del os.environ['XDG_CONFIG_HOME']

        # with $XDG_CONFIG_HOME unset, $HOME/.config should be used
        f = xdg.config_write_path('unset', 'w')
        self.assertEquals(f.name,
                          os.path.join(self.home, '.config', app, 'unset'))

        os.environ['XDG_CONFIG_HOME'] = self.xdg_config_home

    def testNotWritable(self):
        # errors in writing should be reported
        old_perms = os.stat(self.xdg_config_home).st_mode
        os.chmod(self.xdg_config_home, 0000)
        self.assertRaises(OSError, xdg.config_write_path, 'error', 'w')
        os.chmod(self.xdg_config_home, old_perms)
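# These tests follow the Twisted trial conventions used throughout
# flumotion's test suite; assuming the usual flumotion checkout layout
# (the module path below is an assumption), they would be run with:
#
#     trial flumotion.test.test_common_xdg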
CommandlineUsage = """Material - Tool to work with FreeCAD Material definition cards

Usage:
   Material [Options] card-file-name

Options:
 -c, --output-csv=file-name     write a comma separated grid with the material data

Exit:
 0      No Error or Warning found
 1      Argument error, wrong or missing arguments given

Examples:

   Material "StandardMaterial/Steel.FCMat"

Author:
  (c) 2013 Juergen Riegel
  mail@juergen-riegel.net
  Licence: LGPL

Version:
  0.1
"""


def importFCMat(fileName):
    "Read a FCMat file into a dictionary"
    import ConfigParser
    Config = ConfigParser.ConfigParser()
    Config.read(fileName)
    dict1 = {}
    for section in Config.sections():
        options = Config.options(section)
        for option in options:
            dict1[section + '_' + option] = Config.get(section, option)
    return dict1


def exportFCMat(fileName, matDict):
    "Write a material dictionary to a FCMat file"
    import ConfigParser
    Config = ConfigParser.ConfigParser()
    # create groups; keys are of the form '<section>_<option>', so split only
    # on the first underscore (option names may contain underscores themselves)
    for x in matDict.keys():
        grp, key = x.split('_', 1)
        if not Config.has_section(grp):
            Config.add_section(grp)
    # fill groups
    for x in matDict.keys():
        grp, key = x.split('_', 1)
        Config.set(grp, key, matDict[x])
    Preamble = "# This is a FreeCAD material-card file\n\n"
    # write the configuration file
    with open(fileName, 'wb') as configfile:
        configfile.write(Preamble)
        Config.write(configfile)


if __name__ == '__main__':
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:", ["output-csv="])
    except getopt.GetoptError:
        # print help information and exit
        sys.stderr.write(CommandlineUsage)
        sys.exit(1)
    if not args:
        # no card file given -> argument error
        sys.stderr.write(CommandlineUsage)
        sys.exit(1)
    # checking on the options
    for o, a in opts:
        if o in ("-c", "--output-csv"):
            print "writing file: " + a + "\n"
            OutPath = a  # NOTE: CSV output is accepted here but not implemented yet
    # running through the files
    FileName = args[0]
    kv_map = importFCMat(FileName)
    for k in kv_map.keys():
        print repr(k) + " : " + repr(kv_map[k])
    sys.exit(0)  # no error
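# A round-trip sketch for the two helpers above (illustrative only; the
# 'General'/'name' section and option names are assumptions, but the
# '<section>_<option>' key format follows directly from importFCMat()):
#
#     mat = importFCMat("StandardMaterial/Steel.FCMat")
#     mat['General_name'] = 'Steel (copy)'
#     exportFCMat("Steel-copy.FCMat", mat)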
from spack import *


class Dtcmp(Package):
    """The Datatype Comparison Library provides comparison operations and
       parallel sort algorithms for MPI applications."""

    homepage = "https://github.com/hpc/dtcmp"
    url = "https://github.com/hpc/dtcmp/releases/download/v1.0.3/dtcmp-1.0.3.tar.gz"

    version('1.0.3', 'cdd8ccf71e8ff67de2558594a7fcd317')

    depends_on('mpi')
    depends_on('lwgrp')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix,
                  "--with-lwgrp=" + spec['lwgrp'].prefix)
        make()
        make("install")
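# With this file on a Spack package repository path, the standard workflow
# applies (dependencies are resolved from the depends_on() declarations):
#
#     spack install dtcmp        # builds lwgrp and an MPI implementation first
#     spack find -d dtcmp        # show the installed spec with its dependencies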
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_protect

import approver.utils as utils


def error404(request):
    context = {
        'content': 'approver/404.html',
    }
    return utils.layout_render(request, context)
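# Wiring sketch: Django resolves a project-wide 404 handler through the root
# URLconf. Assuming this module lives at approver/views.py (an assumption),
# the project's urls.py would declare:
#
#     handler404 = 'approver.views.error404'
#
# Note that newer Django versions pass the handler an extra `exception`
# argument, so the single-argument signature above matches older releases.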