function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def test_update_domain_empty(self): self.assertRaises(exc.MissingDNSSettings, self.client.update_domain, self.domain)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_delete(self): clt = self.client mgr = clt._manager dom = self.domain mgr._async_call = Mock(return_value=({}, {})) uri = "/domains/%s" % utils.get_id(dom) clt.delete(dom) mgr._async_call.assert_called_once_with(uri, method="DELETE", error_class=exc.DomainDeletionFailed, has_response=False)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_list_subdomains(self): clt = self.client mgr = clt._manager dom = self.domain resp_body = {'Something': 'here'} clt.method_get = Mock(return_value=({}, resp_body)) uri = "/domains?name=%s&limit=5" % dom.name clt.list_subdomains(dom, limit=5) clt.method_get.assert_called_once_with(uri)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_search_records(self): clt = self.client mgr = clt._manager dom = self.domain typ = "A" uri = "/domains/%s/records?type=%s" % (utils.get_id(dom), typ) ret_body = {"records": [{"type": typ}]} mgr.count = 0 def mock_get(uri): if mgr.count: return ({}, ret_body) mgr.count += 1 ret = {"totalEntries": 2, "links": [ {"href": uri, "rel": "next"}]} ret.update(ret_body) return ({}, ret) clt.method_get = Mock(wraps=mock_get) clt.search_records(dom, typ) calls = [call(uri), call(uri)] clt.method_get.assert_has_calls(calls)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_find_record(self): clt = self.client mgr = clt._manager dom = self.domain typ = "A" nm = utils.random_unicode() data = "0.0.0.0" ret_body = {"records": [{ "accountId": "728829", "created": "2012-09-21T21:32:27.000+0000", "emailAddress": "me@example.com", "id": "3448214", "name": "example.com", "updated": "2012-09-21T21:35:45.000+0000" }]} clt.method_get = Mock(return_value=({}, ret_body)) uri = "/domains/%s/records?type=%s&name=%s&data=%s" % ( utils.get_id(dom), typ, nm, data) clt.find_record(dom, typ, name=nm, data=data) clt.method_get.assert_called_once_with(uri)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_find_record_not_unique(self): clt = self.client mgr = clt._manager dom = self.domain typ = "A" nm = utils.random_unicode() data = "0.0.0.0" ret_body = {"records": [{ "accountId": "728829", "created": "2012-09-21T21:32:27.000+0000", "emailAddress": "me@example.com", "id": "3448214", "name": "example.com", "updated": "2012-09-21T21:35:45.000+0000" }, {"accountId": "728829", "created": "2012-09-21T21:32:27.000+0000", "emailAddress": "me@example.com", "id": "3448214", "name": "example.com", "updated": "2012-09-21T21:35:45.000+0000" }]} clt.method_get = Mock(return_value=({}, ret_body)) uri = "/domains/%s/records?type=%s&name=%s&data=%s" % ( utils.get_id(dom), typ, nm, data) self.assertRaises(exc.DomainRecordNotUnique, clt.find_record, dom, typ, name=nm, data=data)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_get_record(self): clt = self.client mgr = clt._manager dom = self.domain nm = utils.random_unicode() rec_id = utils.random_unicode() rec_dict = {"id": rec_id, "name": nm} mgr.api.method_get = Mock(return_value=(None, rec_dict)) ret = clt.get_record(dom, rec_id) mgr.api.method_get.assert_called_once_with("/%s/%s/records/%s" % (mgr.uri_base, dom.id, rec_id))
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_delete_record(self): clt = self.client mgr = clt._manager dom = self.domain rec = CloudDNSRecord(mgr, {"id": utils.random_unicode()}) mgr._async_call = Mock(return_value=({}, {})) uri = "/domains/%s/records/%s" % (utils.get_id(dom), utils.get_id(rec)) clt.delete_record(dom, rec) mgr._async_call.assert_called_once_with(uri, method="DELETE", error_class=exc.DomainRecordDeletionFailed, has_response=False)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_resolve_device_type_invalid(self): clt = self.client mgr = clt._manager device = object() self.assertRaises(exc.InvalidDeviceType, mgr._resolve_device_type, device)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_list_ptr_records(self): clt = self.client mgr = clt._manager dvc = fakes.FakeDNSDevice() href = "%s/%s" % (example_uri, dvc.id) svc_name = "cloudServersOpenStack" uri = "/rdns/%s?href=%s" % (svc_name, href) mgr._get_ptr_details = Mock(return_value=(href, svc_name)) clt.method_get = Mock(return_value=({}, {"records": []})) ret = clt.list_ptr_records(dvc) clt.method_get.assert_called_once_with(uri) self.assertEqual(ret, [])
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_add_ptr_records(self): clt = self.client mgr = clt._manager dvc = fakes.FakeDNSDevice() href = "%s/%s" % (example_uri, dvc.id) svc_name = "cloudServersOpenStack" rec = {"foo": "bar"} body = {"recordsList": {"records": [rec]}, "link": {"content": "", "href": href, "rel": svc_name}} uri = "/rdns" mgr._get_ptr_details = Mock(return_value=(href, svc_name)) mgr._async_call = Mock(return_value=({}, {"records": []})) clt.add_ptr_records(dvc, rec) mgr._async_call.assert_called_once_with(uri, body=body, error_class=exc.PTRRecordCreationFailed, method="POST")
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_delete_ptr_records(self): clt = self.client mgr = clt._manager dvc = fakes.FakeDNSDevice() href = "%s/%s" % (example_uri, dvc.id) svc_name = "cloudServersOpenStack" ip_address = "0.0.0.0" uri = "/rdns/%s?href=%s&ip=%s" % (svc_name, href, ip_address) mgr._get_ptr_details = Mock(return_value=(href, svc_name)) mgr._async_call = Mock(return_value=({}, {"records": []})) ret = clt.delete_ptr_records(dvc, ip_address=ip_address) mgr._async_call.assert_called_once_with(uri, error_class=exc.PTRRecordDeletionFailed, method="DELETE", has_response=False)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_get_rate_limits(self): clt = self.client limits = [{"uri": "fake1", "limit": 1}, {"uri": "fake2", "limit": 2}] resp = {"limits": {"rate": limits}} resp_limits = [{"uri": "fake1", "limits": 1}, {"uri": "fake2", "limits": 2}] clt.method_get = Mock(return_value=({}, resp)) ret = clt.get_rate_limits() self.assertEqual(ret, resp_limits)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_iter(self): clt = self.client mgr = clt._manager res_iter = DomainResultsIterator(mgr) ret = res_iter.__iter__() self.assertTrue(ret is res_iter)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_iter_items_first_fetch(self): clt = self.client mgr = clt._manager fake_name = utils.random_unicode() ret_body = {"domains": [{"name": fake_name}]} clt.method_get = Mock(return_value=({}, ret_body)) res_iter = DomainResultsIterator(mgr) ret = res_iter.next() self.assertTrue(isinstance(ret, CloudDNSDomain)) clt.method_get.assert_called_once_with("/domains")
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_iter_items_next_stop(self): clt = self.client mgr = clt._manager res_iter = DomainResultsIterator(mgr) res_iter.next_uri = None self.assertRaises(StopIteration, res_iter.next)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_record_iter(self): clt = self.client mgr = clt._manager res_iter = RecordResultsIterator(mgr) self.assertEqual(res_iter.paging_service, "record")
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def test_client_empty_get_body_error(self): clt = self.client self.assertRaises(exc.ServiceResponseFailure, clt.get_absolute_limits)
rackspace/pyrax
[ 238, 218, 238, 75, 1348707957 ]
def __init__( self, *, bucket_key: str, bucket_name: Optional[str] = None, wildcard_match: bool = False, aws_conn_id: str = 'aws_default', verify: Optional[Union[str, bool]] = None, **kwargs,
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def poke(self, context): if self.bucket_name is None: parsed_url = urlparse(self.bucket_key) if parsed_url.netloc == '': raise AirflowException('If key is a relative path from root, please provide a bucket_name') self.bucket_name = parsed_url.netloc self.bucket_key = parsed_url.path.lstrip('/') else: parsed_url = urlparse(self.bucket_key) if parsed_url.scheme != '' or parsed_url.netloc != '': raise AirflowException( 'If bucket_name is provided, bucket_key' ' should be relative path from root' ' level, rather than a full s3:// url' ) self.log.info('Poking for key : s3://%s/%s', self.bucket_name, self.bucket_key) if self.wildcard_match: return self.get_hook().check_for_wildcard_key(self.bucket_key, self.bucket_name) return self.get_hook().check_for_key(self.bucket_key, self.bucket_name)
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def check_fn(self, data: List) -> bool: return any(f.get('Size', 0) > 1048576 for f in data if isinstance(f, dict))
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def __init__( self, *, check_fn: Optional[Callable[..., bool]] = None, **kwargs,
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def poke(self, context): if super().poke(context=context) is False: return False s3_objects = self.get_files(s3_hook=self.get_hook()) if not s3_objects: return False check_fn = self.check_fn if self.check_fn_user is None else self.check_fn_user return check_fn(s3_objects)
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def Enable(cls): """Enables this hook.""" cls._use = 1
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def Disable(cls): """Disables this hook.""" cls._use = 0
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def EnableMan(cls): """Enables copying the man pages.""" cls._use_man = 1
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def DisableMan(cls): """Disables copying the man pages.""" cls._use_man = 0
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def IsEnabled(cls): """Returns whether this hook is activated.""" return cls._use
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def IsManEnabled(cls): """Returns whether man pages will be copied.""" return cls._use_man
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def AddFile(cls, vFile): """Adds a required file to the hook to be copied into the initramfs.""" cls._files.append(vFile)
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def RemoveFile(cls, vFile): """Deletes a required file from the hook.""" try: cls._files.remove(vFile) except ValueError: Tools.Fail('The file "' + vFile + '" was not found on the list!')
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def PrintFiles(cls): """Prints the required files in this hook.""" for file in cls.GetFiles(): print("File: " + file)
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def GetFiles(cls): """Returns the list of required files.""" return cls._files
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def GetOptionalFiles(cls): """Returns the list of optional files.""" return cls._optional_files
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def GetDirectories(cls): """Returns the list of required directories.""" return cls._directories
fearedbliss/bliss-initramfs
[ 39, 23, 39, 2, 1407558349 ]
def main(verbose=False): D = 10 ** 6 return 3 * int((D - 5) / 7.0) + 2
dhermes/project-euler
[ 11, 3, 11, 1, 1299471955 ]
def latest_jar(): global __JARS__ return __JARS__[-1]
cosminbasca/rdftools
[ 5, 1, 5, 1, 1417629469 ]
def check_java(message=""): if call(['java', '-version'], stderr=DEVNULL) != 0: raise JavaNotFoundException( 'Java is not installed in the system path. {0}'.format(message))
cosminbasca/rdftools
[ 5, 1, 5, 1, 1417629469 ]
def run_lubm_generator(num_universities, index, generator_seed, ontology, output_path, xms=XMS, xmx=XMX): run_tool("com.rdftools.LubmGenerator", xms, xmx, "--num_universities", num_universities, "--start_index", index, "--seed", generator_seed, "--ontology", ontology, "--output_path", output_path)
cosminbasca/rdftools
[ 5, 1, 5, 1, 1417629469 ]
def run_jvmvoid_generator(source, dataset_id, output_path, xms=XMS, xmx=XMX): run_tool("com.rdftools.VoIDGenerator", xms, xmx, "--source", source, "--dataset_id", dataset_id, "--output_path", output_path)
cosminbasca/rdftools
[ 5, 1, 5, 1, 1417629469 ]
def ud_exception(w: str, tag: str) -> str: if w == "การ" or w == "ความ": return "NOUN" return tag
PyThaiNLP/pythainlp
[ 786, 237, 786, 35, 1466693846 ]
def post_process( word_tags: List[Tuple[str, str]], to_ud: bool = False
PyThaiNLP/pythainlp
[ 786, 237, 786, 35, 1466693846 ]
def setUpClass(cls): super(NeutronResourcesTestJSON, cls).setUpClass() if not CONF.orchestration.image_ref: raise cls.skipException("No image available to test") os = clients.Manager() if not CONF.service_available.neutron: raise cls.skipException("Neutron support is required") cls.network_client = os.network_client cls.stack_name = data_utils.rand_name('heat') template = cls.read_template('neutron_basic') cls.keypair_name = (CONF.orchestration.keypair_name or cls._create_keypair()['name']) cls.external_network_id = CONF.network.public_network_id tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr) mask_bits = CONF.network.tenant_network_mask_bits cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next() # create the stack cls.stack_identifier = cls.create_stack( cls.stack_name, template, parameters={ 'KeyName': cls.keypair_name, 'InstanceType': CONF.orchestration.instance_type, 'ImageId': CONF.orchestration.image_ref, 'ExternalNetworkId': cls.external_network_id, 'timeout': CONF.orchestration.build_timeout, 'DNSServers': CONF.network.dns_servers, 'SubNetCidr': str(cls.subnet_cidr) }) cls.stack_id = cls.stack_identifier.split('/')[1] try: cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE') _, resources = cls.client.list_resources(cls.stack_identifier) except exceptions.TimeoutException as e: if CONF.compute_feature_enabled.console_output: # attempt to log the server console to help with debugging # the cause of the server not signalling the waitcondition # to heat. resp, body = cls.client.get_resource(cls.stack_identifier, 'Server') server_id = body['physical_resource_id'] LOG.debug('Console output for %s', server_id) resp, output = cls.servers_client.get_console_output( server_id, None) LOG.debug(output) raise e cls.test_resources = {} for resource in resources: cls.test_resources[resource['logical_resource_id']] = resource
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def test_created_resources(self): """Verifies created neutron resources.""" resources = [('Network', 'OS::Neutron::Net'), ('Subnet', 'OS::Neutron::Subnet'), ('RouterInterface', 'OS::Neutron::RouterInterface'), ('Server', 'OS::Nova::Server')] for resource_name, resource_type in resources: resource = self.test_resources.get(resource_name, None) self.assertIsInstance(resource, dict) self.assertEqual(resource_name, resource['logical_resource_id']) self.assertEqual(resource_type, resource['resource_type']) self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def test_created_network(self): """Verifies created network.""" network_id = self.test_resources.get('Network')['physical_resource_id'] resp, body = self.network_client.show_network(network_id) self.assertEqual('200', resp['status']) network = body['network'] self.assertIsInstance(network, dict) self.assertEqual(network_id, network['id']) self.assertEqual('NewNetwork', network['name'])
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def test_created_subnet(self): """Verifies created subnet.""" subnet_id = self.test_resources.get('Subnet')['physical_resource_id'] resp, body = self.network_client.show_subnet(subnet_id) self.assertEqual('200', resp['status']) subnet = body['subnet'] network_id = self.test_resources.get('Network')['physical_resource_id'] self.assertEqual(subnet_id, subnet['id']) self.assertEqual(network_id, subnet['network_id']) self.assertEqual('NewSubnet', subnet['name']) self.assertEqual(sorted(CONF.network.dns_servers), sorted(subnet['dns_nameservers'])) self.assertEqual(4, subnet['ip_version']) self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def test_created_router(self): """Verifies created router.""" router_id = self.test_resources.get('Router')['physical_resource_id'] resp, body = self.network_client.show_router(router_id) self.assertEqual('200', resp['status']) router = body['router'] self.assertEqual('NewRouter', router['name']) self.assertEqual(self.external_network_id, router['external_gateway_info']['network_id']) self.assertEqual(True, router['admin_state_up'])
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def test_created_router_interface(self): """Verifies created router interface.""" router_id = self.test_resources.get('Router')['physical_resource_id'] network_id = self.test_resources.get('Network')['physical_resource_id'] subnet_id = self.test_resources.get('Subnet')['physical_resource_id'] resp, body = self.network_client.list_ports() self.assertEqual('200', resp['status']) ports = body['ports'] router_ports = filter(lambda port: port['device_id'] == router_id, ports) created_network_ports = filter(lambda port: port['network_id'] == network_id, router_ports) self.assertEqual(1, len(created_network_ports)) router_interface = created_network_ports[0] fixed_ips = router_interface['fixed_ips'] subnet_fixed_ips = filter(lambda port: port['subnet_id'] == subnet_id, fixed_ips) self.assertEqual(1, len(subnet_fixed_ips)) router_interface_ip = subnet_fixed_ips[0]['ip_address'] self.assertEqual(str(self.subnet_cidr.iter_hosts().next()), router_interface_ip)
Mirantis/tempest
[ 2, 7, 2, 1, 1327963146 ]
def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) lr_param = { "name": "hetero_lr_0", "penalty": "L2", "optimizer": "rmsprop", "tol": 0.0001, "alpha": 0.01, "max_iter": 30, "early_stop": "diff", "batch_size": 320, "learning_rate": 0.15, "init_param": { "init_method": "zeros" }, "sqn_param": { "update_interval_L": 3, "memory_M": 5, "sample_size": 5000, "random_seed": None }, "cv_param": { "n_splits": 5, "shuffle": False, "random_seed": 103, "need_cv": False }, "callback_param": { "callbacks": ["ModelCheckpoint"], "save_freq": "epoch" } } pipeline = common_tools.make_normal_dsl(config, namespace, lr_param) # dsl_json = predict_pipeline.get_predict_dsl() # conf_json = predict_pipeline.get_predict_conf() # import json # json.dump(dsl_json, open('./hetero-lr-normal-predict-dsl.json', 'w'), indent=4) # json.dump(conf_json, open('./hetero-lr-normal-predict-conf.json', 'w'), indent=4) # fit model pipeline.fit() # query component summary common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary()) common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())
FederatedAI/FATE
[ 4887, 1449, 4887, 637, 1548325963 ]
def get_stats_for_region(region): try: session = boto3.Session(region_name=region) num_instances = len(list(session.resource("ec2").instances.all())) num_amis = len(list(session.resource("ec2").images.filter(Owners=["self"]))) num_vpcs = len(list(session.resource("ec2").vpcs.all())) num_enis = len(list(session.resource("ec2").network_interfaces.all())) num_volumes = len(list(session.resource("ec2").volumes.all())) except botocore.exceptions.ClientError: num_instances, num_amis, num_vpcs, num_enis, num_volumes = ["Access denied"] * 5 # type: ignore return [region, num_instances, num_amis, num_vpcs, num_enis, num_volumes]
kislyuk/aegea
[ 67, 17, 67, 23, 1456962813 ]
def __init__(self, name, data): """Initialize the Action object. Actions do not have explicit partition attributes, the are implied by the partition of the rule to which they belong. """ super(Action, self).__init__(name, partition=None) # Actions are Only supported on requests. self._data['request'] = True # Is this a forwarding action? if data.get('forward', False): self._data['forward'] = True # Yes, there are two supported forwarding actions: # forward to pool and reset, these are mutually # exclusive options. pool = data.get('pool', None) reset = data.get('reset', False) # This allows you to specify an empty node. This is # what Container Connector does. select = data.get('select', False) # This was added in 13.1.0 shutdown = data.get('shutdown', False) if pool: self._data['pool'] = pool elif reset: self._data['reset'] = reset elif select: self._data['select'] = select elif shutdown: self._data['shutdown'] = shutdown else: raise ValueError( "Unsupported forward action, must be one of reset, " "forward to pool, select, or shutdown.") # Is this a redirect action? elif data.get('redirect', False): self._data['redirect'] = True # Yes, set the location and httpReply attribute self._data['location'] = data.get('location', None) self._data['httpReply'] = data.get('httpReply', True) # Is this a setVariable action? elif data.get('setVariable', False): self._data['setVariable'] = True # Set the variable name and the value self._data['tmName'] = data.get('tmName', None) self._data['expression'] = data.get('expression', None) self._data['tcl'] = True # Is this a replace URI host action? elif data.get('replace', False) and data.get('httpHost', False): self._data['replace'] = True self._data['httpHost'] = True self._data['value'] = data.get('value', None) # Is this a replace URI path action? 
elif data.get('replace', False) and data.get('httpUri', False) and \ data.get('path', False): self._data['replace'] = True self._data['httpUri'] = True self._data['path'] = data.get('path', None) self._data['value'] = data.get('value', None) # Is this a replace URI action? elif data.get('replace', False) and data.get('httpUri', False): self._data['replace'] = True self._data['httpUri'] = True self._data['value'] = data.get('value', None) else: # Only forward, redirect and setVariable are supported. raise ValueError("Unsupported action, must be one of forward, " "redirect, setVariable, replace, or reset.")
f5devcentral/f5-cccl
[ 10, 45, 10, 23, 1491942422 ]
def __str__(self): return str(self._data)
f5devcentral/f5-cccl
[ 10, 45, 10, 23, 1491942422 ]
def setUp(self): self.clock = task.Clock() self.clock.spawnProcess = Mock() treq_mock = create_autospec(treq) response_mock = Mock() response_mock.text.return_value = defer.succeed("") treq_mock.request.return_value = defer.succeed(response_mock) self.config = Config.default() self.config.path = "./" try: os.makedirs("clients/algo") except FileExistsError: pass self.remote = MinerStatRemoteProtocol(self.config, treq_mock) self.rig = Rig(self.config, remote=self.remote, reactor=self.clock) self.rig.start = Mock(return_value=defer.succeed(None)) self.rig.stop = Mock(return_value=defer.succeed(None)) self.service = MinerStatService(self.rig)
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def test_start_stop(self): yield self.service.startService() self.service.rig.start.assert_called_with() yield self.service.stopService() self.service.rig.stop.assert_called_with()
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def setUp(self): self.config = Config("a", "b", "w", "p") self.prot = MinerStatRemoteProtocol(self.config)
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def test_dlconf(self): pass
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def test_algo_check(self): pass
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def test_poll_remote(self): pass
dpnova/pynerstat
[ 4, 1, 4, 14, 1498828453 ]
def __init__(self, adapter_agent, config): self.adapter_agent = adapter_agent self.config = config self.descriptor = Adapter( id=self.name, vendor='Voltha project', version='0.1', config=AdapterConfig(log_level=LogLevel.INFO) ) self.incoming_messages = DeferredQueue()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def stop(self): log.debug('stopping') log.info('stopped')
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def device_types(self): return DeviceTypes(items=self.supported_device_types)
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def change_master_state(self, master): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def reconcile_device(self, device): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def disable_device(self, device): device.oper_status = OperStatus.UNKNOWN self.adapter_agent.update_device(device)
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def reboot_device(self, device): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def get_image_download_status(self, device, request): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def activate_image_update(self, device, request): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def delete_device(self, device): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def update_pm_config(self, device, pm_configs): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def _simulate_device_activation(self, device): # first we verify that we got parent reference and proxy info assert device.parent_id assert device.proxy_address.device_id assert device.proxy_address.channel_id # we pretend that we were able to contact the device and obtain # additional information about it device.vendor = 'simulated onu adapter' device.model = 'n/a' device.hardware_version = 'n/a' device.firmware_version = 'n/a' device.serial_number = uuid4().hex device.connect_status = ConnectStatus.REACHABLE image1 = Image(name="onu_candidate1", version="1.0", hash="1234567892", install_datetime=datetime.datetime.utcnow().isoformat(), is_active=True, is_committed=True, is_valid=True) image2 = Image(name="onu_candidate2", version="1.0", hash="1234567893", install_datetime=datetime.datetime.utcnow().isoformat(), is_active=False, is_committed=False, is_valid=True) device.images.image.extend([image1, image2]) self.adapter_agent.update_device(device) # then shortly after we create some ports for the device yield asleep(0.05) uni_port = Port( port_no=2, label='UNI facing Ethernet port', type=Port.ETHERNET_UNI, admin_state=AdminState.ENABLED, oper_status=OperStatus.ACTIVE ) self.adapter_agent.add_port(device.id, uni_port) self.adapter_agent.add_port(device.id, Port( port_no=1, label='PON port', type=Port.PON_ONU, admin_state=AdminState.ENABLED, oper_status=OperStatus.ACTIVE, peers=[ Port.PeerPort( device_id=device.parent_id, port_no=device.parent_port_no ) ] )) # TODO adding vports to the logical device shall be done by agent? 
# then we create the logical device port that corresponds to the UNI # port of the device yield asleep(0.05) # obtain logical device id parent_device = self.adapter_agent.get_device(device.parent_id) logical_device_id = parent_device.parent_id assert logical_device_id # we are going to use the proxy_address.channel_id as unique number # and name for the virtual ports, as this is guaranteed to be unique # in the context of the OLT port, so it is also unique in the context # of the logical device port_no = device.proxy_address.channel_id cap = OFPPF_1GB_FD | OFPPF_FIBER self.adapter_agent.add_logical_port(logical_device_id, LogicalPort( id=str(port_no), ofp_port=ofp_port( port_no=port_no, hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no), name='uni-{}'.format(port_no), config=0, state=OFPPS_LIVE, curr=cap, advertised=cap, peer=cap, curr_speed=OFPPF_1GB_FD, max_speed=OFPPF_1GB_FD ), device_id=device.id, device_port_no=uni_port.port_no )) # simulate a proxied message sending and receving a reply reply = yield self._simulate_message_exchange(device) # and finally update to "ACTIVE" device = self.adapter_agent.get_device(device.id) device.oper_status = OperStatus.ACTIVE self.adapter_agent.update_device(device)
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def update_flows_incrementally(self, device, flow_changes, group_changes): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def receive_proxied_message(self, proxy_address, msg): # just place incoming message to a list self.incoming_messages.put((proxy_address, msg))
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def update_interface(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def receive_onu_detect_state(self, device_id, state): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def update_tcont(self, device, tcont_data, traffic_descriptor_data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def create_gemport(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def remove_gemport(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def update_multicast_gemport(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def create_multicast_distribution_set(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def remove_multicast_distribution_set(self, device, data): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def unsuppress_alarm(self, filter): raise NotImplementedError()
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def _simulate_message_exchange(self, device): # register for receiving async messages self.adapter_agent.register_for_proxied_messages(device.proxy_address) # reset incoming message queue while self.incoming_messages.pending: _ = yield self.incoming_messages.get() # construct message msg = 'test message' # send message self.adapter_agent.send_proxied_message(device.proxy_address, msg) # wait till we detect incoming message yield self.incoming_messages.get() # by returning we allow the device to be shown as active, which # indirectly verified that message passing works
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def create_tasks(self, evidence): """Create task for Partition Enumeration. Args: evidence: List of evidence objects to process Returns: A list of tasks to schedule. """ tasks = [PartitionEnumerationTask() for _ in evidence] return tasks
google/turbinia
[ 625, 155, 625, 117, 1442335840 ]
def check_response(func): """ Decorator checking first REST response. :return: """ def _decorator(self, *args, **kwargs): try: response = func(self, *args, **kwargs) except ServerNotFoundError as e: raise OperationRetry( 'Warning: {0}. ' 'If problem persists, error may be fatal.'.format( e.message)) if 'error' in response: self.logger.error('Response with error {0}' .format(response['error'])) raise GCPError(response['error']) return response return wraps(func)(_decorator)
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def __init__(self, config, logger, scope=constants.COMPUTE_SCOPE, discovery=constants.COMPUTE_DISCOVERY, api_version=constants.API_V1): """ GoogleCloudApi class constructor. Create API discovery object that will be making GCP REST API calls. :param config: dictionary with object properties :param logger: logger object that the class methods will be logging to :return: """ self.auth = config['auth'] self.config = config self.logger = logger.getChild('GCP') self.scope = scope self.__discovery = discovery self.api_version = api_version
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def discovery(self): """ Lazily load the discovery so we don't make API calls during __init__ """ if hasattr(self, '_discovery'): return self._discovery self._discovery = self.create_discovery(self.__discovery, self.scope, self.api_version) return self._discovery
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def create_discovery(self, discovery, scope, api_version):
    """Authenticate and build a Google Cloud API discovery object.

    :param discovery: name of the API discovery to be created
    :param scope: scope the API discovery will have
    :param api_version: version of the API
    :return: discovery object
    :raise: GCPError if there is a problem with service account JSON file:
    e.g. the file is not under the given path or it has wrong permissions
    """
    # Crypto.Random.atfork() must be called here because celery doesn't do
    # it
    # atfork()
    try:
        creds = self.get_credentials(scope)
        transport = httplib2.Http()
        creds.authorize(transport)
        return build(discovery, api_version, http=transport)
    except IOError as e:
        # bad/missing service-account file -- surface as a GCPError
        self.logger.error(str(e))
        raise GCPError(str(e))
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def __init__(self,
             config,
             logger,
             name,
             additional_settings=None,
             scope=constants.COMPUTE_SCOPE,
             discovery=constants.COMPUTE_DISCOVERY,
             api_version=constants.API_V1):
    """Store GCP project settings for a named resource.

    Create API discovery object that will be making GCP REST API calls.

    :param config: dictionary with project properties: path to auth file,
    project and zone
    :param logger: logger object that the class methods will be logging to
    :param name: name of GCP resource represented by this object
    :param additional_settings: optional dict used as the request body
    :param scope: scope string of GCP connection
    :param discovery: name of Google service
    :param api_version: version of used API to communicate with GCP
    """
    super(GoogleCloudPlatform, self).__init__(config,
                                              logger,
                                              scope,
                                              discovery,
                                              api_version)
    self.name = name
    self.auth = config['auth']
    self.project = config['project']
    self.zone = config['zone']
    # request body defaults to an empty dict when no settings are given
    if additional_settings:
        self.body = additional_settings
    else:
        self.body = {}
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def get_common_instance_metadata(self):
    """Get project's common instance metadata.

    :return: CommonInstanceMetadata list extracted from REST response get
    project metadata.
    """
    self.logger.info(
        'Get commonInstanceMetadata for project {0}'.format(self.project))
    project_info = (self.discovery.projects()
                    .get(project=self.project)
                    .execute())
    return project_info['commonInstanceMetadata']
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def ZONES(self):
    """Mapping of zone name -> zone resource for the project.

    Fetched from the zones API once (following pagination) and cached on
    the instance; each zone dict is annotated with a 'region_name' key.
    """
    try:
        return self._ZONES
    except AttributeError:
        pass
    zone_map = {}
    request = self.discovery.zones().list(project=self.project)
    # walk every page of results
    while request is not None:
        response = request.execute()
        for zone in response['items']:
            # e.g. '.../regions/us-east1' -> 'us-east1'
            zone['region_name'] = basename(zone['region'])
            zone_map[zone['name']] = zone
        request = self.discovery.zones().list_next(
            previous_request=request, previous_response=response)
    self._ZONES = zone_map
    return self._ZONES
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def __init__(self, message):
    # Plain passthrough to the base exception constructor; exists so this
    # class can act as the root of the plugin's exception hierarchy.
    super(GCPError, self).__init__(message)
cloudify-cosmo/cloudify-gcp-plugin
[ 6, 13, 6, 6, 1428599218 ]
def __init__(self, device, cpu=GUESTSHELL_CPU, memory=GUESTSHELL_MEMORY,
             disk=GUESTSHELL_DISK, log=None):
    """Prepare a guestshell manager for *device*.

    :param device: device object exposing an ``api.exec_opcmd`` method
    :param cpu: requested cpu allocation, capped at the platform maximum
    :param memory: requested memory allocation, capped at the platform max
    :param disk: requested disk allocation, capped at the platform maximum
    :param log: optional logger; defaults to the root logger
    """
    self.device = device
    # guestshell CLI commands are issued as 'cli_show_ascii' messages
    self.guestshell = partial(self.device.api.exec_opcmd,
                              msg_type='cli_show_ascii')
    self.cli = self.device.api.exec_opcmd
    self.log = log or logging.getLogger()
    # _get_sz_max() fills sz_max with the platform resource ceilings;
    # it must run before sz_need is computed below.
    self.sz_max = {}
    self._get_sz_max()
    # clamp the requested sizes to what the platform can actually provide
    self.sz_need = _guestshell.Resources(
        cpu=min(cpu, self.sz_max['cpu']),
        memory=min(memory, self.sz_max['memory']),
        disk=min(disk, self.sz_max['disk']))
    # current allocation / state are discovered later (see state, size)
    self.sz_has = None
    self._state = None
    self.exists = False
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def state(self):
    """Current guestshell virtual-service state, or 'None' when absent.

    Updates ``self.exists``/``self._state`` as a side effect.
    """
    cmd = 'show virtual-service detail name guestshell+'
    try:
        output = self.cli(cmd)
    except CommandError:
        # command rejected: there is no guestshell
        self.exists = False
        self._state = 'None'
    else:
        try:
            self._state = output['TABLE_detail']['ROW_detail']['state']
        except TypeError:
            # empty/odd payload: there is no guestshell
            self.exists = False
            self._state = 'None'
    return self._state
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def size(self):
    # Refresh the cached resource values from the device before returning
    # them, so callers always see the current allocation.
    self._get_sz_info()
    return self.sz_has
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def setup(self): self.log.info("/START(guestshell): setup") state = self.state self.log.info("/INFO(guestshell): current state: %s" % state) if 'Activated' == state: self._get_sz_info() if self.sz_need != self.sz_has: self.log.info("/INFO(guestshell): need to resize, please wait...") self.resize() self.reboot() else: self.log.info( "/INFO(guestshell): not activated, enabling with proper size, " "please wait ...") self.resize() self.enable() self._get_sz_info() self.log.info("/END(guestshell): setup")
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def enable(self):
    # Issue the enable command, then block until the virtual service
    # reports 'Activated'.
    self.guestshell('guestshell enable')
    self._wait_state('Activated')
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def disable(self):
    # Issue the disable command, then block until the virtual service
    # reports 'Deactivated'.
    self.guestshell('guestshell disable')
    self._wait_state('Deactivated')
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def resize_memory(self, memory):
    """Request a guestshell memory size, capped at the platform maximum."""
    capped = min(memory, self.sz_max['memory'])
    self.guestshell('guestshell resize memory {}'.format(capped))
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def resize(self):
    """Apply the target cpu/memory/disk sizes to the guestshell."""
    for apply_size, target in ((self.resize_cpu, self.sz_need.cpu),
                               (self.resize_memory, self.sz_need.memory),
                               (self.resize_disk, self.sz_need.disk)):
        apply_size(target)
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def sudoers(self, enable):
    """
    This method is used to enable/disable bash sudo commands running
    through the guestshell virtual service.  By default sudo access is
    prevented due to the setting in the 'sudoers' file.  Therefore the
    setting must be disabled in the file to enable sudo commands.

    This method assumes that the "bash-shell" feature is enabled.

    @@@ TO-DO: have a mech to check &| control bash-shell feature support

    :param enable:
        True - enables sudo commands
        False - disables sudo commands

    :raise RuntimeError: when enable is not a literal True/False

    :return:
        returns the response of the sed command needed to make
        the file change
    """
    f_sudoers = "/isan/vdc_1/virtual-instance/guestshell+/rootfs/etc/sudoers"

    if enable is True:
        # comment out 'Defaults requiretty' to allow sudo
        sed_cmd = r" 's/\(^Defaults *requiretty\)/#\1/g' "
    elif enable is False:
        # re-instate 'Defaults requiretty' to block sudo
        sed_cmd = r" 's/^#\(Defaults *requiretty\)/\1/g' "
    else:
        raise RuntimeError('enable must be True or False')

    # bug fix: the response was documented but never returned
    return self.guestshell("run bash sudo sed -i" + sed_cmd + f_sudoers)
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]